markdown
stringlengths
0
1.02M
code
stringlengths
0
832k
output
stringlengths
0
1.02M
license
stringlengths
3
36
path
stringlengths
6
265
repo_name
stringlengths
6
127
Training data
# Generate synthetic noise+interference training snapshots for the beamformer
# dictionary. For each training snapshot length, draw `n_training_samples`
# realizations, each containing a random number of random-direction,
# random-power interferers plus sensor noise.
# NOTE(review): relies on module-level `microphone_array`, `sigma_n`,
# `compute_steering_vector_ULA` and `generate_gaussian_samples` defined in
# earlier cells — confirm they are in scope before running.
n_training_samples = 5000
training_snapshots = [10, 50, 1000]
interference_powers = [10, 20, 30]    # candidate interference powers in dB
n_interference_list = [1, 2, 3]       # candidate number of simultaneous interferers

training_noise_interference_data_various_snapshots = []
for training_snapshot in training_snapshots:
    training_noise_interference_data = []
    for i_training_sample in range(n_training_samples):
        # Random number of interferers for this training sample.
        n_interferences = np.random.choice(n_interference_list)
        nv = np.zeros((microphone_array.n_mics, training_snapshot), dtype=complex)
        for _ in range(n_interferences):
            # Random direction (presumably normalized direction cosine u in
            # [0, 1] — TODO confirm against compute_steering_vector_ULA).
            u = np.random.uniform(0, 1)
            vi = compute_steering_vector_ULA(u, microphone_array)
            # Random interference power, converted from dB to linear scale.
            sigma = 10**(np.random.choice(interference_powers)/10)
            ii = generate_gaussian_samples(power=sigma, shape=(1, training_snapshot))
            nv += vi.dot(ii)
        # Add spatially white sensor noise with power sigma_n.
        noise = generate_gaussian_samples(power=sigma_n, shape=(microphone_array.n_mics, training_snapshot))
        nv += noise
        training_noise_interference_data.append(nv)
    training_noise_interference_data_various_snapshots.append(training_noise_interference_data)
_____no_output_____
MIT
playground_baseline_dictionary_various_training_snapshots_various_noise_interferences_random.ipynb
dung-n-tran/speech-enhancement-beamforming
Train baseline dictionary
# Fit one baseline DL beamformer dictionary per training-snapshot length,
# using the noise+interference realizations generated above.
dictionaries = []
for training_noise_interference_data in training_noise_interference_data_various_snapshots:
    beamformer = BaseDLBeamformer(vs)
    beamformer.fit(training_noise_interference_data)
    dictionaries.append(beamformer)
_____no_output_____
MIT
playground_baseline_dictionary_various_training_snapshots_various_noise_interferences_random.ipynb
dung-n-tran/speech-enhancement-beamforming
Testing
# Monte-Carlo evaluation: average output SINR of MVDR, MPDR and the trained
# baseline-dictionary beamformers over `n_trials` trials, swept across SNR
# values and snapshot counts.
# NOTE(review): depends on module-level `n_SNRs`, `SNRs`, `vs`, `sigma_n`,
# `microphone_array`, `compute_MVDR_weight` and the `dictionaries` trained
# above — confirm they are defined in earlier cells.
n_trials = 200
snapshots = np.array([10, 20, 30, 40, 60, 100, 200, 500, 1000])
n_snapshots = len(snapshots)

# ui1/ui2, sigma_1/sigma_2, vi1/vi2 are computed but not used below —
# presumably leftovers from a fixed two-interferer setup.
ui1 = np.random.uniform(0, 1)
ui2 = np.random.uniform(0, 1)
sigma_1 = 10**(20/10)
sigma_2 = 0*10**(20/10)
vi1 = compute_steering_vector_ULA(ui1, microphone_array)
vi2 = compute_steering_vector_ULA(ui2, microphone_array)

# Draw one fixed random interference geometry for the whole test.
n_interferences = np.random.choice(n_interference_list)
interference_steering_vectors = []
for _ in range(n_interferences):
    u = np.random.uniform(0, 1)
    vi = compute_steering_vector_ULA(u, microphone_array)
    interference_steering_vectors.append(vi)

# Result grids: average SINR per (SNR, snapshot-count) cell.
sinr_snr_mvdr = np.zeros((n_SNRs, n_snapshots))
sinr_snr_mpdr = np.zeros((n_SNRs, n_snapshots))
sinr_snr_baseline_mpdr = np.zeros((len(training_snapshots), n_SNRs, n_snapshots))

for i_SNR in tqdm_notebook(range(n_SNRs), desc="SNRs"):
    # Signal power (dB -> linear) and rank-one signal covariance.
    sigma_s = 10**(SNRs[i_SNR] / 10)
    Rs = sigma_s * vs.dot(vs.transpose().conjugate())
    for i_snapshot in tqdm_notebook(range(n_snapshots), desc="Snapshots", leave=False):
        snapshot = snapshots[i_snapshot]
        sinr_mvdr = np.zeros(n_trials)
        sinr_mpdr = np.zeros(n_trials)
        sinr_baseline_mpdr = np.zeros((len(training_snapshots), n_trials))
        for i_trial in range(n_trials):
            ss = generate_gaussian_samples(power=sigma_s, shape=(1, snapshot))  # signal samples
            nn = generate_gaussian_samples(power=sigma_n, shape=(microphone_array.n_mics, snapshot))  # Gaussian noise samples
            # Accumulate interference samples (nv) and the analytic
            # noise+interference covariance (Rn) over the interferers.
            nv = np.zeros((microphone_array.n_mics, snapshot), dtype=complex)
            Rn = np.zeros((microphone_array.n_mics, microphone_array.n_mics), dtype=complex)
            for i_interference in range(n_interferences):
                # Interference power is redrawn per interferer per trial.
                sigma = 10**(np.random.choice(interference_powers)/10)
                ii = generate_gaussian_samples(power=sigma, shape=(1, snapshot))
                nv += interference_steering_vectors[i_interference].dot(ii)
                Rn += sigma*interference_steering_vectors[i_interference].dot(interference_steering_vectors[i_interference].transpose().conjugate())
            Rn += sigma_n*np.identity(microphone_array.n_mics)
            Rninv = np.linalg.inv(Rn)
            # Optimal (known-covariance) MVDR weights and optimal SINR;
            # SINRopt is not used below in this cell.
            Wo = Rninv.dot(vs) / (vs.transpose().conjugate().dot(Rninv).dot(vs))
            SINRopt = ( np.real(Wo.transpose().conjugate().dot(Rs).dot(Wo)) / np.real(Wo.transpose().conjugate().dot(Rn).dot(Wo)) )[0][0]
            nv += nn
            sv = vs.dot(ss)
            xx = sv + nv     # received snapshots: signal + interference + noise
            # MVDR uses noise-only snapshots; MPDR uses full received data.
            wv = compute_MVDR_weight(vs, nv)
            wp = compute_MVDR_weight(vs, xx)
            # Dictionary-based beamformers pick precomputed weights from xx.
            for i_dictionary in range(len(dictionaries)):
                dictionary = dictionaries[i_dictionary]
                w_baseline_p = dictionary.choose_weights(xx)
                sinr_baseline_mpdr[i_dictionary, i_trial] = np.real(w_baseline_p.transpose().conjugate().dot(Rs).dot(w_baseline_p)) / np.real(w_baseline_p.transpose().conjugate().dot(Rn).dot(w_baseline_p))
            sinr_mvdr[i_trial] = np.real(wv.transpose().conjugate().dot(Rs).dot(wv)) / np.real(wv.transpose().conjugate().dot(Rn).dot(wv))
            sinr_mpdr[i_trial] = np.real(wp.transpose().conjugate().dot(Rs).dot(wp)) / np.real(wp.transpose().conjugate().dot(Rn).dot(wp))
        # Average SINR over trials for this (SNR, snapshot) cell.
        sinr_snr_mvdr[i_SNR, i_snapshot] = np.sum(sinr_mvdr) / n_trials
        sinr_snr_mpdr[i_SNR, i_snapshot] = np.sum(sinr_mpdr) / n_trials
        for i_dictionary in range(len(dictionaries)):
            sinr_snr_baseline_mpdr[i_dictionary, i_SNR, i_snapshot] = np.sum(sinr_baseline_mpdr[i_dictionary, :]) / n_trials
_____no_output_____
MIT
playground_baseline_dictionary_various_training_snapshots_various_noise_interferences_random.ipynb
dung-n-tran/speech-enhancement-beamforming
Visualize results
# Plot average output SINR vs. number of snapshots, one subplot per SNR,
# comparing MVDR, MPDR and the baseline dictionaries.
fig = plt.figure(figsize=(9, 6*n_SNRs));
for i_SNR in range(n_SNRs):
    sigma_s = 10**(SNRs[i_SNR] / 10)
    Rs = sigma_s * vs.dot(vs.transpose().conjugate())
    # NOTE(review): Wo and Rn here are leftover values from the LAST trial of
    # the evaluation loop above, so this SINRopt is stale (and unused in the
    # plot) — verify before relying on it.
    SINRopt = ( np.real(Wo.transpose().conjugate().dot(Rs).dot(Wo)) / np.real(Wo.transpose().conjugate().dot(Rn).dot(Wo)) )[0][0]
    ax = fig.add_subplot(n_SNRs, 1, i_SNR+1)
    # SINR is converted to dB for display.
    ax.semilogx(snapshots, 10*np.log10(sinr_snr_mvdr[i_SNR, :]), marker="o", label="MVDR")
    ax.semilogx(snapshots, 10*np.log10(sinr_snr_mpdr[i_SNR, :]), marker="*", label="MPDR")
    for i_training_snapshot in range(len(training_snapshots)):
        ax.semilogx(snapshots, 10*np.log10(sinr_snr_baseline_mpdr[i_training_snapshot, i_SNR, :]), label="Baseline - {} training snapshots".format(training_snapshots[i_training_snapshot]))
    ax.set_xlim(10, 1000); ax.set_ylim(-10, 45)
    ax.legend(loc="lower right")
    ax.set_xlabel("Number of snapshots")
    ax.set_ylabel(r"$SINR_0$ [dB]")
    ax.set_title("Testing performance, {} training samples".format(n_training_samples))
plt.tight_layout()
fig.savefig("baseline_dl_mvdr_various_interferences.jpg", dpi=600)
_____no_output_____
MIT
playground_baseline_dictionary_various_training_snapshots_various_noise_interferences_random.ipynb
dung-n-tran/speech-enhancement-beamforming
Example of a Camera Display to HTML5 via MPLD3
%matplotlib inline import matplotlib.pylab as plt from ctapipe.instrument import CameraGeometry from ctapipe.visualization import CameraDisplay from ctapipe.image import toymodel from ctapipe.image import hillas_parameters, tailcuts_clean import numpy as np
_____no_output_____
BSD-3-Clause
examples/notebooks/camera_display.ipynb
orelgueta/ctapipe
Just a quick function to mark a pixel and draw lines to its neighbors
def draw_neighbors(geom, pixel_index, color='r', **kwargs):
    """Draw a line from a pixel to each of its neighboring pixels.

    Extra keyword arguments are forwarded to ``plt.plot``.
    """
    # geom.neighbors holds pixel *indices* (not pixel ids).
    px = geom.pix_x[pixel_index].value
    py = geom.pix_y[pixel_index].value
    for neighbor in geom.neighbors[pixel_index]:
        qx = geom.pix_x[neighbor].value
        qy = geom.pix_y[neighbor].value
        plt.plot([px, qx], [py, qy], color=color, **kwargs)
_____no_output_____
BSD-3-Clause
examples/notebooks/camera_display.ipynb
orelgueta/ctapipe
Now, let's create a fake Cherenkov image from a given `CameraGeometry` and fill it with some data:
# get the HESS demo camera geometry geom = CameraGeometry.from_name("NectarCam") # create a fake camera image to display: model = toymodel.generate_2d_shower_model(centroid=(0.2, 0.0), width=0.01, length=0.1, psi='35d') image, sig, bg = toymodel.make_toymodel_shower_image(geom, model.pdf, intensity=50, nsb_level_pe=1000) # apply really stupid image cleaning (single threshold): mask = tailcuts_clean(geom, image, 10, 100) # calculate image parameters hillas = hillas_parameters(geom.pix_x.value[mask], geom.pix_y.value[mask], image[mask]) # show the camera image and overlay Hillas ellipse disp = CameraDisplay(geom) disp.set_limits_minmax(0, 300) disp.add_colorbar() disp.image = image disp.overlay_moments(hillas, color='grey', linewidth=3,zorder=10) #disp.highlight_pixels(mask) # draw the neighbors of pixel 100 in red, and the # neighbor-neighbors in green for ii in geom.neighbors[130]: draw_neighbors(geom, ii, color='green') draw_neighbors(geom, 130, color='red',lw=2)
_____no_output_____
BSD-3-Clause
examples/notebooks/camera_display.ipynb
orelgueta/ctapipe
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.png) Automated Machine Learning_**Orange Juice Sales Forecasting**_ Contents1. [Introduction](Introduction)1. [Setup](Setup)1. [Compute](Compute)1. [Data](Data)1. [Train](Train)1. [Predict](Predict)1. [Operationalize](Operationalize) IntroductionIn this example, we use AutoML to train, select, and operationalize a time-series forecasting model for multiple time-series.Make sure you have executed the [configuration notebook](../../../configuration.ipynb) before running this notebook.The examples in the follow code samples use the University of Chicago's Dominick's Finer Foods dataset to forecast orange juice sales. Dominick's was a grocery chain in the Chicago metropolitan area. Setup
import azureml.core import pandas as pd import numpy as np import logging from azureml.core.workspace import Workspace from azureml.core.experiment import Experiment from azureml.train.automl import AutoMLConfig from azureml.automl.core.featurization import FeaturizationConfig
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
zronaghi/MachineLearningNotebooks
This sample notebook may use features that are not available in previous versions of the Azure ML SDK.
# Version sanity check: report the SDK version this notebook was authored
# with next to the version installed in the current environment.
installed_version = azureml.core.VERSION
print("This notebook was created using version 1.19.0 of the Azure ML SDK")
print("You are currently using version", installed_version, "of the Azure ML SDK")
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
zronaghi/MachineLearningNotebooks
As part of the setup you have already created a Workspace. To run AutoML, you also need to create an Experiment. An Experiment corresponds to a prediction problem you are trying to solve, while a Run corresponds to a specific approach to the problem.
# Connect to the workspace (reads config.json written by the configuration
# notebook) and create/attach the experiment used for this run history.
ws = Workspace.from_config()

# choose a name for the run history container in the workspace
experiment_name = 'automl-ojforecasting'

experiment = Experiment(ws, experiment_name)

# Summarize workspace/experiment details in a one-column DataFrame.
output = {}
output['Subscription ID'] = ws.subscription_id
output['Workspace'] = ws.name
output['SKU'] = ws.sku
output['Resource Group'] = ws.resource_group
output['Location'] = ws.location
output['Run History Name'] = experiment_name
# Passing -1 to display.max_colwidth is deprecated in pandas >= 1.0;
# None is the documented way to mean "no column-width limit".
pd.set_option('display.max_colwidth', None)
outputDf = pd.DataFrame(data = output, index = [''])
outputDf.T
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
zronaghi/MachineLearningNotebooks
ComputeYou will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targetsamlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource. Creation of AmlCompute takes approximately 5 minutes. If the AmlCompute with that name is already in your workspace this code will skip the creation process.As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this article on the default limits and how to request more quota.
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException

# Choose a name for your CPU cluster
amlcompute_cluster_name = "oj-cluster"

# Reuse the cluster if it already exists in the workspace; otherwise
# provision a new AmlCompute cluster (EAFP: the lookup raises when absent).
try:
    compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)
    print('Found existing cluster, use it.')
except ComputeTargetException:
    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2', max_nodes=6)
    compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)
# Block until the cluster is ready (no-op if it already was).
compute_target.wait_for_completion(show_output=True)
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
zronaghi/MachineLearningNotebooks
DataYou are now ready to load the historical orange juice sales data. We will load the CSV file into a plain pandas DataFrame; the time column in the CSV is called _WeekStarting_, so it will be specially parsed into the datetime type.
# Name of the datetime column in the raw CSV.
time_column_name = 'WeekStarting'
# Parse the time column into datetime dtype while loading.
data = pd.read_csv("dominicks_OJ.csv", parse_dates=[time_column_name])
data.head()
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
zronaghi/MachineLearningNotebooks
Each row in the DataFrame holds a quantity of weekly sales for an OJ brand at a single store. The data also includes the sales price, a flag indicating if the OJ brand was advertised in the store that week, and some customer demographic information based on the store location. For historical reasons, the data also include the logarithm of the sales quantity. The Dominick's grocery data is commonly used to illustrate econometric modeling techniques where logarithms of quantities are generally preferred. The task is now to build a time-series model for the _Quantity_ column. It is important to note that this dataset is comprised of many individual time-series - one for each unique combination of _Store_ and _Brand_. To distinguish the individual time-series, we define the **time_series_id_column_names** - the columns whose values determine the boundaries between time-series:
# Each unique (Store, Brand) pair identifies one individual time-series.
time_series_id_column_names = ['Store', 'Brand']
nseries = len(data.groupby(time_series_id_column_names))
print('Data contains {0} individual time-series.'.format(nseries))
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
zronaghi/MachineLearningNotebooks
For demonstration purposes, we extract sales time-series for just a few of the stores:
# Keep only a few stores so the demo trains quickly.
use_stores = [2, 5, 8]
data_subset = data[data.Store.isin(use_stores)]
nseries = len(data_subset.groupby(time_series_id_column_names))
print('Data subset contains {0} individual time-series.'.format(nseries))
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
zronaghi/MachineLearningNotebooks
Data SplittingWe now split the data into a training and a testing set for later forecast evaluation. The test set will contain the final 20 weeks of observed sales for each time-series. The splits should be stratified by series, so we use a group-by statement on the time series identifier columns.
# Hold out the final 20 observed weeks of each series for evaluation.
n_test_periods = 20

def split_last_n_by_series_id(df, n):
    """Split ``df`` per series: all but the last ``n`` rows vs. the last ``n``.

    Rows are sorted by the module-level ``time_column_name`` first, then
    grouped on ``time_series_id_column_names`` so every series is split
    independently. Returns ``(head, tail)`` DataFrames.
    """
    grouped = df.sort_values(time_column_name).groupby(
        time_series_id_column_names, group_keys=False)
    head = grouped.apply(lambda g: g.iloc[:-n])
    tail = grouped.apply(lambda g: g.iloc[-n:])
    return head, tail

train, test = split_last_n_by_series_id(data_subset, n_test_periods)
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
zronaghi/MachineLearningNotebooks
Upload data to datastoreThe [Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-workspace), is paired with the storage account, which contains the default data store. We will use it to upload the train and test data and create [tabular datasets](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) for training and testing. A tabular dataset defines a series of lazily-evaluated, immutable operations to load data from the data source into tabular representation.
# Persist the train/test splits locally, then upload them to the workspace's
# default datastore so they can be referenced as tabular datasets.
train.to_csv (r'./dominicks_OJ_train.csv', index = None, header=True)
test.to_csv (r'./dominicks_OJ_test.csv', index = None, header=True)
datastore = ws.get_default_datastore()
datastore.upload_files(files = ['./dominicks_OJ_train.csv', './dominicks_OJ_test.csv'], target_path = 'dataset/', overwrite = True,show_progress = True)
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
zronaghi/MachineLearningNotebooks
Create dataset for training
from azureml.core.dataset import Dataset
# Wrap the uploaded training CSV as a lazily-evaluated tabular dataset.
train_dataset = Dataset.Tabular.from_delimited_files(path=datastore.path('dataset/dominicks_OJ_train.csv'))
# Preview the last few rows to confirm the dataset loads as expected.
train_dataset.to_pandas_dataframe().tail()
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
zronaghi/MachineLearningNotebooks
ModelingFor forecasting tasks, AutoML uses pre-processing and estimation steps that are specific to time-series. AutoML will undertake the following pre-processing steps:* Detect time-series sample frequency (e.g. hourly, daily, weekly) and create new records for absent time points to make the series regular. A regular time series has a well-defined frequency and has a value at every sample point in a contiguous time span * Impute missing values in the target (via forward-fill) and feature columns (using median column values) * Create features based on time series identifiers to enable fixed effects across different series* Create time-based features to assist in learning seasonal patterns* Encode categorical variables to numeric quantitiesIn this notebook, AutoML will train a single, regression-type model across **all** time-series in a given training set. This allows the model to generalize across related series. If you're looking for training multiple models for different time-series, please see the many-models notebook.You are almost ready to start an AutoML training job. First, we need to separate the target column from the rest of the DataFrame:
# Forecasting target (label) column: weekly sales quantity.
target_column_name = 'Quantity'
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
zronaghi/MachineLearningNotebooks
CustomizationThe featurization customization in forecasting is an advanced feature in AutoML which allows our customers to change the default forecasting featurization behaviors and column types through `FeaturizationConfig`. The supported scenarios include:1. Column purposes update: Override feature type for the specified column. Currently supports DateTime, Categorical and Numeric. This customization can be used in the scenario that the type of the column cannot correctly reflect its purpose. Some numerical columns, for instance, can be treated as Categorical columns which need to be converted to categorical while some can be treated as epoch timestamp which need to be converted to datetime. To tell our SDK to correctly preprocess these columns, a configuration need to be add with the columns and their desired types.2. Transformer parameters update: Currently supports parameter change for Imputer only. User can customize imputation methods. The supported imputing methods for target column are constant and ffill (forward fill). The supported imputing methods for feature columns are mean, median, most frequent, constant and ffill (forward fill). This customization can be used for the scenario that our customers know which imputation methods fit best to the input data. For instance, some datasets use NaN to represent 0 which the correct behavior should impute all the missing value with 0. To achieve this behavior, these columns need to be configured as constant imputation with `fill_value` 0.3. Drop columns: Columns to drop from being featurized. These usually are the columns which are leaky or the columns contain no useful data.
# Customize AutoML featurization: drop the leaky column, fix a column type,
# and override imputation strategies for specific columns.
featurization_config = FeaturizationConfig()
featurization_config.drop_columns = ['logQuantity']  # 'logQuantity' is a leaky feature, so we remove it.
# Force the CPWVOL5 feature to be numeric type.
featurization_config.add_column_purpose('CPWVOL5', 'Numeric')
# Fill missing values in the target column, Quantity, with zeros.
featurization_config.add_transformer_params('Imputer', ['Quantity'], {"strategy": "constant", "fill_value": 0})
# Fill missing values in the INCOME column with median value.
featurization_config.add_transformer_params('Imputer', ['INCOME'], {"strategy": "median"})
# Fill missing values in the Price column with forward fill (last value carried forward).
featurization_config.add_transformer_params('Imputer', ['Price'], {"strategy": "ffill"})
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
zronaghi/MachineLearningNotebooks
Forecasting ParametersTo define forecasting parameters for your experiment training, you can leverage the ForecastingParameters class. The table below details the forecasting parameter we will be passing into our experiment.|Property|Description||-|-||**time_column_name**|The name of your time column.||**forecast_horizon**|The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly).||**time_series_id_column_names**|The column names used to uniquely identify the time series in data that has multiple rows with the same timestamp. If the time series identifiers are not defined, the data set is assumed to be one time series.| TrainThe [AutoMLConfig](https://docs.microsoft.com/en-us/python/api/azureml-train-automl-client/azureml.train.automl.automlconfig.automlconfig?view=azure-ml-py) object defines the settings and data for an AutoML training job. Here, we set necessary inputs like the task type, the number of AutoML iterations to try, the training data, and cross-validation parameters.For forecasting tasks, there are some additional parameters that can be set in the `ForecastingParameters` class: the name of the column holding the date/time, the timeseries id column names, and the maximum forecast horizon. A time column is required for forecasting, while the time_series_id is optional. If time_series_id columns are not given, AutoML assumes that the whole dataset is a single time-series. We also pass a list of columns to drop prior to modeling. The _logQuantity_ column is completely correlated with the target quantity, so it must be removed to prevent a target leak.The forecast horizon is given in units of the time-series frequency; for instance, the OJ series frequency is weekly, so a horizon of 20 means that a trained model will estimate sales up to 20 weeks beyond the latest date in the training data for each series. 
In this example, we set the forecast horizon to the number of samples per series in the test set (n_test_periods). Generally, the value of this parameter will be dictated by business needs. For example, a demand planning application that estimates the next month of sales should set the horizon according to suitable planning time-scales. Please see the [energy_demand notebook](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand) for more discussion of forecast horizon.We note here that AutoML can sweep over two types of time-series models:* Models that are trained for each series such as ARIMA and Facebook's Prophet.* Models trained across multiple time-series using a regression approach.In the first case, AutoML loops over all time-series in your dataset and trains one model (e.g. AutoArima or Prophet, as the case may be) for each series. This can result in long runtimes to train these models if there are a lot of series in the data. One way to mitigate this problem is to fit models for different series in parallel if you have multiple compute cores available. To enable this behavior, set the `max_cores_per_iteration` parameter in your AutoMLConfig as shown in the example in the next cell. Finally, a note about the cross-validation (CV) procedure for time-series data. AutoML uses out-of-sample error estimates to select a best pipeline/model, so it is important that the CV fold splitting is done correctly. Time-series can violate the basic statistical assumptions of the canonical K-Fold CV strategy, so AutoML implements a [rolling origin validation](https://robjhyndman.com/hyndsight/tscv/) procedure to create CV folds for time-series data. To use this procedure, you just need to specify the desired number of CV folds in the AutoMLConfig object. 
It is also possible to bypass CV and use your own validation set by setting the *validation_data* parameter of AutoMLConfig.Here is a summary of AutoMLConfig parameters used for training the OJ model:|Property|Description||-|-||**task**|forecasting||**primary_metric**|This is the metric that you want to optimize. Forecasting supports the following primary metrics spearman_correlationnormalized_root_mean_squared_errorr2_scorenormalized_mean_absolute_error|**experiment_timeout_hours**|Experimentation timeout in hours.||**enable_early_stopping**|If early stopping is on, training will stop when the primary metric is no longer improving.||**training_data**|Input dataset, containing both features and label column.||**label_column_name**|The name of the label column.||**compute_target**|The remote compute for training.||**n_cross_validations**|Number of cross-validation folds to use for model/pipeline selection||**enable_voting_ensemble**|Allow AutoML to create a Voting ensemble of the best performing models||**enable_stack_ensemble**|Allow AutoML to create a Stack ensemble of the best performing models||**debug_log**|Log file path for writing debugging information||**featurization**| 'auto' / 'off' / FeaturizationConfig Indicator for whether featurization step should be done automatically or not, or whether customized featurization should be used. Setting this enables AutoML to perform featurization on the input to handle *missing data*, and to perform some common *feature extraction*.||**max_cores_per_iteration**|Maximum number of cores to utilize per iteration. A value of -1 indicates all available cores should be used
from azureml.automl.core.forecasting_parameters import ForecastingParameters

# Time-series specifics: time column, horizon (in units of the series
# frequency — weekly here), and the series-identifier columns.
forecasting_parameters = ForecastingParameters(
    time_column_name=time_column_name,
    forecast_horizon=n_test_periods,
    time_series_id_column_names=time_series_id_column_names
)

# Full AutoML job definition: forecasting task, primary metric, training
# data/label, compute, CV folds, and the featurization customizations above.
# max_cores_per_iteration=-1 uses all available cores per iteration.
automl_config = AutoMLConfig(task='forecasting',
                             debug_log='automl_oj_sales_errors.log',
                             primary_metric='normalized_mean_absolute_error',
                             experiment_timeout_hours=0.25,
                             training_data=train_dataset,
                             label_column_name=target_column_name,
                             compute_target=compute_target,
                             enable_early_stopping=True,
                             featurization=featurization_config,
                             n_cross_validations=3,
                             verbosity=logging.INFO,
                             max_cores_per_iteration=-1,
                             forecasting_parameters=forecasting_parameters)
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
zronaghi/MachineLearningNotebooks
You can now submit a new training run. Depending on the data and number of iterations this operation may take several minutes.Information from each iteration will be printed to the console. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous.
# Submit the AutoML job asynchronously (show_output=False), display the run
# widget (the bare expression renders in the notebook), then block until done.
remote_run = experiment.submit(automl_config, show_output=False)
remote_run
remote_run.wait_for_completion()
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
zronaghi/MachineLearningNotebooks
Retrieve the Best ModelEach run within an Experiment stores serialized (i.e. pickled) pipelines from the AutoML iterations. We can now retrieve the pipeline with the best performance on the validation dataset:
# Retrieve the best run and its fitted pipeline from the completed AutoML run.
best_run, fitted_model = remote_run.get_output()
print(fitted_model.steps)
# Registered model name is stored in the run's properties.
model_name = best_run.properties['model_name']
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
zronaghi/MachineLearningNotebooks
TransparencyView updated featurization summary
# Inspect what featurization AutoML actually applied to each input column.
custom_featurizer = fitted_model.named_steps['timeseriestransformer']
custom_featurizer.get_featurization_summary()
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
zronaghi/MachineLearningNotebooks
ForecastingNow that we have retrieved the best pipeline/model, it can be used to make predictions on test data. First, we remove the target values from the test set:
# Separate the target column from the test features.
# NOTE(review): X_test aliases `test` (no copy), and .pop() mutates it —
# after this cell `test` itself no longer contains the Quantity column.
X_test = test
y_test = X_test.pop(target_column_name).values
X_test.head()
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
zronaghi/MachineLearningNotebooks
To produce predictions on the test set, we need to know the feature values at all dates in the test set. This requirement is somewhat reasonable for the OJ sales data since the features mainly consist of price, which is usually set in advance, and customer demographics which are approximately constant for each store over the 20 week forecast horizon in the testing data.
# forecast returns the predictions and the featurized data, aligned to X_test. # This contains the assumptions that were made in the forecast y_predictions, X_trans = fitted_model.forecast(X_test)
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
zronaghi/MachineLearningNotebooks
If you are used to scikit pipelines, perhaps you expected `predict(X_test)`. However, forecasting requires a more general interface that also supplies the past target `y` values. Please use `forecast(X,y)` as `predict(X)` is reserved for internal purposes on forecasting models.The [forecast function notebook](../forecasting-forecast-function/auto-ml-forecasting-function.ipynb). EvaluateTo evaluate the accuracy of the forecast, we'll compare against the actual sales quantities for some select metrics, included the mean absolute percentage error (MAPE). We'll add predictions and actuals into a single dataframe for convenience in calculating the metrics.
# Join predictions and actuals into one frame for metric computation.
assign_dict = {'predicted': y_predictions, target_column_name: y_test}
df_all = X_test.assign(**assign_dict)

from azureml.automl.core.shared import constants
from azureml.automl.runtime.shared.score import scoring
from matplotlib import pyplot as plt

# use automl scoring module to compute the standard scalar regression metrics
scores = scoring.score_regression(
    y_test=df_all[target_column_name],
    y_pred=df_all['predicted'],
    metrics=list(constants.Metric.SCALAR_REGRESSION_SET))

print("[Test data scores]\n")
for key, value in scores.items():
    print('{}: {:.3f}'.format(key, value))

# Plot predictions (blue) against the identity line of actuals (green).
%matplotlib inline
test_pred = plt.scatter(df_all[target_column_name], df_all['predicted'], color='b')
test_test = plt.scatter(df_all[target_column_name], df_all[target_column_name], color='g')
plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)
plt.show()
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
zronaghi/MachineLearningNotebooks
Operationalize _Operationalization_ means getting the model into the cloud so that others can run it after you close the notebook. We will create a Docker image running on Azure Container Instances with the model.
# Register the best model in the workspace so it can be deployed.
description = 'AutoML OJ forecaster'
tags = None
model = remote_run.register_model(model_name = model_name, description = description, tags = tags)
print(remote_run.model_id)
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
zronaghi/MachineLearningNotebooks
Develop the scoring scriptFor the deployment we need a function which will run the forecast on serialized data. It can be obtained from the best_run.
# Download the auto-generated scoring script from the best run's outputs;
# it becomes the web-service entry script below.
script_file_name = 'score_fcast.py'
best_run.download_file('outputs/scoring_file_v_1_0_0.py', script_file_name)
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
zronaghi/MachineLearningNotebooks
Deploy the model as a Web Service on Azure Container Instance
from azureml.core.model import InferenceConfig
from azureml.core.webservice import AciWebservice
from azureml.core.webservice import Webservice
from azureml.core.model import Model

# Package the registered model with the downloaded scoring script and the
# best run's environment, then deploy to Azure Container Instances.
inference_config = InferenceConfig(environment=best_run.get_environment(),
                                   entry_script=script_file_name)

aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,
                                               memory_gb=2,
                                               tags={'type': "automl-forecasting"},
                                               description="Automl forecasting sample service")

aci_service_name = 'automl-oj-forecast-01'
print(aci_service_name)
aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aciconfig)
aci_service.wait_for_deployment(True)   # blocks until the container is up
print(aci_service.state)
aci_service.get_logs()
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
zronaghi/MachineLearningNotebooks
Call the service
import json

X_query = X_test.copy()
# We have to convert datetime to string, because Timestamps cannot be serialized to JSON.
X_query[time_column_name] = X_query[time_column_name].astype(str)
# The Service object accepts a dictionary, which is internally converted to a JSON string.
# The section 'data' contains the data frame in the form of dictionary.
test_sample = json.dumps({'data': X_query.to_dict(orient='records')})
response = aci_service.run(input_data=test_sample)
# translate from networkese to datascientese
try:
    res_dict = json.loads(response)
    y_fcst_all = pd.DataFrame(res_dict['index'])
    y_fcst_all[time_column_name] = pd.to_datetime(y_fcst_all[time_column_name], unit='ms')
    y_fcst_all['forecast'] = res_dict['forecast']
except (ValueError, KeyError):
    # BUG FIX: the original bare `except:` printed `res_dict`, which is
    # undefined when json.loads itself fails; print the raw response and
    # catch only the exceptions this block can actually raise.
    print(response)
y_fcst_all.head()
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
zronaghi/MachineLearningNotebooks
Delete the web service if desired
# Look up the deployed service by name and tear it down.
# NOTE: delete() runs unconditionally when this cell executes — only run
# the cell when you really intend to remove the service.
serv = Webservice(ws, 'automl-oj-forecast-01')
serv.delete()     # don't do it accidentally
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
zronaghi/MachineLearningNotebooks
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing.png) Automated Machine Learning_**Classification with Deployment using a Bank Marketing Dataset**_ Contents1. [Introduction](Introduction)1. [Setup](Setup)1. [Train](Train)1. [Results](Results)1. [Deploy](Deploy)1. [Test](Test)1. [Acknowledgements](Acknowledgements) IntroductionIn this example we use the UCI Bank Marketing dataset to showcase how you can use AutoML for a classification problem and deploy it to an Azure Container Instance (ACI). The classification goal is to predict if the client will subscribe to a term deposit with the bank.If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. Please find the ONNX related documentations [here](https://github.com/onnx/onnx).In this notebook you will learn how to:1. Create an experiment using an existing workspace.2. Configure AutoML using `AutoMLConfig`.3. Train the model using local compute with ONNX compatible config on.4. Explore the results, featurization transparency options and save the ONNX model5. Inference with the ONNX model.6. Register the model.7. Create a container image.8. Create an Azure Container Instance (ACI) service.9. Test the ACI service.In addition this notebook showcases the following features- **Blacklisting** certain pipelines- Specifying **target metrics** to indicate stopping criteria- Handling **missing data** in the input SetupAs part of the setup you have already created an Azure ML `Workspace` object. 
For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments.
# Standard library
import logging
import os

# Third-party
from matplotlib import pyplot as plt
import pandas as pd

# Azure ML SDK
import azureml.core
from azureml.core.experiment import Experiment
from azureml.core.workspace import Workspace
from azureml.automl.core.featurization import FeaturizationConfig
from azureml.core.dataset import Dataset
from azureml.train.automl import AutoMLConfig
from azureml.explain.model._internal.explanation_client import ExplanationClient
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
hyoshioka0128/MachineLearningNotebooks
This sample notebook may use features that are not available in previous versions of the Azure ML SDK.
# Report the SDK version this notebook was authored against vs. the one installed.
print("This notebook was created using version 1.8.0 of the Azure ML SDK")
print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK")
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
hyoshioka0128/MachineLearningNotebooks
Accessing the Azure ML workspace requires authentication with Azure.The default authentication is interactive authentication using the default tenant. Executing the `ws = Workspace.from_config()` line in the cell below will prompt for authentication the first time that it is run.If you have multiple Azure tenants, you can specify the tenant by replacing the `ws = Workspace.from_config()` line in the cell below with the following:```from azureml.core.authentication import InteractiveLoginAuthenticationauth = InteractiveLoginAuthentication(tenant_id = 'mytenantid')ws = Workspace.from_config(auth = auth)```If you need to run in an environment where interactive login is not possible, you can use Service Principal authentication by replacing the `ws = Workspace.from_config()` line in the cell below with the following:```from azureml.core.authentication import ServicePrincipalAuthenticationauth = auth = ServicePrincipalAuthentication('mytenantid', 'myappid', 'mypassword')ws = Workspace.from_config(auth = auth)```For more details, see [aka.ms/aml-notebook-auth](http://aka.ms/aml-notebook-auth)
# Connect to the workspace (prompts for interactive login on first run)
# and create/attach the experiment.
ws = Workspace.from_config()

# choose a name for experiment
experiment_name = 'automl-classification-bmarketing-all'

experiment = Experiment(ws, experiment_name)

output = {}
output['Subscription ID'] = ws.subscription_id
output['Workspace'] = ws.name
output['Resource Group'] = ws.resource_group
output['Location'] = ws.location
output['Experiment Name'] = experiment.name
# FIX: -1 is deprecated (and rejected by recent pandas); None means "no truncation".
pd.set_option('display.max_colwidth', None)
outputDf = pd.DataFrame(data=output, index=[''])
outputDf.T
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
hyoshioka0128/MachineLearningNotebooks
Create or Attach existing AmlComputeYou will need to create a compute target for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource. Creation of AmlCompute takes approximately 5 minutes. If the AmlCompute with that name is already in your workspace this code will skip the creation process.As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this article on the default limits and how to request more quota.
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException

# Choose a name for your CPU cluster
cpu_cluster_name = "cpu-cluster-4"

# Verify that cluster does not exist already; creating an AmlCompute cluster
# takes ~5 minutes, so reuse is the fast path.
try:
    compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)
    print('Found existing cluster, use it.')
except ComputeTargetException:
    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',
                                                           max_nodes=6)
    compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)

compute_target.wait_for_completion(show_output=True)
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
hyoshioka0128/MachineLearningNotebooks
Data Load DataLeverage azure compute to load the bank marketing dataset as a Tabular Dataset into the dataset variable. Training Data
data = pd.read_csv("https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_train.csv")
data.head()

# Add missing values in 75% of the lines.
import numpy as np

missing_rate = 0.75
n_missing_samples = int(np.floor(data.shape[0] * missing_rate))
# FIX: np.bool was removed in NumPy 1.24; the builtin `bool` is the supported dtype.
missing_samples = np.hstack((np.zeros(data.shape[0] - n_missing_samples, dtype=bool),
                             np.ones(n_missing_samples, dtype=bool)))
rng = np.random.RandomState(0)   # fixed seed keeps the injected NaNs reproducible
rng.shuffle(missing_samples)
missing_features = rng.randint(0, data.shape[1], n_missing_samples)
data.values[np.where(missing_samples)[0], missing_features] = np.nan

if not os.path.isdir('data'):
    os.mkdir('data')

# Save the train data to a csv to be uploaded to the datastore
pd.DataFrame(data).to_csv("data/train_data.csv", index=False)

ds = ws.get_default_datastore()
ds.upload(src_dir='./data', target_path='bankmarketing', overwrite=True, show_progress=True)

# Upload the training data as a tabular dataset for access during training on remote compute
train_data = Dataset.Tabular.from_delimited_files(path=ds.path('bankmarketing/train_data.csv'))
label = "y"
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
hyoshioka0128/MachineLearningNotebooks
Validation Data
# Validation split, read directly from the public sample-data blob.
validation_data = "https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_validate.csv"
validation_dataset = Dataset.Tabular.from_delimited_files(validation_data)
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
hyoshioka0128/MachineLearningNotebooks
Test Data
# Held-out test split, read directly from the public sample-data blob.
test_data = "https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_test.csv"
test_dataset = Dataset.Tabular.from_delimited_files(test_data)
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
hyoshioka0128/MachineLearningNotebooks
TrainInstantiate a AutoMLConfig object. This defines the settings and data used to run the experiment.|Property|Description||-|-||**task**|classification or regression or forecasting||**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: accuracyAUC_weightedaverage_precision_score_weightednorm_macro_recallprecision_score_weighted||**iteration_timeout_minutes**|Time limit in minutes for each iteration.||**blacklist_models** | *List* of *strings* indicating machine learning algorithms for AutoML to avoid in this run. Allowed values for **Classification**LogisticRegressionSGDMultinomialNaiveBayesBernoulliNaiveBayesSVMLinearSVMKNNDecisionTreeRandomForestExtremeRandomTreesLightGBMGradientBoostingTensorFlowDNNTensorFlowLinearClassifierAllowed values for **Regression**ElasticNetGradientBoostingDecisionTreeKNNLassoLarsSGDRandomForestExtremeRandomTreesLightGBMTensorFlowLinearRegressorTensorFlowDNNAllowed values for **Forecasting**ElasticNetGradientBoostingDecisionTreeKNNLassoLarsSGDRandomForestExtremeRandomTreesLightGBMTensorFlowLinearRegressorTensorFlowDNNArimaProphet|| **whitelist_models** | *List* of *strings* indicating machine learning algorithms for AutoML to use in this run. Same values listed above for **blacklist_models** allowed for **whitelist_models**.||**experiment_exit_score**| Value indicating the target for *primary_metric*. Once the target is surpassed the run terminates.||**experiment_timeout_hours**| Maximum amount of time in hours that all iterations combined can take before the experiment terminates.||**enable_early_stopping**| Flag to enble early termination if the score is not improving in the short term.||**featurization**| 'auto' / 'off' Indicator for whether featurization step should be done automatically or not. 
Note: If the input data is sparse, featurization cannot be turned on.||**n_cross_validations**|Number of cross validation splits.||**training_data**|Input dataset, containing both features and label column.||**label_column_name**|The name of the label column.|**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-trainprimary-metric)
# Settings shared by every iteration of the AutoML sweep.
automl_settings = {
    "experiment_timeout_hours": 0.3,
    "enable_early_stopping": True,
    "iteration_timeout_minutes": 5,
    "max_concurrent_iterations": 4,
    "max_cores_per_iteration": -1,   # -1 = use all cores on the node
    #"n_cross_validations": 2,
    "primary_metric": 'AUC_weighted',
    "featurization": 'auto',
    "verbosity": logging.INFO,
}

automl_config = AutoMLConfig(task='classification',
                             debug_log='automl_errors.log',
                             compute_target=compute_target,
                             # stop early once the primary metric exceeds this score
                             experiment_exit_score=0.9984,
                             blacklist_models=['KNN', 'LinearSVM'],
                             enable_onnx_compatible_models=True,
                             training_data=train_data,
                             label_column_name=label,
                             validation_data=validation_dataset,
                             **automl_settings)
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
hyoshioka0128/MachineLearningNotebooks
Call the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while.
# Submit the AutoML experiment to remote compute; show_output=False returns
# immediately instead of streaming iteration logs into the cell.
remote_run = experiment.submit(automl_config, show_output=False)
remote_run
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
hyoshioka0128/MachineLearningNotebooks
Run the following cell to access previous runs. Uncomment the cell below and update the run_id.
# Uncomment and set the run_id to re-attach to a previous run instead.
#from azureml.train.automl.run import AutoMLRun
#remote_run = AutoMLRun(experiment=experiment, run_id='<run_ID_goes_here')
#remote_run

# Wait for the remote run to complete
remote_run.wait_for_completion()

best_run_customized, fitted_model_customized = remote_run.get_output()
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
hyoshioka0128/MachineLearningNotebooks
TransparencyView updated featurization summary
# The 'datatransformer' pipeline step records how each raw column was featurized.
custom_featurizer = fitted_model_customized.named_steps['datatransformer']
df = custom_featurizer.get_featurization_summary()
pd.DataFrame(data=df)
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
hyoshioka0128/MachineLearningNotebooks
Set `is_user_friendly=False` to get a more detailed summary for the transforms being applied.
# is_user_friendly=False exposes the full transform pipeline per column.
df = custom_featurizer.get_featurization_summary(is_user_friendly=False)
pd.DataFrame(data=df)

df = custom_featurizer.get_stats_feature_type_summary()
pd.DataFrame(data=df)
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
hyoshioka0128/MachineLearningNotebooks
Results
# Live-updating widget tracking child iterations of the remote run.
from azureml.widgets import RunDetails
RunDetails(remote_run).show()
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
hyoshioka0128/MachineLearningNotebooks
Retrieve the Best Model's explanationRetrieve the explanation from the best_run which includes explanations for engineered features and raw features. Make sure that the run for generating explanations for the best model is completed.
# Wait for the best model explanation run to complete
from azureml.core.run import Run

model_explainability_run_id = remote_run.get_properties().get('ModelExplainRunId')
print(model_explainability_run_id)
if model_explainability_run_id is not None:
    model_explainability_run = Run(experiment=experiment, run_id=model_explainability_run_id)
    model_explainability_run.wait_for_completion()

# Get the best run object
best_run, fitted_model = remote_run.get_output()
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
hyoshioka0128/MachineLearningNotebooks
Download engineered feature importance from artifact storeYou can use ExplanationClient to download the engineered feature explanations from the artifact store of the best_run.
# Download feature importances for the engineered (post-featurization) features.
client = ExplanationClient.from_run(best_run)
engineered_explanations = client.download_model_explanation(raw=False)
exp_data = engineered_explanations.get_feature_importance_dict()
exp_data
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
hyoshioka0128/MachineLearningNotebooks
Download raw feature importance from artifact storeYou can use ExplanationClient to download the raw feature explanations from the artifact store of the best_run.
# Download feature importances for the original (raw) input features.
# Renamed the local from 'engineered_explanations' — the original reused that
# name here even though raw=True returns raw-feature explanations.
client = ExplanationClient.from_run(best_run)
raw_explanations = client.download_model_explanation(raw=True)
exp_data = raw_explanations.get_feature_importance_dict()
exp_data
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
hyoshioka0128/MachineLearningNotebooks
Retrieve the Best ONNX ModelBelow we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. The Model includes the pipeline and any pre-processing. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*.Set the parameter return_onnx_model=True to retrieve the best ONNX model, instead of the Python model.
# Retrieve the best pipeline as an ONNX model instead of a Python model.
best_run, onnx_mdl = remote_run.get_output(return_onnx_model=True)
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
hyoshioka0128/MachineLearningNotebooks
Save the best ONNX model
# Persist the best ONNX model to disk.
from azureml.automl.runtime.onnx_convert import OnnxConverter

onnx_fl_path = "./best_model.onnx"
OnnxConverter.save_onnx_model(onnx_mdl, onnx_fl_path)
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
hyoshioka0128/MachineLearningNotebooks
Predict with the ONNX model, using onnxruntime package
import sys
import json
from azureml.automl.core.onnx_convert import OnnxConvertConstants
from azureml.train.automl import constants

# The ONNX inference helper only supports interpreter versions below the
# SDK's recorded incompatibility threshold (3.6/3.7 when this was written).
if sys.version_info < OnnxConvertConstants.OnnxIncompatiblePythonVersion:
    python_version_compatible = True
else:
    python_version_compatible = False

import onnxruntime
from azureml.automl.runtime.onnx_convert import OnnxInferenceHelper


def get_onnx_res(run):
    """Download the run's ONNX resource JSON and return it as a dict."""
    res_path = 'onnx_resource.json'
    run.download_file(name=constants.MODEL_RESOURCE_PATH_ONNX, output_file_path=res_path)
    with open(res_path) as f:
        onnx_res = json.load(f)
    return onnx_res


if python_version_compatible:
    test_df = test_dataset.to_pandas_dataframe()
    mdl_bytes = onnx_mdl.SerializeToString()
    onnx_res = get_onnx_res(best_run)

    onnxrt_helper = OnnxInferenceHelper(mdl_bytes, onnx_res)
    pred_onnx, pred_prob_onnx = onnxrt_helper.predict(test_df)

    print(pred_onnx)
    print(pred_prob_onnx)
else:
    print('Please use Python version 3.6 or 3.7 to run the inference helper.')
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
hyoshioka0128/MachineLearningNotebooks
Deploy Retrieve the Best ModelBelow we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*. Widget for Monitoring RunsThe widget will first report a "loading" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details
# Fetch the best run's autogenerated scoring script and conda environment,
# which parameterize the web-service deployment below.
best_run, fitted_model = remote_run.get_output()
model_name = best_run.properties['model_name']

script_file_name = 'inference/score.py'
conda_env_file_name = 'inference/env.yml'

best_run.download_file('outputs/scoring_file_v_1_0_0.py', 'inference/score.py')
best_run.download_file('outputs/conda_env_v_1_0_0.yml', 'inference/env.yml')
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
hyoshioka0128/MachineLearningNotebooks
Register the Fitted Model for DeploymentIf neither `metric` nor `iteration` are specified in the `register_model` call, the iteration with the best primary metric is registered.
# Register the fitted model; with no metric/iteration given, the iteration
# with the best primary metric is registered.
description = 'AutoML Model trained on bank marketing data to predict if a client will subscribe to a term deposit'
tags = None
model = remote_run.register_model(model_name=model_name, description=description, tags=tags)

print(remote_run.model_id)  # This will be written to the script file later in the notebook.
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
hyoshioka0128/MachineLearningNotebooks
Deploy the model as a Web Service on Azure Container Instance
from azureml.core.model import InferenceConfig
from azureml.core.webservice import AciWebservice
from azureml.core.webservice import Webservice
from azureml.core.model import Model
from azureml.core.environment import Environment

# Build the inference environment from the downloaded conda spec and deploy
# the registered model to Azure Container Instances.
myenv = Environment.from_conda_specification(name="myenv", file_path=conda_env_file_name)
inference_config = InferenceConfig(entry_script=script_file_name, environment=myenv)

aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,
                                               memory_gb=1,
                                               tags={'area': "bmData", 'type': "automl_classification"},
                                               description='sample service for Automl Classification')

aci_service_name = 'automl-sample-bankmarketing-all'
print(aci_service_name)
aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aciconfig)
aci_service.wait_for_deployment(True)   # blocks until the container is up
print(aci_service.state)
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
hyoshioka0128/MachineLearningNotebooks
Delete a Web ServiceDeletes the specified web service.
#aci_service.delete()
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
hyoshioka0128/MachineLearningNotebooks
Get Logs from a Deployed Web ServiceGets logs from a deployed web service.
#aci_service.get_logs()
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
hyoshioka0128/MachineLearningNotebooks
TestNow that the model is trained, run the test data through the trained model to get the predicted values.
# Load the bank marketing datasets.
from numpy import array

# Split the tabular test dataset into features and label.
X_test = test_dataset.drop_columns(columns=['y'])
y_test = test_dataset.keep_columns(columns=['y'], validate=True)
test_dataset.take(5).to_pandas_dataframe()

X_test = X_test.to_pandas_dataframe()
y_test = y_test.to_pandas_dataframe()

# Score the held-out data with the fitted pipeline.
y_pred = fitted_model.predict(X_test)
actual = array(y_test)
actual = actual[:, 0]   # flatten the single label column to 1-D
print(y_pred.shape, " ", actual.shape)
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
hyoshioka0128/MachineLearningNotebooks
Calculate metrics for the predictionNow visualize the data on a scatter plot to show what our truth (actual) values are compared to the predicted values from the trained model that was returned.
%matplotlib notebook test_pred = plt.scatter(actual, y_pred, color='b') test_test = plt.scatter(actual, actual, color='g') plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8) plt.show()
_____no_output_____
MIT
how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
hyoshioka0128/MachineLearningNotebooks
Lesson outlineIf you're familiar with NumPy (esp. the following operations), feel free to skim through this lesson.- Create a NumPy array: - from a pandas dataframe: [pandas.DataFrame.values](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.values.html) - from a Python sequence: [numpy.array](http://docs.scipy.org/doc/numpy/reference/generated/numpy.array.html) - with constant initial values: [numpy.ones, numpy.zeros](http://docs.scipy.org/doc/numpy/reference/generated/numpy.ones.html) - with random values: [numpy.random](http://docs.scipy.org/doc/numpy/reference/routines.random.html)- Access array attributes: [shape](http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.shape.html), [ndim](http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.ndim.html), [size](http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.size.html), [dtype](http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.dtype.html)- Compute statistics: [sum](http://docs.scipy.org/doc/numpy/reference/generated/numpy.sum.html), [min](http://docs.scipy.org/doc/numpy/reference/generated/numpy.min.html), [max](http://docs.scipy.org/doc/numpy/reference/generated/numpy.max.html), [mean](http://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html)- Carry out arithmetic operations: [add](http://docs.scipy.org/doc/numpy/reference/generated/numpy.add.html), [subtract](http://docs.scipy.org/doc/numpy/reference/generated/numpy.subtract.html), [multiply](http://docs.scipy.org/doc/numpy/reference/generated/numpy.multiply.html), [divide](http://docs.scipy.org/doc/numpy/reference/generated/numpy.divide.html)- Measure execution time: [time.time](https://docs.python.org/2/library/time.htmltime.time), [profile](https://docs.python.org/2/library/profile.html)- Manipulate array elements: [Using simple indices and slices](http://docs.scipy.org/doc/numpy/reference/arrays.indexing.htmlbasic-slicing-and-indexing), [integer 
arrays](http://docs.scipy.org/doc/numpy/reference/arrays.indexing.htmlinteger-array-indexing), [boolean arrays](http://docs.scipy.org/doc/numpy/reference/arrays.indexing.htmlboolean-array-indexing)
'''Creating NumPy arrays. (Ported from Python 2: print statements -> print().)'''
import numpy as np

def test_run():
    # List to 1D array
    print(np.array([2, 3, 4]))
    print('')
    # List of tuples to 2D array
    print(np.array([(2, 3, 4), (5, 6, 7)]))

if __name__ == '__main__':
    test_run()


'''Arrays with initial values.'''
import numpy as np

def test_run():
    # Empty array — contents are whatever was in memory, not zeros
    print(np.empty(5))
    print(np.empty((5, 4)))
    # Arrays of 1s
    print(np.ones((5, 4)))

if __name__ == '__main__':
    test_run()


'''Specify the datatype.'''
import numpy as np

def test_run():
    # Arrays of integer 1s.
    # FIX: np.int was a deprecated alias removed in NumPy 1.24; use builtin int.
    print(np.ones((5, 4), dtype=int))

if __name__ == '__main__':
    test_run()


'''Generating random numbers.'''
import numpy as np

def test_run():
    # Generate an array full of random numbers, uniformly sampled from [0.0, 1.0)
    print(np.random.random((5, 4)))  # Pass in a size tuple
    print('')

    # Sample numbers from a Gaussian (normal) distribution
    print('Standard Normal')
    print(np.random.normal(size=(2, 3)))  # "Standard normal" (mean = 0, s.d. = 1)
    print('')
    print('Standard Normal')
    print(np.random.normal(50, 10, size=(2, 3)))  # Change mean to 50 and s.d. = 10
    print('')

    # Random integers
    print('A single integer')
    print(np.random.randint(10))  # A single integer in [0, 10)
    print('')
    print('A single integer')
    print(np.random.randint(0, 10))  # Same as above, specifying [low, high) explicitly
    print('')
    print('1d-array')
    print(np.random.randint(0, 10, size=5))  # 5 random integers as a 1D array
    print('')
    print('2d-array')
    print(np.random.randint(0, 10, size=(2, 3)))  # 2x3 array of random integers

if __name__ == '__main__':
    test_run()


'''Array attributes.'''
import numpy as np

def test_run():
    a = np.random.random((5, 4))  # 5x4 array of random numbers
    print(a)
    print(a.shape)
    print(a.shape[0])  # Number of rows
    print(a.shape[1])  # Number of columns
    print(len(a.shape))
    print(a.size)
    print(a.dtype)

if __name__ == '__main__':
    test_run()


'''Operations on arrays.'''
import numpy as np

def test_run():
    a = np.random.randint(0, 10, size=(5, 4))  # 5x4 random integers in [0, 10)
    print('Array:\n', a)

    # Sum of all elements
    print('Sum of all elements:', a.sum())

    # Iterate over rows, to compute sum of each column
    print('Sum of each column:', a.sum(axis=0))

    # Iterate over columns, to compute sum of each row
    print('Sum of each row:', a.sum(axis=1))

    # Statistics: min, max, mean (across rows, cols, and overall)
    print('Minimum of each column:\n', a.min(axis=0))
    # BUG FIX: the original called a.min(axis=1) and a.min() here, printing
    # minima under the "Maximum"/"Mean" labels.
    print('Maximum of each row:\n', a.max(axis=1))
    print('Mean of all elements:\n', a.mean())  # Leave out axis arg.

if __name__ == '__main__':
    test_run()
Array: [[0 9 4 4] [6 9 0 8] [8 1 4 8] [1 8 2 1] [2 3 1 5]] Sum of all elements: 84 Sum of each column: [17 30 11 26] Sum of each row: [17 23 21 12 11] Minimum of each column: [0 1 0 1] Maximum of each row: [0 0 1 1 1] Mean of all elements: 0
MIT
01-03_The_power_of_NumPy.ipynb
Collumbus/Machine_Learning_for_Trading-Udacity
--- Quiz: Locate Maximum Value
"""Locate maximum value. (Ported from Python 2: print statements -> print().)"""
import numpy as np

def get_max_index(a):
    """Return the index of the maximum value in given 1D array."""
    return np.argmax(a)

def test_run():
    a = np.array([9, 6, 2, 3, 12, 14, 7, 10], dtype=np.int32)  # 32-bit integer array
    print("Array:", a)

    # Find the maximum and its index in array
    print("Maximum value:", a.max())
    print("Index of max.:", get_max_index(a))

if __name__ == "__main__":
    test_run()
Array: [ 9 6 2 3 12 14 7 10] Maximum value: 14 Index of max.: 5
MIT
01-03_The_power_of_NumPy.ipynb
Collumbus/Machine_Learning_for_Trading-Udacity
---
'''Using time function. (Ported from Python 2: print statements -> print().)'''
import numpy as np
import time

def test_run():
    t1 = time.time()
    print('ML4T')
    t2 = time.time()
    print('The time taken by print statement is ', t2 - t1, 'seconds')

if __name__ == '__main__':
    test_run()


'''How fast is NumPy.'''
import numpy as np
from time import time

def how_long(func, *args):
    '''Execute function with given arguments, and measure execution time.'''
    t0 = time()
    result = func(*args)  # All arguments are passed in as-is
    t1 = time()
    return result, t1 - t0

def manual_mean(arr):
    '''Compute mean (average) of all elements in the given 2D array.'''
    # Renamed the accumulator from `sum` (shadowed the builtin); xrange -> range.
    total = 0
    for i in range(0, arr.shape[0]):
        for j in range(0, arr.shape[1]):
            total = total + arr[i, j]
    return total / arr.size

def numpy_mean(arr):
    '''Compute mean (average) using NumPy.'''
    return arr.mean()

def test_run():
    '''Function called by Test Run.'''
    nd1 = np.random.random((1000, 10000))  # Use a sufficiently large array

    # Time the two functions, retrieving results and execution times
    res_manual, t_manual = how_long(manual_mean, nd1)
    res_numpy, t_numpy = how_long(numpy_mean, nd1)
    print('Manual: {:.6f} ({:.3f} secs.) vs NumPy: {:.6f} ({:.3f} secs.)'.format(res_manual, t_manual, res_numpy, t_numpy))

    # Make sure both give us the same answer (up to some precision).
    # FIX: repaired the mojibake apostrophe in the original message.
    assert abs(res_manual - res_numpy) <= 10e-6, "Results aren't equal!"

    # Compute speedup
    speedup = t_manual / t_numpy
    print('NumPy mean is', speedup, 'times faster than manual for loops.')

if __name__ == '__main__':
    test_run()


'''Accessing array elements.'''
import numpy as np

def test_run():
    a = np.random.rand(5, 4)
    print('Array:\n', a)
    print('')

    # Accessing element at position (3, 2)
    element = a[3, 2]
    print('Position (3, 2):\n', element)
    print('')

    # Elements in defined range
    print('Range (0, 1:3):\n', a[0, 1:3])
    print('')

    # Top-left corner
    print('Top-left corner :\n', a[0:2, 0:2])
    print('')

    # Slicing
    # Note: Slice n:m:t specifies a range that starts at n, and stops before m, in steps of size t
    print('Slicing:', a[:, 0:3:2])

if __name__ == '__main__':
    test_run()


'''Modifying array elements.'''
import numpy as np

def test_run():
    a = np.random.rand(5, 4)
    print('Array:\n', a)
    print('')

    # Assigning a value to a particular location
    a[0, 0] = 1
    print('\nModified (replaced one element):\n', a)
    print('')

    # Assigning a single value to an entire row
    a[0, :] = 2
    print('\nModified (replaced a row with a single value):\n', a)
    print('')

    # Assigning a list to a column in an array
    a[:, 3] = [1, 2, 3, 4, 5]
    print('\nModified (replaced a column with a list):\n', a)
    print('')

if __name__ == '__main__':
    test_run()


'''Indexing an array with another array.'''
import numpy as np

def test_run():
    a = np.random.rand(5)

    # Accessing using list of indices (repeats are allowed)
    indices = np.array([1, 1, 2, 3])
    print(a)
    print(a[indices])

if __name__ == '__main__':
    test_run()


'''Boolean or "mask" index arrays.'''
import numpy as np

def test_run():
    a = np.array([(20, 25, 10, 23, 26, 32, 10, 5, 0), (0, 2, 50, 20, 0, 1, 28, 5, 0)])
    print('Array:\n', a)
    print('')

    # Calculating mean
    mean = a.mean()
    print('Mean:\n', mean)
    print('')

    # Masking: replace every value below the mean with the mean
    a[a < mean] = mean
    print('Masking:\n', a)

if __name__ == '__main__':
    test_run()


'''Arithmetic operations.'''
import numpy as np

def test_run():
    a = np.array([(1, 2, 3, 4, 5), (10, 20, 30, 40, 50)])
    print('Original array a:\n', a)
    print('')
    b = np.array([(100, 200, 300, 400, 500), (1, 2, 3, 4, 5)])
    print('Original array b:\n', b)
    print('')

    # Multiply a by 2
    print('Multiply a by 2:\n', 2 * a)
    print('')

    # Divide a by 2
    print('Divide a by 2:\n', a / 2.0)

    # Add the two arrays
    print('\nAdd a + b:\n', a + b)

    # Multiply a and b
    print('\nMultiply a * b:\n', a * b)

    # Divide a and b. Floor division (//) preserves the integer-division
    # output the original Python 2 code produced for these int arrays.
    print('\nDivide a / b:\n', a // b)

if __name__ == '__main__':
    test_run()
Original array a: [[ 1 2 3 4 5] [10 20 30 40 50]] Original array b: [[100 200 300 400 500] [ 1 2 3 4 5]] Multiply a by 2: [[ 2 4 6 8 10] [ 20 40 60 80 100]] Divide a by 2: [[ 0.5 1. 1.5 2. 2.5] [ 5. 10. 15. 20. 25. ]] Add a + b: [[101 202 303 404 505] [ 11 22 33 44 55]] Multiply a * b: [[ 100 400 900 1600 2500] [ 10 40 90 160 250]] Divide a / b: [[ 0 0 0 0 0] [10 10 10 10 10]]
MIT
01-03_The_power_of_NumPy.ipynb
Collumbus/Machine_Learning_for_Trading-Udacity
Setup
# Notebook setup: load rpy2 so the %%R cell magics below can run R code.
%load_ext rpy2.ipython
import os
from json import loads as jloads
from glob import glob
import pandas as pd
import datetime

%%R
# R-side setup: plotting/reshaping libraries plus two small helpers
# shared by the heatmap cells further down.
library(gplots)
library(ggplot2)
library(ggthemes)
library(reshape2)
library(gridExtra)
library(heatmap.plus)
# Map the first column of a factor data frame to colours drawn from the
# supplied palette function (one colour per factor level).
ascols = function(facs, pallette){
    facs = facs[,1]
    ffacs = as.factor(as.character(facs))
    n = length(unique(facs))
    cols = pallette(n)[ffacs]
}
# Reversed grey palette: darker shades represent larger values.
greyscale = function(n){
    return(rev(gray.colors(n)))
}

def getsname(filename):
    # Sample name = basename of the path, truncated at the first '.'.
    return filename.split('/')[-1].split('.')[0]

def readJSON(jsonf):
    # Parse the JSON file at ``jsonf`` into Python objects.
    # NOTE(review): the file handle is never closed explicitly.
    return jloads(open(jsonf).read())
_____no_output_____
MIT
notebooks/basic_cap_figures.ipynb
Chandrima-04/gimmebio
Beta Diversity
# Load pre-computed beta-diversity statistics and pull out the
# species-level Kraken matrices: rho proportionality and Jensen-Shannon.
obj = readJSON('results/olympiome.beta_diversity_stats.json.json')
speciesRhoKraken = obj['species']['rho_proportionality']['kraken']
speciesRhoKrakenDF = pd.DataFrame(speciesRhoKraken)
speciesJSDKraken = obj['species']['jensen_shannon_distance']['kraken']
speciesJSDKrakenDF = pd.DataFrame(speciesJSDKraken)

%%R -i speciesRhoKrakenDF
# Clustered heatmap of pairwise rho proportionality; the self-vs-self
# diagonal is blanked so it does not dominate the colour scale.
beta.df = as.matrix(speciesRhoKrakenDF)
diag(beta.df) = NA
heatmap.2(beta.df, trace='none', margins=c(8,8), ColSideColorsSize=3,
          KeyValueName="Rho Prop.", labCol=F, cexRow=0.8,
          dendrogram="both", density.info="histogram", col=greyscale)

%%R -i speciesJSDKrakenDF
# Same clustered heatmap for the Jensen-Shannon distances.
# NOTE(review): the colour key is still labelled "Rho Prop." here --
# looks copy-pasted from the cell above; confirm intended label.
beta.df = as.matrix(speciesJSDKrakenDF)
diag(beta.df) = NA
heatmap.2(beta.df, trace='none', margins=c(8,8), ColSideColorsSize=3,
          KeyValueName="Rho Prop.", labCol=F, cexRow=0.8,
          dendrogram="both", density.info="histogram", col=greyscale)
_____no_output_____
MIT
notebooks/basic_cap_figures.ipynb
Chandrima-04/gimmebio
AMR
# Antimicrobial-resistance (AMR) class tables, one TSV per sample.
amrclassfs = glob('results/*.resistome_amrs.classus.tsv')

def parseF(fname):
    # Parse one resistome class TSV: skip the header line, then map
    # column 2 (class name) -> column 3 (integer count) for each row.
    out = {}
    with open(fname) as f:
        f.readline()
        for line in f:
            tkns = line.strip().split('\t')
            out[tkns[1]] = int(tkns[2])
    return out

# samples x AMR-classes count matrix; classes absent in a sample become 0
amrclass = {getsname(amrclassf): parseF(amrclassf) for amrclassf in amrclassfs}
amrclass = pd.DataFrame(amrclass).fillna(0).transpose()
amrclass.shape

%%R -i amrclass
# Clustered heatmap of AMR class counts (classes as rows, samples as cols).
# NOTE(review): key labelled "Rho Prop." but the values are counts.
amr.df = t(as.matrix(amrclass))
heatmap.2(amr.df, trace='none', margins=c(8,8), ColSideColorsSize=3,
          KeyValueName="Rho Prop.", cexCol=0.8, cexRow=0.8,
          dendrogram="both", density.info="histogram", col=greyscale)
_____no_output_____
MIT
notebooks/basic_cap_figures.ipynb
Chandrima-04/gimmebio
Virulence Factors
# Virulence-factor (VFDB) quantification tables, one per sample.
virfs = glob('results/*.vfdb_quantify.table.tsv')
virs = {getsname(virf): pd.read_csv(virf).set_index('Unnamed: 0').transpose()
        for virf in virfs}
# Stack per-sample tables into a 3D Panel; after transpose(2,0,1) the
# items axis holds the measure names (e.g. 'RPKM', 'RPKMG' -- see below).
# NOTE(review): pd.Panel was removed in pandas 1.0; this requires an
# older pandas to run.
virpan = pd.Panel(virs).transpose(2,0,1)
#vrpkm = virpan['RPKM'].fillna(0).apply(pd.to_numeric)
vrpkmg = virpan['RPKMG'].fillna(0).apply(pd.to_numeric)
# Keep only virulence factors whose mean RPKMG across samples exceeds 200.
vrpkmghigh = vrpkmg.transpose().loc[vrpkmg.mean(axis=0) > 200]
vrpkmghigh.shape

%%R -i vrpkmghigh
# Clustered heatmap of the high-abundance virulence factors.
vir.df = as.matrix(vrpkmghigh)
heatmap.2(vir.df, trace='none', margins=c(8,8), ColSideColorsSize=3,
          KeyValueName="Rho Prop.", cexCol=0.8, cexRow=0.8,
          dendrogram="both", density.info="histogram", col=greyscale)
_____no_output_____
MIT
notebooks/basic_cap_figures.ipynb
Chandrima-04/gimmebio
Virulence vs AMR
# Per-sample totals: mean virulence (RPKMG) vs mean AMR burden,
# restricted to samples present in both tables via .loc.
virlevels = vrpkmg.transpose().mean()
amrlevels = amrclass.transpose().mean().loc[virlevels.index]

%%R -i virlevels -i amrlevels
# Scatter plot with marginal rugs: total virulence against total AMR.
df = cbind(virlevels, amrlevels)
colnames(df) = c("virulence", "antimicrobial")
df = as.data.frame(df)
ggplot(df, aes(virulence, antimicrobial)) +
    geom_point() + geom_rug() + theme_tufte(ticks=F) +
    xlab("Total Virulence") + ylab("Total AMR") +
    theme(axis.title.x = element_text(vjust=-0.5),
          axis.title.y = element_text(vjust=1))
_____no_output_____
MIT
notebooks/basic_cap_figures.ipynb
Chandrima-04/gimmebio
Alpha Diversity
# Per-sample alpha diversity: read each stats JSON and tabulate three
# species-level Kraken metrics (chao1, Shannon index, richness).
adivfs = glob('results/*.alpha_diversity_stats.json.json')
adivs = {getsname(path): readJSON(path) for path in adivfs}

def _species_stat(stat_name):
    # sample name -> value of one kraken/species statistic
    return {sample: stats['kraken']['species'][stat_name]
            for sample, stats in adivs.items()}

chaoSpecies = _species_stat('chao1')
shanSpecies = _species_stat('shannon_index')
richSpecies = _species_stat('richness')

chaoSpeciesDF = pd.DataFrame(chaoSpecies).fillna(0)
shanSpeciesDF = pd.DataFrame(shanSpecies).fillna(0)
richSpeciesDF = pd.DataFrame(richSpecies).fillna(0)

# Inspect sorted Shannon indices for the '500000' row (presumably the
# 500k-read rarefaction depth -- confirm against the stats JSON schema).
shanSpeciesDF.loc['500000'].sort_values()
_____no_output_____
MIT
notebooks/basic_cap_figures.ipynb
Chandrima-04/gimmebio
HMP Comparison
# HMP body-site comparison: each JSON maps a body site to a list of
# similarity values; reduce each list to its mean per sample.
hmpfs = glob('results/*.hmp_site_dists.metaphlan2.json')

def crunch(obj):
    # Collapse each site's list of values to its arithmetic mean.
    out = {}
    for k, v in obj.items():
        out[k] = sum(v) / len(v)
    return out

# rows = samples, columns = body sites
hmps = {getsname(hmpf): crunch(readJSON(hmpf)) for hmpf in hmpfs}
hmps = pd.DataFrame(hmps).transpose()

%%R -i hmps
# One box per body site of the per-sample similarity scores.
hmp.df = melt(hmps)
ggplot(hmp.df, aes(x=variable, y=value)) + theme_tufte() + geom_boxplot() +
    ylab('Cosine Similarity to HMP Sites') + xlab('Body Site')
_____no_output_____
MIT
notebooks/basic_cap_figures.ipynb
Chandrima-04/gimmebio
Taxonomy
# Kraken MPA-style taxonomy profiles, one TSV per sample.
krakfs = glob('results/*.kraken_taxonomy_profiling.mpa.mpa.tsv')

def parseKrakF(krakf):
    # Keep genus-level rows only: taxa strings containing 'g__' but not
    # the deeper 's__' (species) tag. Maps genus name -> read count.
    # NOTE(review): splits on any whitespace; assumes taxa strings
    # themselves contain no spaces -- confirm against the MPA format.
    out = {}
    with open(krakf) as kf:
        for line in kf:
            tkns = line.strip().split()
            taxa = tkns[0]
            if ('g__' in taxa) and ('s__' not in taxa):
                key = taxa.split('g__')[-1]
                out[key] = int(tkns[1])
    return out

def getTopN(vec, n):
    # Return the n entries of a {name: count} dict with the largest counts.
    tups = vec.items()
    tups = sorted(tups, key=lambda x: -x[1])
    out = {k: v for k, v in tups[:n]}
    return out

# samples x genera matrix of each sample's 10 most abundant genera
krak10 = {getsname(krakf): getTopN(parseKrakF(krakf), 10) for krakf in krakfs}
krak10 = pd.DataFrame(krak10).fillna(0).transpose()

%%R -i krak10
# Log-scale the counts for plotting; -Inf from log(0) is reset to 0.
krak.df = t(as.matrix(krak10))
krak.df = log(krak.df)
krak.df[!is.finite(krak.df)] = 0
heatmap.2(krak.df, trace='none', margins=c(8,8), ColSideColorsSize=3,
          KeyValueName="Rho Prop.", cexCol=0.8, cexRow=0.7,
          dendrogram="both", density.info="histogram", col=greyscale)
_____no_output_____
MIT
notebooks/basic_cap_figures.ipynb
Chandrima-04/gimmebio
import re
import string
# Notebook setup: upgrade NLTK, then download the corpora/models the
# tokenizers, stopword filter and lemmatizer below depend on.
!pip install -U nltk
import nltk
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
from nltk.tokenize import sent_tokenize  # Sentence Tokenizer
from nltk.tokenize import word_tokenize  # Word Tokenizer
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.probability import FreqDist
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
Requirement already up-to-date: nltk in /usr/local/lib/python3.6/dist-packages (3.4) Requirement already satisfied, skipping upgrade: singledispatch in /usr/local/lib/python3.6/dist-packages (from nltk) (3.4.0.3) Requirement already satisfied, skipping upgrade: six in /usr/local/lib/python3.6/dist-packages (from nltk) (1.11.0)
MIT
module2-Bag-of-Words/LS_DS_422_BOW_Assignment.ipynb
brit228/AB-Demo
1) (optional) Scrape 100 Job Listings that contain the title "Data Scientist" from indeed.com.

At a minimum your final dataframe of job listings should contain:
- Job Title
- Job Description
import requests
from bs4 import BeautifulSoup


def _parse_card(card):
    """Extract one job posting from a search-result card.

    Returns a dict with Job Title, Company, Location, and Job Description
    (fetched from the posting's detail page), or None when the card is
    missing any expected element or the detail fetch fails.
    """
    try:
        d = {}
        d["Job Title"] = card.h2.a.text
        d["Company"] = card.find("span", class_="company").text.strip()
        d["Location"] = card.find("span", class_="location").text.strip()
        r2 = requests.get("https://www.indeed.com" + card.a["href"])
        soup2 = BeautifulSoup(r2.text, 'html.parser')
        desc = soup2.find("div", class_="jobsearch-JobComponent-description icl-u-xs-mt--md")
        d["Job Description"] = "\n".join(a.text for a in desc.contents)
        return d
    except (AttributeError, KeyError, TypeError, requests.RequestException):
        # Malformed/incomplete card or a failed detail-page request:
        # skip this listing rather than abort the whole crawl.  (The
        # original bare `except: pass` also swallowed KeyboardInterrupt.)
        return None


addition = ""
i = 0
data = []
# Safety cap: if Indeed returns no matching cards, `data` never grows and
# the original `while True` loop would spin forever.
MAX_PAGES = 50
while i < MAX_PAGES * 10:
    r = requests.get("https://www.indeed.com/jobs?q=data%20scientist&l=Boston,%20MA" + addition)
    soup = BeautifulSoup(r.text, 'html.parser')
    for card in soup.find_all('div', class_="jobsearch-SerpJobCard",
                              attrs={"data-tn-component": "organicJob"}):
        d = _parse_card(card)
        if d is not None:
            data.append(d)
    i += 10
    print(i)  # progress: next pagination offset
    addition = "&start={}".format(i)
    if len(data) > 100:
        break

df = pd.DataFrame(data)
df
Who we are Massachusetts Cannabis Research Laboratories (MCR Labs) is a rapidly growing, independent, and accredited cannabis testing and analytics laboratory. We are a team of researchers, analytical chemists, pharmaceutical scientists, technologists, and cannabis enthusiasts working to advance the medical and recreational cannabis industries by helping providers ensure the safety and effectiveness of their products. We are committed to proving the best possible services to our clients, building relationships and supporting our community. This is an exceptional career opportunity to join the team of a fast-paced company in a high-growth industry! Job Summary This position is responsible for the production of reports, extracting and analyzing output from internal software tool for client testing reports, and performing internal trend analysis. Primary Job Responsibilities: The position will be responsible for creating and running various reports from the internal software. Review and ensure all information on the reports is accurate and correct. Identify opportunities for process improvements in reporting and development, while providing value through insightful analytics and problem-solving. The reporting data analyst will be responsible for identifying data trends and performing data analysis. Timely and effectively communicate with coworkers and other internal departments any changes affecting client’s product. Present findings and make recommendations. Meet client deadlines for ad hoc reports and other requested information. In this dynamic role, the individual will coordinate reporting and act as the liaison with the external customers and internal team. Understand the purpose and content of requested reports, utilizing systems to create routine reports to meet clients’ needs, and audit and analyze data to verify quality and data integrity. Identify and research data anomalies. 
Primary Goal The primary goal of the Reporting Data Analyst is to prepare accurate client analytical testing reports. Review all reports for accuracy before sending to the client. Analyze and develop recommendations on cannabis laboratory reporting processes. About You Preferred Skills and Qualifications BS/BA degree in chemistry, computer science, physics, mathematics or a related fieldExperience working in a labKnowledge of knowledge of Chemistry, specifically ChromatographyKnowledge of Mass SpectrometryAbility to perform repetitive tasks with a high level of successVery high level of organization and attention to detail with a sense of self-directionAbility to read, analyze, and interpret technical dataWillingness to work and learnAbility to adapt and learn quicklyDesire to learn about product and industryHave advanced knowledge of Excel, macros, and functions to enhance current reportingStrong computer skillsAbility to work independently and communicate effectively in a team environmentEffective time management skills – this is a fast-paced environment and the successful candidate will be able to multi-task and change focus very quickly while maintaining their ability to be effective Do you have what it takes to be a part of an industry leading cannabis testing company? Come join the Green Rush! Job Type: Full-time Job Location: Framingham, MA 01701 Required education: Bachelor's Additional Requirements: Ability to work on weekends, must be fluent in English and authorized to work in US. MCR Labs is an equal opportunity employer and we value diversity at our company. We do not discriminate on the basis of race, religion, color, national origin, gender, sexual orientation, age, marital status, genetic information, veteran status, or disability status. All qualified applicants must be able to satisfy the Department of Homeland Security Form I-9 requirement, with the documents necessary to verify identity & employment authorization. 
Job Type: Full-time Experience: Data Analysis: 1 year (Required)Excel: 2 years (Required)Chromatography: 1 year (Required) Education: Bachelor's (Required) Benefits offered: Paid time offHealth insuranceDental insurance $68,809 a year TO APPLY: Interested candidates must fully complete the online MSO application and attach a cover letter and resume through the online application process. The online application process can be found under the β€˜Employment’ tab at www.middlesexsheriff.com. The Middlesex Sheriff’s Office (MSO) is seeking a qualified professional for the position of Data Integration Specialist / Research Analyst to work in the Administrative Offices of the MSO located in Medford, MA. The Data Integration Specialist / Research Analyst will work directly with individual departments and administration to coordinate, streamline, and visualize an array of data being gathered at the MSO. The Data Integration Specialist / Research Analyst will also be responsible for gathering, linking, and cleaning data that will be used for reporting requirements. The Data Integration Specialist / Research Analyst will also work with the Chief Financial Officer and Director of Fiscal Operations on grant funded projects, reporting requirements, and other budget-related initiatives. This is an exempt, full time, at-will employment position that reports to the Director of Fiscal Operations and Chief Financial Officer. 
Additional responsibilities include, but are not limited to, the following: Collecting, analyzing, interpreting and presenting quantitative and qualitative information; Designing and managing experiments, surveys and data collection; Designing data warehouse/reporting schemas that advance the MSO’s reporting needs; Collaborating with full-time and contractual staff to manage hardware and software systems (SQL, Excel); Providing analysis of data obtained in context, looking for patterns and forecasting trends to help make recommendations for funding and policy changes; Working with MSO staff to ensure that complex statistical concepts are explained efficiently and accurately; Preparing materials for submission to granting agencies and foundations. Working with other internal departments and outside public safety agencies to compile data and provide data visualizations for presentations; do one-off and repeatable Extract Transfer Load (ETL) work to support the MSO; Building and managing ETL processes for internal departments; Working closely with data analysts to build data integrations that quickly and effectively solve reporting needs; Other work duties as assigned by Director of Fiscal Operations and Chief Financial Officer. The individual selected for the position of Data Integration Specialist / Research Analyst must have the ability to exercise good judgment and focus on detail as required by the job. The individual selected shall also be cognizant of the confidential and sensitive nature of working in a law enforcement agency and must comply with all institutional rules regarding safety and security. Qualifications: Bachelor's degree in Accounting/Finance, Business/Public Administration or a closely related field preferred; Proven ability to gather, synthesize, and present current research and trends in criminal justice, healthcare, and social science. 
Mandatory proficiencies should include: Microsoft Word; Excel; Access; SQL query and data transformation skills, including development of Dashboards, Crystal Reports, and MS Access-based Reports. Microsoft SQL Server expertise a plus. Strong knowledge of industry research methodologies and techniques. Ability to work independently, as well as, and with other internal departments in a fast paced environment and execute close attention to detail; Strong written, communication, organizational, analytical, problem-solving and time-management skills; Ability to complete multiple projects in a timely and accurate manner while maintaining comprehensive and cohesive records. In compliance with federal law, all persons hired will be required to verify identity and eligibility to work in the United States and to complete the required employment eligibility verification form upon hire. Must be a Massachusetts resident, have an active valid driver’s license and the right to legally operate a motor vehicle in Massachusetts and pass a criminal background check. Salary: Starting annual base salary from $68,809 plus additional benefits. Submissions must be received by Friday, March 22, 2019 at 4:00p.m. The Middlesex Sheriff’s Office is an equal opportunity employer Job Type: Full-time Education: Bachelor's (Preferred) License: active valid driver’s (Required) Due to BitSight’s rapid growth, and our need to better understand and glean insights from our marketing data, we are recruiting a data scientist to join the marketing team. This is a new role in the marketing department that you will help shape. You will also have the opportunity to work with BitSight’s Security Data Science team to stay on the bleeding edge of data science/AI with a bi-weekly internal reading group, technology-specific workshops, and conference attendance. 
Responsibilities Look for signals in our large data sets and build models that leverage these signals to glean insights from our marketing and customer data. Determine the buyer’s journey from first touch on the website all the way through retention and upsell/cross-sell. Make recommendations on what changes to make to content and programs across the funnel from awareness to consideration to selection to upsell to drive revenue growth. Lead machine learning projects using varied advanced analytical techniques for estimating current and future customer engagement, prospecting new customers, cross-selling to existing customers (response models), and identifying customers likely to leave. Develop and test multiple hypotheses using results from analyses to generate and answer new questions leading to more in-depth understanding of customer behavior being studied. Collaborate with internal business partners across departments to integrate analytical tools and solutions into demand generation campaigns/initiatives. Identify potential issues that arise during the course of research projects and escalate and communicate these to management and internal clients for awareness/ action/ resolution. Make presentations of status and results of analysis to non-technical audience of various levels. Projects are typically short, fast moving, and highly varied. You will need to manage multiple research projects under tight deadlines, as well as ensure research projects are completed on time and meet internal client expectations. Desirable Skills Strong coding skills: You are able to read in a data set, process it, and plot data. You can build modules that encapsulate functionality you have developed. You have some experience with Big Data tools, e.g., Spark, PrestoDB, Hive. We are a Python house and you need to know Python. Also important is the ability to quickly learn to use tools and packages new to you. 
Strong statistical analysis skills: You are able to take a data set, estimate statistical parameters from it, and compare the results with existing data sets. You are comfortable with and knowledgeable about statistical concepts like p-values, hypothesis testing, and non-normal distributions. Again, of utmost importance is being comfortable learning new statistical methods. Solid machine learning and statistical modeling skills: You are able to frame problems as estimating a target variable, build a dataset, define an evaluation metric, build a non-trivial baseline, and finally use appropriate techniques to beat the baseline when possible. Strong team working skills: You’ll be able to work well with other departments (e.g. data science, product management, customer success), balance requirements, and work independently. Ability to communicate effectively: Good results are a good starting point but we also need to communicate these results verbally, in writing, and visually. Job Benefits BitSight prides itself in building exceptional career opportunities and offering outstanding benefits to our team. In that regard, BitSight is not your average company. We have the enthusiasm of a start-up, a culture driven from industry veterans committed to long-term growth, and the benefits package of a mature industry leader. BitSight is a great place to work. The purpose of this role is to partner with the respective Franchise analytics teams to ensure high quality analytic insights, recommendations, and data usage for the supported therapeutic area. 
Key responsibilities include: Analytics Execute analytics to support Multi-Channel, Patient, Payer/Provider, Life Cycle, Field Force, and Forecasting Determine the ideal methodology to apply for each analysis or process based on data availability and limitations Develop, validate and deploy predictive and diagnostic solutions using reusable code and computing paradigms Derive insights and recommendations from research and analyses that address both stated and unstated business questions Use statistical approaches, such as ANOVA, etc., leveraging statistical analysis toolsets, such as R, SPSS and SAS Process and analyze large health-related datasets ranging from small to Big Data and integrate and analyze Structured, Semi-structured, and Unstructured data Use tools for accessing, synthesizing, analyzing and reporting data Data Work with database technologies, SQL, NLP, data engineering, Hadoop Zoo, Kibana, visualization tools, graph analysis Collaboration Act as SME resource for broader Advanced Analytics community within supported therapeutic areas Foster common data-driven viewpoints between stakeholders with divergent views and objectives Connect technical and data skills to business needs to identify improvements with the project lifecycle Work with peers to ensure that resulting code is compliant and supports standardization whenever possible Proactively deliver analyses and reports based on timing for key planning processes Project Management Meet Franchise Leadership expectations by delivering work on time and within scope Fulfill assigned role within a project team delivering to expectations Ensure Takeda ethics and compliance are continuously met Responsibilities Job Function and Description 80% Deliver recommendations built on models and analytics 20% Develop data environment Education and Experience Requirements BA/BS degree in business, life sciences, or related technical discipline Master’s in statistics, mathematics, computer science, applied 
economics, computational biology, computational informatics, or medical informatics preferred 5+ years’ of relevant analytical or healthcare experience preferred Preferred Programming skills: Uses Statistical Modelling: e.g. SPSS, SAS, R Machine Learning Tools: e.g. Spark Visualization: e.g. QlikView/QlikSense, Tableau Data Environment: e.g. Datameer (Hadoop) Experience with large scale database applications (e.g., Oracle, Hadoop, Teradata) Experience detailing Big Data environment requirements Familiar with advanced data science methods Demonstrated proficiency with statistical methods, e.g. ANCOVA, two-tailed p-test, descriptive statistics, etc. Key Skills, Abilities, and Competencies Technical Skills Practiced skills in creating appropriate logic that answers stated and unstated business questions In-depth analytical and critical thinking skills to resolve issues efficiently and effectively Experienced in selecting and applying the appropriate methodology based on business need and data Demonstrable comfort using and applying structured statistical modeling tools and additional analytical modules or add-ons Expertise in writing reusable code to customize statistical models Business Acumen Understanding of goals and needs of supported functions Able to identify stakeholder needs through voice of customer and relevant data collection Experience in generating insights and recommendations from research and analyses that address both stated and unstated business questions Interpersonal Skills Experienced in presenting insights and conclusions from complex information in an easy to understand way Able to maintain an impartial point of view Builds marketing and sales leadership’s confidence through active listening, asking questions and accurately paraphrasing their needs and expectations Proactively engages with stakeholders to build relationships. 
Recognizes the need to modify communication styles to fit diverse audiences with various levels of expertise Fulfills assigned role within a project team delivering to expectations Complexity and Problem Solving Technical, data and analysis related decisions are within the incumbent’s authority. For business and stakeholder related decision he/she consults with the Advanced Analytics Lead Internal and External Contacts Internal Business Partner Contacts Advanced Analytics Community in Franchises U.S. Commercial Operations functions (Sales Operations, Marketing Operations, Learning and Development) IT, Regulatory, Medical, Compliance External Vendor Contracts Service Providers / Consultants Technology Solution Implementation Vendors Software Vendors Other Job Requirements 10% domestic travel may be required. Notice to Employment / Recruitment Agents: Employment / Recruitment agents may only submit candidates for vacancies only if they have written authorization to do so from Shire, a wholly-owned subsidiary of Takeda’s Talent Acquisition department. Any agency candidate submission may only be submitted to positions opened to the agency through the specific Agency Portal. Shire, a wholly-owned subsidiary of Takeda will only pay a fee for candidates submitted or presented where there is a fully executed contract in place between the Employment / Recruitment agents and Shire, a wholly-owned subsidiary of Takeda and only if the candidate is submitted via the Agency Portal. Candidates submitted or presented by Employment / Recruitment Agents without a fully executed contract or submitted through this site shall not be deemed to form part of any Engagement for which the Agency may claim remuneration. Equal Employment Opportunity Shire, a wholly-owned subsidiary of Takeda, is an Equal Opportunity Employer committed to a diverse workforce. 
Shire, a wholly-owned subsidiary of Takeda, will not discriminate against any worker or job applicant on the basis of race, color, religion, gender, national origin, ancestry, age, sexual orientation, marital or civil partnership status, pregnancy, gender reassignment, non-job related mental or physical disability, genetic information, veteran status, military service, application for military service, or membership in any other category protected under law. EEO is the Law - https://www.dol.gov/ofccp/regs/compliance/posters/pdf/eeopost.pdf EEO is the Law – Supplement - https://www.dol.gov/ofccp/regs/compliance/posters/pdf/OFCCP_EEO_Supplement_Final_JRF_QA_508c.pdf Pay Transparency Policy - https://www.dol.gov/ofccp/pdf/pay-transp_formattedESQA508c.pdf Reasonable Accommodations Shire, a wholly-owned subsidiary of Takeda, is committed to working with and providing reasonable accommodation to individuals with disabilities. If, because of a medical condition or disability, you need a reasonable accommodation for any part of the application process, or in order to perform the essential functions of a position, please call 484-595-8400 and let us know the nature of your request and your contact information. Description: Are you passionate about applying data science to real business and customer needs? Would you like to use your data science skills to help our customers do more, feel more, and be more? At Bose, all of our energy is aimed at bringing products into the world that people truly love, and we don’t stop until the details are just right. Data science, machine learning, and analytics have become a crucial part of this mission . These capabilities fuel the creation of new and innovative products in consumer electronics and wellness, help us to bring the right products to the right customers, and allow us to astonish customers with carefully crafted and personalized experiences. 
We are looking for a bright, enthusiastic data scientist for our new and growing Global Consumer Sales Data Science team out of the Boston Landing location. The mission of this team is to develop world-class data science, machine learning, and related technologies to extract insights from data for driving business and customer value. We provide data science expertise and support across the Sales, Marketing, Retail, and Customer Service organizations. The desired outcomes will include improved customer experiences, personalized recommendations, and digital optimization. Responsibilities: Develop and evaluate predictive and prescriptive models for marketing, sales, e-commerce, and customer service applications such as customer lifetime value models, product recommenders, customer segmentations, uplift models, and propensity models. Explore large datasets related to customer and user behavior using modeling, analysis, and visualization techniques. Apply frequentist and Bayesian statistical inference tools to experimental and observational data. Collaborate with data science, data engineering, and data governance teams throughout the data science process. Engage with Global Consumer Sales colleagues to understand business problems and define data science solutions. Communicate results, analyses, and methodologies to technical and business stakeholders. Travel to Framingham, MA location at least once per week (shuttle from Boston Landing is available) . 
Education: BS or MS (preferred) in Data Science, Computer Science, Business Analytics, Statistics, or a related field Completed coursework related to Statistics, Computer Science, Machine Learning, and Data Science Completed coursework related to Business/Customer Analytics, Marketing, Sales, and/or Management Skills: 2+ years of experience applying data science, machine learning, and analytics techniques to business problems, preferably related to sales and marketing Strong programming background with experience in Python (preferred) or R Strong understanding of unsupervised and supervised machine learning algorithms Experience designing experiments and analyzing experimental data using statistical modeling Strong analytical, communication, collaboration, and problem - solving skills Experience cleaning and wrangling data using tools such as SQL and Python (Preferred) Experience working with big data tools and frameworks such as Hadoop and Apache Spark Bose is an equal opportunity employer that is committed to inclusion and diversity. We evaluate qualified applicants without regard to race, color, religion, sex, sexual orientation, gender identity, genetic information, national origin, age, disability, veteran status, or any other legally protected characteristics. For additional information, please review: (1) the EEO is the Law Poster (http://www.dol.gov/ofccp/regs/compliance/posters/pdf/OFCCP_EEO_Supplement_Final_JRF_QA_508c.pdf); and (2) its Supplements (http://www.dol.gov/ofccp/regs/compliance/posters/ofccpost.htm). Please note, the company's pay transparency is available at http://www.dol.gov/ofccp/pdf/EO13665_PrescribedNondiscriminationPostingLanguage_JRFQA508c.pdf. Bose is committed to working with and providing reasonable accommodations to individuals with disabilities. 
If you need a reasonable accommodation because of a disability for any part of the application or employment process, please send an e-mail to Wellbeing@bose.com and let us know the nature of your request and your contact information. 150 We are seeking a highly motivated Data Scientist for computational analysis of complex data across our R&D portfolio to advance Preclinical and Development-stage programs. The successful candidate will serve as a subject matter expert who will provide cross-functional guidance and support internally and externally. He/she will thrive in a fast-paced, highly-collaborative environment to advance program goals with deep expertise in developing models using multi-dimensional data sources. Job Responsibilities: Provide scientific input and leadership to enable the team to analyze complex data (e.g. genomics, transcriptomics and proteomics) from patient samples. Lead efforts to develop predictive models using statistical and computational biology approaches for biomarker development and patient stratification strategies. Provide immediate support in multiple R&D-stage programs and to cross-functional clinical development teams through integrated analysis of clinical and biomarker data generated from multiple platforms and formats. Pro-actively define statistical analysis plans to generate actionable results for meeting program and business objectives. Communication and visualization of results to scientific and non-scientific audiences. Proactively partner with core R&D functional leads to advance Clinical, Preclinical and Discovery program objectives and serve as internal expert in computational biology and biostatistical modeling. Identify key scientific questions to advance our scientific understanding across the portfolio. Adapt latest methods and tools for analyzing large omics datasets (genomics/proteomics). Establish internal best practices for complex data visualization, integration, and accessibility. 
Education and Experience: Ph.D. in statistics, mathematics, bioinformatics, computational biology, genomics, computer science, or a related field with 5+ years of experience in complex, quantitative data analysis in a biotech/biopharm environment. Experience working with big-data generated by diverse platforms (e.g. RNA-Seq, Flow Cytometry, multiplexed proteomics) and accessing and mining external datasets. Experience working with clinical study data and compiling reports in a GxP-environment. Ability to present and visualize data for communicating with scientific and non-scientific colleagues. Proficiency in common programming languages such as Python, R, Matlab, Java, Shell and Linux environments. Ability to work independently and collaboratively in highly dynamic, fast-paced projects within a highly-matrixed, cross-functional and collaborative environment. Data Science Team Nift brings new customers through the doors of neighborhood businesses better than anything else out there. Join the team that’s giving millions of people gifts they love while bringing in the foot traffic that makes neighborhood businesses thrive. Data Science is the heart and core of our company. Algorithms and models are the foundation upon which our product is built, with data driving our key decisions, testing and growth. Our Chief Scientist, David C Parkes, is the former Area Dean of Computer Science at Harvard, the founder of Harvard’s EconCS Group, and the Co-Director of Harvard's Data Science Initiative. Our data doubles every two months with even more sources waiting to be added. Our product represents a completely new kind of marketplace and the science around it has yet to be defined. We’re looking for a Data Analyst to join our core team and drive growth and revenue. This position is based in Boston, MA. 
Examples of projects we currently need help with: Analyze real-time data market economic data Evaluate experimental results - On simulated data, real-time data and concurrent AB tests. Collect data, build analysis and present it to monitor and understand the company revenue. Estimate the demand in multiple local markets (in different sizes, locations and development stages). Segment customers and businesses; offer insights and identify revenue growth opportunities within existing core verticals and new ones. Monitor marketplace metrics and system performance and suggest corrective actions when needed. We have an outstanding core team with deep understanding of algorithmic economics and data science. Our work is highly sought-after and is critical to the success of our business. If you have a proven track-record, want to make an impact and you get excited about the prospects of being part of something really special, we should talk. Traits we value: Solid understanding of statistics, economics, and math. A keen eye for detail and thoughtful investigation of data. A steadfast focus on creating impactful change and ability to prioritize between many tasks to maximize the improvement of the business. 2+ years of commercial experience is a plus. A minimum of a Bachelor’s degree, a Master’s degree is preferred. Collaborative team player who values the contribution of others. We believe it's time technology starts working for Main Street's small businesses. Launched in Boston in the Summer of 2016, our start-up is helping millions of people discover great local businesses. Klaviyo is looking for data scientists to analyze large data sets (we’re collecting billions of individual actions every month), build models and ship products that enable businesses to grow faster and communicate with their customers. Our background as a team is building these models for the Fortune 50 and we want to democratize and open up that technology to everyone. 
The ideal candidate has a background in data science, statistics and machine learning and has done work ranging from exploratory analysis to training and deploying models. We use a wide variety of data mining and machine learning algorithms. The right candidate will have both a solid fundamental understanding and deep practical experience with at least a few modeling and machine learning techniques. Our goal is always to match the right assumptions and models to the right problem - they don’t necessarily have to be complex. You should have experience building models that are used by people to make better decisions. We’re focused on shipping early and often. We prefer iterative solutions that are incrementally better to the perfect solution. You should also be able to measure and know what impact your models had on the decisions people made - e.g. did they outperform the previous best model or a human decision maker? Our data science team is still in its early days and you’ll have a big impact on our direction and how we operate. You’ll be central to upfront research and shipping products that help our customers learn and grow from their data. You: Have a strong fundamental understanding and deep experience with at least a few machine learning algorithms (e.g. regressions, decision trees, k-means clustering, neural networks). Understand Bayesian modeling techniques. Are capable of analyzing data and making rigorous statements about what can or cannot be concluded. Have experience designing and implementing model performance/validation assessments. Have a background in statistics and understand different distributions and the conditions under which they’re valid. Know how code and have used data science tools and packages. Have demonstrated a measurable impact based on the models you’ve created. It’s not always easy getting a model correct and we love talking about places we got stuck and working as a team to think through ideas that could unblock us. 
Have a desire to ship features powered by data science (in other words, you’re excited by both upfront research and actually getting models into production at cloud scale). You Have: Bachelor’s or advanced degree in statistics, applied mathematics, computer science or other relevant quantitative discipline, or equivalent industry experience. Have worked in a data science role for 5+ years professionally or academically and can talk about your projects and the techniques you used. About Us Klaviyo is a team of people who are crazy motivated by growth. It’s what we help our customers do: grow their businesses by making it possible and easy for them to use their data to power better marketing. It’s how we behave as individuals: we’re all deeply passionate about learning. It’s how we manage our business: we have thousands of paying customers, we’re profitable, and we’re growing insanely fast. And it’s what our culture is all about. Working at Klaviyo means you’ll work on things you never imagined you would; you’ll grow in ways you didn’t consider possible; and you’ll do the best work of your career with people who are just as motivated and talented as you are. If this sounds like your ideal place to work, drop us a note! Under supervision, and within established departmental and hospital policies and procedures, is responsible for performing a variety of general and technical duties to support the research activities within the Translational Neuroimmunology Research Center (TNRC) in the Ann Romney Center for Neurologic Diseases (ARCND). The data analyst is to provide expertise to acquire, manage, manipulate, analyze data, and report the results. Data capture and reporting in collaboration with others will be a daily activity. Qualifications PRINCIPAL DUTIES: a. To assist in data analysis related to the research of Multiple Sclerosis and other Autoimmune diseases b. Assist with data processing, archiving, storage and computer analysis c. 
Create analysis datasets from an Oracle database d. Oversee data cleaning and manipulation e. Perform statistical analysis utilizing SAS, STATA, or R f. Identify, analyze and interpret trends or patterns in complex data sets g. Perform data entry when needed or required h. Develop graphs, reports and presentations of project results i. In collaboration with others; develop and maintain databases and data systems necessary for projects and department functions. PREFERRED QUALIFICATIONS: Experience with Red Cap Experience with SAS, STATA or R Programming Skills to execute queries in Oracle database and knowledge of SQL and XML At least 2 years in a research setting looking at multiple sclerosis or neurological data MPH in epidemiology, biostatistics or a related field SKILLS/ABILITIES/COMPETENCIES REQUIRED: Proficiency in Microsoft Office suite, including Excel and Access Bachelor’s or master’s degree, preferable in Information Management, Healthcare Information, Computing, Mathematics, Statistics or related fields Proficiency with statistics in order to communicate easily with other statisticians WORKING CONDITIONS: Office space environment and occasional clinical center exposure. EEO Statement Brigham and Women’s Hospital is an Equal Opportunity Employer. All qualified applicants will receive consideration for employment without regard to race, color, religion, creed, sex, sexual orientation, gender identity, national origin, ancestry, age, veteran status, disability unrelated to job requirements, genetic information, military service, or other protected status. Primary Location: MA-Boston-BWH Longwood Medical Area Work Locations: BWH Longwood Medical Area 75 Francis Street Boston 02115 Job: Business and Systems Analyst Organization: Brigham & Women's Hospital(BWH) Schedule: Full-time Standard Hours: 40 Shift: Day Job Employee Status: Regular Recruiting Department: BWH Neurology Job Posting: Mar 18, 2019 160
MIT
module2-Bag-of-Words/LS_DS_422_BOW_Assignment.ipynb
brit228/AB-Demo
2) Use NLTK to tokenize / clean the listings
# Tokenize and clean every job listing with NLTK, then build the vocabulary.
#   - word_tokenize splits the raw description text into tokens
#   - keep alphabetic tokens only (drops numbers and punctuation)
#   - lowercase before the stop-word test: stopwords.words('english') is
#     all-lowercase, so a case-sensitive check lets capitalized stopwords
#     ('The', 'A', 'And', ...) leak into the vocabulary — as the original
#     printed output shows
#   - lemmatize each surviving token
df2 = df.copy()
stop_words = stopwords.words('english')
_stop_set = set(stop_words)  # O(1) membership test per token (list lookup is O(n))
lemmatizer = WordNetLemmatizer()
df2["Job Description"] = df2["Job Description"].apply(
    lambda v: [
        lemmatizer.lemmatize(w.lower())
        for w in word_tokenize(v)
        if w.isalpha() and w.lower() not in _stop_set
    ]
)
# Vocabulary: sorted list of every unique cleaned token across all listings.
vector_list = sorted({token for doc in df2["Job Description"].values for token in doc})
print(vector_list)
['A', 'AART', 'AB', 'ACCESS', 'ACO', 'AD', 'AI', 'AIDS', 'ANALYST', 'AND', 'API', 'AWS', 'Abdul', 'Abilities', 'Ability', 'About', 'Absolutely', 'Acceleration', 'Access', 'Accommodation', 'Accommodations', 'Accountability', 'Accountable', 'Achieve', 'Acquisition', 'Act', 'Acting', 'Action', 'Actively', 'Activities', 'Actual', 'Actuarial', 'Acute', 'Additional', 'Additionally', 'Adhere', 'Administration', 'Advance', 'Advanced', 'Advice', 'Advisers', 'Advisory', 'Aetna', 'Affirmative', 'Affordability', 'Africa', 'African', 'After', 'Afternoon', 'Again', 'Agency', 'Aggregating', 'Agile', 'Agilent', 'Agility', 'Agios', 'Alaska', 'Algebra', 'Algorithm', 'Algorithms', 'Alion', 'All', 'Allocation', 'Alongside', 'Also', 'Alternatively', 'Amazon', 'America', 'American', 'Americans', 'An', 'Analyses', 'Analysis', 'Analyst', 'Analysts', 'Analytics', 'AnalyticsTM', 'Analyze', 'And', 'Apache', 'Applicants', 'Application', 'ApplicationClaims', 'Applications', 'Applied', 'Apply', 'Are', 'Area', 'Areas', 'Arthritis', 'Artificial', 'As', 'Asia', 'AspenTech', 'Assess', 'Asset', 'Assist', 'Assistance', 'Assistants', 'Assists', 'Associate', 'Association', 'AstraZeneca', 'At', 'Attend', 'Attractive', 'Attributes', 'August', 'Australia', 'Authenticity', 'Auto', 'Autoimmunity', 'Automate', 'Automation', 'Avenue', 'Award', 'Azar', 'Azure', 'B', 'BI', 'BLAS', 'BMC', 'BOP', 'BPT', 'BS', 'BWH', 'Bachelor', 'Bachelors', 'Back', 'Background', 'Banking', 'Based', 'Bash', 'Basic', 'Bayesian', 'Be', 'Beacon', 'Beam', 'Bedford', 'Begins', 'Benefits', 'BenefitsExcellent', 'Berkley', 'Big', 'Billing', 'Bioanalyzer', 'Biochemistry', 'Bioengineering', 'Bioinformatics', 'Biology', 'Bioscience', 'Biostatistics', 'BitSight', 'Blog', 'Bloomberg', 'Boards', 'Bose', 'Boston', 'BostonBMC', 'Botswana', 'Branson', 'Brave', 'Brazil', 'Breakfast', 'Brigham', 'Bring', 'Bringing', 'Broad', 'Broadway', 'Build', 'Building', 'Bulgaria', 'Bureau', 'Burning', 'Business', 'But', 'By', 'C', 'CCDS', 'CEO', 'CEPAC', 
'CERTIFICATES', 'CFA', 'CFO', 'CFR', 'CHC', 'CHI', 'CIO', 'CJ', 'CMake', 'COMPETENCIES', 'CONDITIONS', 'CRM', 'CRO', 'CSCW', 'CT', 'CTO', 'CUDA', 'CV', 'CVB', 'CVS', 'Cafeteria', 'Calculus', 'Call', 'Cambridge', 'Campaign', 'Campus', 'Can', 'Cancer', 'Candidate', 'Capable', 'Capacity', 'Capital', 'Cardiovascular', 'Care', 'Carlo', 'Carry', 'Ccain', 'Census', 'Center', 'Central', 'Chain', 'Chairman', 'Chan', 'Chance', 'Characteristics', 'Chemistry', 'Chief', 'China', 'Christensen', 'Circadian', 'City', 'Claim', 'Claims', 'Classification', 'Clayton', 'Click', 'Client', 'Clinical', 'Clinicians', 'Clinicogenomics', 'Cloud', 'Cloudera', 'Clustering', 'Coding', 'Cohen', 'Collaborate', 'Collaborating', 'Collaboration', 'Collaborative', 'Colleagues', 'Collect', 'Collibra', 'Combine', 'Combining', 'Come', 'Comfort', 'Comfortable', 'Commercial', 'Commitment', 'Communicate', 'Communicating', 'Communication', 'Community', 'Company', 'Competencies', 'Completed', 'Completion', 'Complications', 'Compute', 'Computer', 'Computing', 'Conditions', 'Conduct', 'Conducting', 'Conducts', 'Confidently', 'Connect', 'Connecting', 'Considered', 'Consult', 'Consulting', 'Consumer', 'Contact', 'Container', 'Continuous', 'Contract', 'Contribute', 'Conversational', 'Coordinate', 'Coordinating', 'Core', 'Cortex', 'Cost', 'Counsel', 'Create', 'Creates', 'Creating', 'Creative', 'Critical', 'Cultivate', 'Curious', 'Currently', 'Customer', 'Cybersecurity', 'Côte', 'D', 'DARPA', 'DATA', 'DEPARTMENT', 'DESCRIPTION', 'DESIRABLE', 'DICOM', 'DNA', 'DNNs', 'DO', 'DUTIES', 'Dash', 'Dassault', 'Data', 'Database', 'Databricks', 'DatabricksBayesian', 'Datawarehouse', 'Date', 'David', 'Day', 'Dean', 'Dec', 'December', 'Decision', 'Decisiveness', 'Dedication', 'Deep', 'Defense', 'Define', 'Defines', 'Degree', 'Degrees', 'Deliver', 'Delivering', 'Delphix', 'Demonstrable', 'Demonstrate', 'Demonstrated', 'Demonstrates', 'Dental', 'Department', 'Depending', 'Description', 'Descriptive', 'Design', 'Designer', 
'Designs', 'Desirable', 'Desirables', 'Desired', 'Desk', 'Detail', 'Details', 'Detection', 'Determination', 'Determine', 'DevOps', 'Develop', 'Developer', 'Developing', 'Development', 'Develops', 'Devices', 'Diagnostics', 'Dialogflow', 'Diego', 'Digestive', 'Digital', 'Dimensionality', 'Direct', 'Director', 'Disabilities', 'Disadvantaged', 'Discovery', 'Diseases', 'Disorders', 'Distribution', 'Diversity', 'Division', 'Do', 'DoD', 'Docker', 'Documenting', 'Does', 'Draw', 'Drives', 'Drug', 'Due', 'Duration', 'During', 'Duties', 'Dyad', 'ED', 'EDUCATION', 'EEO', 'EMR', 'ENGIE', 'ENOVIA', 'EO', 'ETL', 'EXG', 'EXPERIENCE', 'East', 'Eclipse', 'EconCS', 'Econometrics', 'Economic', 'Economics', 'Economist', 'Ecova', 'Education', 'Educational', 'Effective', 'Efficiency', 'Efficient', 'ElasticSearch', 'Electric', 'Electrical', 'Elsevier', 'Employee', 'Employer', 'Employment', 'Empower', 'Encoders', 'Encouragement', 'Energy', 'Engage', 'Engine', 'Engineer', 'Engineering', 'Engineers', 'England', 'EnglandBoston', 'English', 'Enhance', 'Enjoy', 'Ensure', 'Ensuring', 'Environment', 'Epidemiology', 'Equal', 'Equally', 'Equipment', 'Essential', 'Essentials', 'Establish', 'Establishing', 'Estimate', 'Etiometry', 'Europe', 'Evaluate', 'Evaluating', 'Evaluation', 'Evangelize', 'Evelo', 'Examine', 'Examining', 'Examples', 'Excel', 'Excellent', 'Exception', 'Exceptional', 'Exchange', 'Excitingly', 'Executive', 'Exempt', 'Exercises', 'Exhibit', 'Exhibits', 'Expand', 'Experience', 'Experienced', 'Experiences', 'Experimental', 'Expert', 'Expertise', 'Exposure', 'Extensive', 'External', 'Extract', 'FAIR', 'FFNN', 'FLSA', 'Facebook', 'Facility', 'FactBase', 'Factor', 'Factors', 'Fair', 'Fairbank', 'Familiar', 'Familiarity', 'Fearlessly', 'Feb', 'Federal', 'Fidelity', 'Fields', 'Finance', 'Financial', 'First', 'Flag', 'Flagship', 'Flask', 'Flatiron', 'Flexibility', 'Fluency', 'Fluent', 'Follow', 'Football', 'For', 'Forces', 'Forest', 'Form', 'Formal', 'Formalize', 'Formulate', 'Forrester', 
'Fortune', 'Forum', 'Foundation', 'FoundationCORE', 'Founded', 'Framingham', 'France', 'Francis', 'Francisco', 'Fraud', 'Free', 'Friday', 'From', 'Fruit', 'Fuel', 'Full', 'Functions', 'Fusion', 'GAA', 'GI', 'GL', 'GPA', 'GPGPUYou', 'GPU', 'Gastrointesinal', 'Gastrointestinal', 'Gaussian', 'Gen', 'General', 'Generalized', 'Generating', 'Genetics', 'Git', 'GitHub', 'Glass', 'Global', 'Go', 'Goal', 'Good', 'Google', 'Governance', 'Government', 'Grad', 'Grads', 'Graduate', 'Grant', 'Great', 'Group', 'GroupsTranslate', 'Growth', 'Guard', 'Guided', 'Guidelines', 'Gym', 'HDFS', 'HIGHLY', 'HIPAA', 'HIV', 'HMS', 'HPC', 'HPSL', 'HPV', 'HQ', 'HTML', 'Hadoop', 'Hampshire', 'HampshireBoston', 'Hands', 'Harvard', 'Have', 'Haves', 'Hawaiians', 'He', 'Health', 'HealthCare', 'HealthNet', 'Healthcare', 'Heart', 'Help', 'Helpdesk', 'Hemostasis', 'Here', 'HiTS', 'Hierarchical', 'High', 'Higher', 'Highest', 'Highly', 'Hiring', 'Hispanic', 'Hive', 'Hockey', 'Homeland', 'Homesite', 'Horizons', 'Hospital', 'Hospitals', 'Hours', 'How', 'However', 'Hub', 'Huge', 'Human', 'Humana', 'Humor', 'Hypothesis', 'I', 'IBM', 'IL', 'IM', 'IMED', 'IMU', 'IMWUT', 'IP', 'IRB', 'IS', 'IT', 'IUI', 'IVD', 'IVZ', 'Idea', 'Ideal', 'Ideally', 'Identifies', 'Identify', 'Identifying', 'Identity', 'If', 'Illumina', 'Image', 'Imagine', 'Impact', 'Impala', 'Implement', 'Implementation', 'Improvement', 'In', 'Includes', 'Inclusion', 'Incomplete', 'Index', 'India', 'Indians', 'Indicator', 'Indigo', 'Individual', 'Individuals', 'Industry', 'InfiniBand', 'Info', 'Informatica', 'Informatics', 'Information', 'Infusion', 'Ingenuity', 'Initial', 'Initiative', 'Innovation', 'Insight', 'Insights', 'Institute', 'Institutes', 'Institutional', 'Instructor', 'Instrumentation', 'Insurance', 'Insurtech', 'Integrate', 'Integrating', 'Integration', 'Integrity', 'Intellectual', 'Intelligence', 'Intense', 'Interact', 'Interacting', 'Interacts', 'Interest', 'Interested', 'Interface', 'Intern', 'International', 'Internet', 'Interns', 
'Internship', 'InternshipBoston', 'Interprets', 'Intuitive', 'Invesco', 'Investigators', 'Investment', 'Investments', 'Investor', 'Involve', 'Is', 'Islanders', 'It', 'Ivoire', 'JOB', 'Jameel', 'Jan', 'Java', 'JavaScript', 'Javascript', 'Jenkins', 'Job', 'Join', 'Joint', 'Journal', 'Julia', 'July', 'Junction', 'June', 'Junior', 'Jupyter', 'KEY', 'KNN', 'KPIs', 'KTC', 'Kanban', 'Keep', 'Kenexa', 'Keras', 'Key', 'Kintai', 'Kirschstein', 'Klaviyo', 'Know', 'Knowledge', 'Knowledgeable', 'Kubernetes', 'L', 'LAW', 'LICENSES', 'LMS', 'LOCATION', 'LSP', 'LSTM', 'LTD', 'Lab', 'Laboratory', 'Labs', 'Lake', 'Language', 'Languages', 'Lastly', 'Latex', 'Latif', 'Launched', 'Law', 'Lead', 'Leading', 'Learn', 'Learning', 'Lester', 'Let', 'Letters', 'Level', 'Leverage', 'Leveraging', 'Liberty', 'Life', 'Lifetime', 'Limited', 'Linear', 'Lines', 'LinkedIn', 'Linux', 'Loans', 'Localization', 'Located', 'Location', 'Locations', 'LogMeIn', 'Logistics', 'London', 'Longwood', 'Look', 'Looking', 'Lucene', 'Lunch', 'M', 'MA', 'MATLAB', 'MCR', 'MD', 'MDM', 'MGH', 'MIT', 'MITRE', 'ML', 'MLlib', 'MPEC', 'MPI', 'MPP', 'MRI', 'MS', 'MSc', 'MXNet', 'MYSQLLanguages', 'MacB', 'Machine', 'Machines', 'Mackey', 'Madrid', 'Main', 'Maintaining', 'Make', 'Makes', 'Manage', 'Management', 'Manager', 'Managing', 'Manipulate', 'Manipulating', 'Manipulation', 'Manufacturing', 'Many', 'Mapping', 'Mar', 'Market', 'Marketbasket', 'Marketers', 'Marketing', 'Markov', 'Massachusetts', 'Master', 'Masters', 'MatLab', 'Matab', 'Materials', 'Math', 'MathWorks', 'Mathematics', 'Matlab', 'Matploltib', 'May', 'McKinsey', 'Measure', 'Measurement', 'Medicaid', 'Medical', 'Medicine', 'Members', 'Mentor', 'Mercurial', 'Metadata', 'Metrics', 'Microsoft', 'Middle', 'Military', 'Min', 'Mine', 'Minimum', 'Minneapolis', 'Minorities', 'Mo', 'Model', 'Modeling', 'Modelling', 'Moderna', 'Molecular', 'MongoDB', 'Monitor', 'Monte', 'More', 'Moreover', 'Morning', 'Most', 'Mozambique', 'Much', 'MuleSoft', 'Multiple', 'Must', 'Mutual', 
'MySQL', 'N', 'NCI', 'NERD', 'NGS', 'NIH', 'NLP', 'NPL', 'NRSA', 'NY', 'NYC', 'Nanodrop', 'Nation', 'National', 'Native', 'Natives', 'Natural', 'Nature', 'Need', 'Network', 'Networks', 'Neural', 'New', 'Next', 'Nice', 'Nift', 'No', 'NoSQL', 'None', 'Normal', 'North', 'Northeastern', 'Notebooks', 'Now', 'NumPy', 'Numpy', 'OBI', 'OFCCP', 'OOP', 'OUR', 'Object', 'Objective', 'Occasional', 'Of', 'Offers', 'Office', 'Officer', 'On', 'Oncology', 'One', 'Ongoing', 'Only', 'OpenFlow', 'Openly', 'Operational', 'Operations', 'Opportunities', 'Opportunity', 'Optic', 'Optimisation', 'Optimizing', 'Options', 'Oracle', 'Ordinance', 'OrgSolutions', 'Organization', 'Oriented', 'Original', 'Orthopaedic', 'Orthopedics', 'Other', 'Our', 'Outcomes', 'Outlook', 'Outstanding', 'Over', 'Overview', 'Owners', 'P', 'PCA', 'PHS', 'PI', 'PIVOT', 'PM', 'POSITION', 'PRINCIPAL', 'Pacific', 'Paid', 'Pandas', 'Paris', 'Park', 'Parkes', 'Parkinson', 'Participate', 'Partner', 'Partners', 'Passion', 'Passionate', 'Patent', 'Patients', 'Perform', 'Performance', 'Performing', 'Performs', 'Perks', 'Perl', 'Personal', 'Personalization', 'Persons', 'PhD', 'PharmD', 'Pharmaceutical', 'Pharmacology', 'Philadelphia', 'Physical', 'Physics', 'Pioneering', 'Plan', 'Planning', 'Platforms', 'Please', 'Plotly', 'Plusses', 'Policy', 'Population', 'Position', 'Positive', 'Possess', 'PostDocs', 'Poster', 'Posters', 'Postg', 'PostgreSQL', 'Postgres', 'Postgresql', 'Posting', 'Poverty', 'Power', 'PowerAdvocate', 'PowerPoint', 'Powered', 'Practical', 'Practice', 'Predictive', 'Preferable', 'Preference', 'Preferred', 'Prepare', 'Prepares', 'Present', 'Presenting', 'Presents', 'Presto', 'PrestoDB', 'Preventing', 'Previous', 'Price', 'Primary', 'Prime', 'Principal', 'Prior', 'Privacy', 'Proactively', 'Probability', 'Process', 'Processing', 'Procurement', 'Produce', 'Product', 'Productivity', 'Productize', 'Prof', 'Professional', 'Professions', 'Proficiency', 'Proficient', 'Program', 'Programming', 'Project', 'Projects', 
'Propensity', 'Prototype', 'Proven', 'Provide', 'Providence', 'Psychology', 'Public', 'Publish', 'Pubmed', 'Pulse', 'Purchasing', 'Purpose', 'Pursuant', 'Putting', 'PyTorch', 'Python', 'PythonKnowledge', 'QA', 'QC', 'QUALIFICATIONS', 'Quailfications', 'Qualification', 'Qualifications', 'Quality', 'Quantitative', 'Qubit', 'Query', 'Quest', 'R', 'RDMA', 'REGISTRATIONS', 'REQUIRED', 'REQUIREMENTS', 'RESEARCH', 'RESPECT', 'RESPONSIBILITIES', 'RESPONSIBILITY', 'RESTful', 'RF', 'RNA', 'RYou', 'Radiology', 'Random', 'Rapidly', 'Read', 'Reasonable', 'RecSys', 'Recent', 'Recognition', 'Recognized', 'Reconstruction', 'Recruiter', 'Recruiting', 'RedShift', 'Redshift', 'Regression', 'Regular', 'Relationship', 'Relevant', 'ReltioMust', 'Rentals', 'Reporting', 'Req', 'Required', 'Requirements', 'Research', 'Researching', 'Resources', 'Respect', 'Respond', 'Responsibilities', 'Responsibility', 'Responsible', 'Results', 'Review', 'Richard', 'Rifiniti', 'Right', 'Risk', 'Roche', 'Role', 'Root', 'Ruby', 'Run', 'Running', 'Ruth', 'SAI', 'SAP', 'SAS', 'SCIENTIST', 'SENIOR', 'SIGIR', 'SKILLS', 'SLAM', 'SPARK', 'SPSS', 'SQL', 'SSIS', 'STATA', 'STD', 'STEM', 'SUMMARY', 'SUPERVISORY', 'SUPPLEMENT', 'SVD', 'SVM', 'SVMs', 'SaaS', 'Sample', 'Samples', 'San', 'Saturdays', 'Scala', 'Schedule', 'Scheduled', 'Scheduling', 'Schneider', 'Scholar', 'Scholarship', 'School', 'SciKit', 'SciPy', 'Science', 'Sciences', 'Scientific', 'Scientist', 'Scientists', 'Scikit', 'Scrum', 'Seamlessly', 'Seattle', 'Sec', 'Secondary', 'Secretary', 'Security', 'See', 'Seek', 'Segment', 'Select', 'Selected', 'Self', 'Senior', 'Sense', 'Sensor', 'September', 'Seres', 'Serve', 'Server', 'Service', 'Services', 'Several', 'Shaker', 'Shape', 'Share', 'She', 'Shift', 'Shiny', 'ShinyExpansive', 'Significant', 'Since', 'Singapore', 'Sir', 'Skills', 'Slack', 'Sleep', 'So', 'Soccer', 'Social', 'Sofia', 'Software', 'Solid', 'Solution', 'Solutions', 'Solve', 'Some', 'South', 'Spark', 'SparkProficiency', 'Special', 'Spend', 
'Sports', 'Spotify', 'Springs', 'St', 'Stakeholder', 'Standard', 'Start', 'Stata', 'State', 'Statement', 'States', 'Statistical', 'Statistics', 'StatisticsExperience', 'Status', 'Stay', 'Steps', 'Steward', 'Stewards', 'Still', 'Stock', 'Strategic', 'Street', 'Strong', 'Structured', 'Student', 'Students', 'Studio', 'Study', 'Submit', 'Subsidized', 'Subversion', 'Success', 'Successful', 'Successfully', 'Such', 'Summary', 'Summer', 'Superior', 'Supervisory', 'Supplements', 'Supply', 'Support', 'Surgery', 'Sustainability', 'Sustained', 'System', 'Systemes', 'Systems', 'Systèmes', 'TA', 'TECHNICAL', 'THE', 'TITLE', 'Tableau', 'Take', 'Talend', 'Teach', 'Teaching', 'Team', 'Teammate', 'Teamwork', 'Tech', 'Technical', 'Technologies', 'Technology', 'Telecom', 'Temporary', 'TensorFlow', 'TensorFlowDatabases', 'Tensorflow', 'Test', 'Testing', 'Text', 'Thailand', 'That', 'The', 'Therapeutic', 'Therapeutics', 'Therapy', 'There', 'These', 'They', 'This', 'Those', 'Thoughtful', 'Three', 'Thrive', 'Through', 'Throughout', 'Thursday', 'Time', 'Times', 'Title', 'To', 'Tobacco', 'Today', 'Together', 'Tokyo', 'Tools', 'Torch', 'Train', 'Training', 'Traits', 'Transfer', 'Translate', 'Transnational', 'Transportation', 'Travel', 'Treatments', 'Tree', 'Trello', 'TripAdvisor', 'Troubleshoot', 'Tryout', 'Tuesday', 'Tufts', 'Twitter', 'Two', 'Type', 'Typically', 'UIST', 'UK', 'US', 'USA', 'UX', 'Ukraine', 'Uncompromising', 'Under', 'Understand', 'Understanding', 'Undertaking', 'Underwriting', 'Unit', 'United', 'University', 'Unposting', 'Up', 'Us', 'Use', 'User', 'Users', 'Using', 'Utilize', 'Utilizing', 'VALUES', 'VBA', 'VM', 'VMs', 'VMware', 'VR', 'Value', 'Values', 'Vector', 'Ventures', 'Verily', 'Very', 'Veteran', 'Veterans', 'Videos', 'Virgin', 'Virtual', 'Vistaprint', 'W', 'WA', 'WORKING', 'WWW', 'Wall', 'Waltham', 'Want', 'Watson', 'We', 'Web', 'Webinars', 'Wednesday', 'Weekly', 'Wellbeing', 'Werfen', 'What', 'When', 'Where', 'Whether', 'While', 'Who', 'Why', 'Will', 'Willingness', 
'Wired', 'With', 'Within', 'Without', 'Women', 'Word', 'Work', 'Working', 'Works', 'World', 'Worldpay', 'Would', 'Write', 'Writing', 'Wyman', 'XGBoost', 'XML', 'Xgboost', 'Yale', 'York', 'You', 'Your', 'Youssef', 'ZR', 'ZRNift', 'Zealand', 'Zeppelin', 'Zimbabwe', 'Zurich', 'ability', 'able', 'abreast', 'abroad', 'absolute', 'abstract', 'academia', 'academic', 'academically', 'accelerate', 'accelerating', 'accept', 'accepted', 'accepting', 'access', 'accessible', 'accessing', 'accolade', 'accommodate', 'accommodation', 'accompanying', 'accomplish', 'account', 'accountability', 'accountable', 'accredited', 'accuracy', 'accurate', 'accurately', 'achievable', 'achieve', 'achievement', 'achieving', 'acid', 'acquired', 'acquisition', 'across', 'act', 'action', 'actionable', 'active', 'actively', 'activity', 'actual', 'actually', 'actuarial', 'acuity', 'ad', 'adapt', 'added', 'addition', 'additional', 'address', 'addressing', 'adept', 'adhere', 'adjust', 'adjusted', 'administrative', 'admission', 'admitted', 'adoption', 'advance', 'advanced', 'advancement', 'advancing', 'advantage', 'advertised', 'advertisement', 'advice', 'advisor', 'advisory', 'advocacy', 'advocate', 'affect', 'affiliate', 'affiliated', 'affiliation', 'affirmative', 'affordable', 'aforementioned', 'afraid', 'age', 'agency', 'agenda', 'agent', 'aggregate', 'aggressive', 'agile', 'ago', 'agreement', 'ahead', 'aid', 'aim', 'aimed', 'al', 'alert', 'algebra', 'algorithm', 'algorithmic', 'align', 'aligned', 'aligning', 'alignment', 'aligns', 'alliance', 'allocate', 'allocation', 'allow', 'allows', 'along', 'alongside', 'also', 'alternative', 'always', 'amazing', 'ambiguity', 'ambiguous', 'ambulatory', 'amenable', 'among', 'amount', 'analyse', 'analysesDevelops', 'analysesImplement', 'analysis', 'analysisAnalyze', 'analysisComfort', 'analysisEnhancing', 'analysisProvide', 'analyst', 'analytic', 'analytical', 'analytics', 'analyze', 'analyzed', 'analyzes', 'analyzing', 'anatomical', 'ancestry', 'annotation', 
'annual', 'annually', 'anomaly', 'another', 'answer', 'answered', 'answering', 'anticipate', 'anticipated', 'anyone', 'anything', 'anywhere', 'applicability', 'applicable', 'applicant', 'application', 'applied', 'applies', 'apply', 'applying', 'appointment', 'appreciates', 'appreciation', 'approach', 'approachesHas', 'appropriate', 'appropriately', 'approval', 'approximately', 'architect', 'architecting', 'architectural', 'architecture', 'archive', 'area', 'arise', 'around', 'arrangement', 'array', 'arrest', 'art', 'articulating', 'artifact', 'artificial', 'artist', 'asap', 'asked', 'asking', 'aspect', 'aspenONE', 'aspiration', 'aspire', 'ass', 'assay', 'assembled', 'assessment', 'asset', 'assigned', 'assignment', 'assist', 'assistance', 'assistant', 'assisting', 'associate', 'associated', 'assume', 'assumption', 'assurance', 'assure', 'attached', 'attend', 'attendance', 'attending', 'attention', 'attitude', 'attract', 'attracts', 'attribute', 'attributed', 'attribution', 'attrition', 'audience', 'audiencesDemonstrates', 'audio', 'audit', 'auditing', 'augment', 'augmented', 'authentication', 'author', 'authorization', 'authorship', 'automate', 'automated', 'automating', 'automation', 'autonomously', 'autonomy', 'available', 'avenue', 'average', 'award', 'awarded', 'awareness', 'b', 'bachelor', 'back', 'backed', 'background', 'bagging', 'balance', 'banking', 'base', 'based', 'baseline', 'basic', 'basis', 'bayes', 'bear', 'beat', 'beating', 'become', 'becoming', 'began', 'begin', 'beginning', 'begun', 'behalf', 'behave', 'behavior', 'behavioral', 'behaviour', 'behavioural', 'behind', 'belief', 'believe', 'bench', 'benchmark', 'benefit', 'bespoke', 'best', 'better', 'beyond', 'bias', 'big', 'bilingual', 'billing', 'billion', 'bioinformaticians', 'bioinformatics', 'biological', 'biologist', 'biology', 'biologyComfort', 'biomedical', 'biometric', 'biopharmaceutical', 'biophysical', 'bioscience', 'biostatistical', 'biostatisticians', 'biostatistics', 'bleeding', 'blend', 
'blocker', 'blood', 'board', 'body', 'bold', 'bonus', 'boosting', 'borough', 'bottom', 'bound', 'boundary', 'boutique', 'box', 'brain', 'brainstorm', 'brand', 'breadth', 'break', 'breaking', 'breakthrough', 'brief', 'brightest', 'brilliant', 'bring', 'bringing', 'brings', 'broad', 'broader', 'broadly', 'broker', 'budgeting', 'bug', 'build', 'building', 'built', 'burden', 'burgeoning', 'business', 'busy', 'buyer', 'c', 'caching', 'calculation', 'calibration', 'call', 'campaign', 'campus', 'cancer', 'candidate', 'cannabis', 'capability', 'capable', 'capacity', 'capitalization', 'capture', 'carbon', 'cardiovascular', 'care', 'career', 'careful', 'caring', 'carried', 'carrier', 'carry', 'carve', 'case', 'casualty', 'catalog', 'categorization', 'category', 'causal', 'causality', 'cause', 'cell', 'center', 'central', 'centric', 'ceremony', 'certain', 'certification', 'certified', 'chain', 'challenge', 'challenged', 'challenging', 'chance', 'change', 'changing', 'channel', 'characteristic', 'characterization', 'characterize', 'characterizing', 'charge', 'charged', 'chart', 'chatbots', 'check', 'chemist', 'chemistry', 'choice', 'choose', 'chosen', 'churn', 'circumstance', 'cited', 'citizen', 'citizenship', 'city', 'civil', 'civilian', 'claim', 'clarity', 'class', 'classical', 'classification', 'classified', 'classifier', 'classify', 'classifying', 'classroom', 'clean', 'cleaning', 'cleanliness', 'cleanse', 'cleansing', 'clear', 'clearable', 'clearance', 'clearly', 'click', 'clickstream', 'client', 'climate', 'clinic', 'clinical', 'clinician', 'clinicogenomic', 'close', 'closely', 'cloud', 'club', 'cluster', 'clustering', 'coach', 'coaching', 'coagulation', 'code', 'coding', 'cognitive', 'coherent', 'cohort', 'collaborate', 'collaborating', 'collaboration', 'collaborative', 'collaboratively', 'collaborator', 'colleague', 'collect', 'collected', 'collecting', 'collection', 'college', 'color', 'combating', 'combination', 'combine', 'combined', 'combining', 'come', 'comfort', 
'comfortable', 'coming', 'command', 'commensurate', 'commerce', 'commercial', 'commit', 'commitment', 'committed', 'common', 'communicate', 'communicates', 'communicating', 'communication', 'communicative', 'communicator', 'community', 'company', 'compare', 'comparison', 'compartmental', 'compatibly', 'compelling', 'compensates', 'compensation', 'competence', 'competency', 'competing', 'competition', 'competitive', 'competitor', 'complaint', 'complete', 'completed', 'completely', 'completing', 'completion', 'complex', 'complexity', 'compliance', 'comply', 'component', 'compound', 'comprehensive', 'comprehensiveness', 'comprised', 'comprises', 'computation', 'computational', 'computationally', 'computer', 'computing', 'conceive', 'concentration', 'concept', 'conceptualize', 'concise', 'concluded', 'conclusion', 'concreate', 'concurrent', 'condition', 'conditional', 'conduct', 'conducted', 'conducting', 'conference', 'conferencing', 'confidence', 'confident', 'confidentiality', 'confidently', 'configuration', 'congestive', 'connect', 'connected', 'consider', 'considerable', 'consideration', 'considered', 'considering', 'consistent', 'consisting', 'consists', 'consolidate', 'consolidation', 'constant', 'constantly', 'constraint', 'construct', 'construction', 'constructive', 'construed', 'consult', 'consultant', 'consultation', 'consultative', 'consulting', 'consumable', 'consumer', 'consumption', 'contact', 'contain', 'contained', 'container', 'contains', 'contender', 'content', 'context', 'continual', 'continually', 'continue', 'continued', 'continuing', 'continuous', 'continuously', 'continuum', 'contract', 'contractor', 'contribute', 'contributes', 'contributing', 'contribution', 'contributor', 'control', 'convenience', 'convention', 'conventional', 'conversation', 'convert', 'convey', 'conviction', 'convince', 'convincingly', 'cooperatively', 'coordinate', 'coordination', 'coordinator', 'core', 'coronary', 'corporate', 'correct', 'corrective', 'correlate', 
'corresponding', 'cost', 'could', 'country', 'coupled', 'courage', 'course', 'coursework', 'cover', 'coverage', 'crave', 'crazy', 'create', 'created', 'creates', 'creating', 'creation', 'creative', 'credential', 'credibility', 'creditable', 'creed', 'crime', 'criminal', 'critical', 'criticism', 'cross', 'crucial', 'cryptography', 'cubicle', 'culmination', 'cultural', 'culturally', 'culture', 'cumulative', 'curation', 'curiosity', 'curious', 'currency', 'current', 'currently', 'curriculum', 'custom', 'customer', 'customized', 'cut', 'cutting', 'cvshealthsupport', 'cyber', 'cybersecurity', 'cycle', 'cyclostationary', 'daily', 'dashboard', 'data', 'dataOrganized', 'database', 'datapoints', 'dataset', 'datasets', 'date', 'day', 'deadline', 'deaf', 'deal', 'dealing', 'death', 'debugging', 'decision', 'decomposition', 'decreasing', 'dedicated', 'dedication', 'deemed', 'deep', 'deepen', 'deeper', 'deeply', 'define', 'defined', 'defining', 'degree', 'deidentification', 'delay', 'deliver', 'deliverable', 'delivered', 'delivering', 'delivers', 'delivery', 'demand', 'demo', 'democratization', 'democratize', 'demographic', 'demography', 'demonstrable', 'demonstrably', 'demonstrate', 'demonstrated', 'demonstrates', 'dental', 'department', 'depending', 'depict', 'deploy', 'deploying', 'deployment', 'deploys', 'depth', 'derive', 'derived', 'deriving', 'describe', 'described', 'describing', 'description', 'descriptive', 'deserves', 'design', 'designation', 'designed', 'designer', 'designing', 'desirable', 'desire', 'desired', 'desktop', 'detail', 'detailed', 'detailing', 'detect', 'detection', 'determination', 'determine', 'determined', 'determining', 'deterministic', 'devastating', 'develop', 'developed', 'developedInterface', 'developer', 'developing', 'development', 'developmentExperience', 'develops', 'device', 'devise', 'devising', 'devoted', 'diabetes', 'diagnosing', 'diagnosis', 'diagnostic', 'diagnostics', 'diagram', 'dialect', 'difference', 'different', 'differentiate', 
'differentiated', 'differentiating', 'difficult', 'dig', 'digestive', 'digging', 'digital', 'diligence', 'dimensionality', 'direct', 'direction', 'directly', 'dirty', 'disability', 'disadvantaged', 'discharge', 'discipline', 'disciplined', 'disclose', 'disclosed', 'disclosure', 'discover', 'discovering', 'discovers', 'discovery', 'discrepancy', 'discriminate', 'discrimination', 'discus', 'discussed', 'discussion', 'disease', 'disorder', 'disparate', 'disposition', 'disprove', 'disrupted', 'disrupter', 'disruptive', 'disseminate', 'distill', 'distinctive', 'distributed', 'distribution', 'distributor', 'diverse', 'diversifying', 'diversity', 'divestments', 'division', 'doctor', 'doctoral', 'document', 'documentation', 'documented', 'dollar', 'domain', 'domestically', 'dominant', 'done', 'door', 'double', 'downtown', 'dramatic', 'draw', 'dream', 'drive', 'driven', 'driver', 'driving', 'drop', 'drug', 'drugging', 'due', 'duration', 'dust', 'duty', 'dynamic', 'eValuation', 'eager', 'earliest', 'early', 'earned', 'earning', 'ease', 'easy', 'ecommerce', 'econometric', 'econometrics', 'economic', 'economically', 'economics', 'economy', 'ecosystem', 'edX', 'edge', 'editing', 'educating', 'education', 'educational', 'educationally', 'effect', 'effective', 'effectively', 'effectiveness', 'efficiency', 'efficient', 'efficiently', 'effort', 'eigenvalue', 'either', 'electronic', 'elevate', 'eligible', 'elite', 'else', 'email', 'embrace', 'embracing', 'emerge', 'emerged', 'emerging', 'eminence', 'empathetically', 'emphasis', 'emphasized', 'employ', 'employee', 'employer', 'employment', 'empower', 'empowers', 'enable', 'enabled', 'enablement', 'enabler', 'enables', 'enabling', 'encapsulate', 'encourage', 'encouraged', 'encourages', 'encouraging', 'end', 'endpoint', 'energetic', 'energized', 'energy', 'enforcement', 'engage', 'engaged', 'engagement', 'engaging', 'engender', 'engine', 'engineer', 'engineering', 'enhance', 'enhancement', 'enhancing', 'enjoy', 'enjoys', 'enough', 
'enrich', 'enrichment', 'enrolled', 'ensemble', 'ensure', 'ensures', 'ensuring', 'entail', 'entailing', 'enterprise', 'enthusiasm', 'enthusiast', 'enthusiastic', 'entire', 'entity', 'entrepreneurial', 'entry', 'environment', 'environmentCommunicates', 'environmentDemonstrates', 'environmentExcellent', 'environmentExperience', 'environmental', 'epidemiological', 'epidemiologist', 'epidemiology', 'equal', 'equally', 'equation', 'equilavent', 'equip', 'equipment', 'equivalent', 'error', 'escalate', 'especially', 'essential', 'establish', 'established', 'establishing', 'establishment', 'estimate', 'estimating', 'et', 'etc', 'ethic', 'ethical', 'ethnic', 'ethnicity', 'euro', 'evaluate', 'evaluating', 'evaluation', 'even', 'evening', 'event', 'ever', 'every', 'everyday', 'everyone', 'everything', 'everywhere', 'evidence', 'evolution', 'evolve', 'evolving', 'exam', 'examining', 'example', 'exceeding', 'excel', 'excellence', 'excellent', 'exceptional', 'excessive', 'exchange', 'exchanging', 'excited', 'exciting', 'excluding', 'execute', 'executed', 'executing', 'execution', 'executive', 'exercise', 'exhaustive', 'exhibit', 'exhilarating', 'exist', 'existing', 'expand', 'expanding', 'expands', 'expansion', 'expectation', 'expected', 'expedition', 'expense', 'experience', 'experienceExperience', 'experienced', 'experiential', 'experiment', 'experimental', 'experimentation', 'expert', 'expertise', 'explain', 'explaining', 'exploration', 'exploratory', 'explore', 'exploring', 'exponential', 'exposure', 'exposureExperience', 'expression', 'extend', 'extended', 'extending', 'extensive', 'external', 'externally', 'extract', 'extracting', 'extraction', 'extraordinary', 'extreme', 'eye', 'fabrication', 'face', 'facet', 'facilitate', 'facilitated', 'facility', 'facing', 'facingExperience', 'fact', 'factor', 'faculty', 'failure', 'fair', 'famed', 'familiar', 'familiarity', 'family', 'fan', 'fashion', 'fast', 'faster', 'faulty', 'feature', 'federal', 'feed', 'feedback', 'feel', 
'fellow', 'fertile', 'fertility', 'fidelity', 'field', 'fieldDemonstrated', 'fieldProficient', 'fiercely', 'fifty', 'fight', 'file', 'filing', 'final', 'finalizing', 'finally', 'finance', 'financial', 'find', 'finding', 'finish', 'fintech', 'firm', 'first', 'fit', 'fitting', 'five', 'fix', 'flexibility', 'flexible', 'flow', 'focus', 'focused', 'focusing', 'follow', 'following', 'foot', 'footprint', 'force', 'forecasting', 'foremost', 'forest', 'forge', 'form', 'formal', 'format', 'formatting', 'formed', 'former', 'formerly', 'formulate', 'formulation', 'forth', 'forward', 'foster', 'found', 'foundation', 'founded', 'founder', 'founding', 'four', 'fourth', 'frame', 'framework', 'fraud', 'free', 'freedom', 'frequently', 'fresh', 'front', 'fuel', 'fulfill', 'full', 'fuller', 'fullest', 'fulltime', 'fully', 'fun', 'function', 'functional', 'functionality', 'fund', 'fundamental', 'fundamentally', 'funded', 'funders', 'funnel', 'furnish', 'furtherance', 'furthering', 'future', 'fuzzy', 'gain', 'gained', 'gaining', 'game', 'gaming', 'gas', 'gastroenterology', 'gather', 'gathering', 'gender', 'general', 'generalist', 'generalized', 'generally', 'generate', 'generated', 'generates', 'generating', 'generation', 'generic', 'generous', 'genetic', 'geneticist', 'genetics', 'genomic', 'genomics', 'genuine', 'get', 'getting', 'ggplot', 'gift', 'git', 'give', 'given', 'giving', 'glean', 'global', 'globally', 'globe', 'go', 'goal', 'goalsInterest', 'good', 'got', 'govern', 'governance', 'governing', 'government', 'grading', 'graduate', 'graduated', 'graduation', 'grant', 'grantee', 'graph', 'graphic', 'graphical', 'gratifying', 'great', 'greater', 'greatest', 'greatly', 'greatness', 'grid', 'ground', 'group', 'grow', 'growing', 'growth', 'guide', 'guideline', 'guiding', 'gym', 'habit', 'half', 'hand', 'handle', 'handled', 'handling', 'happier', 'happiness', 'harassment', 'hard', 'harmonization', 'harness', 'harnessing', 'hat', 'head', 'headquartered', 'headquarters', 'health', 
'healthcare', 'healthier', 'healthiest', 'healthy', 'hearing', 'heart', 'heat', 'heavy', 'hectic', 'held', 'help', 'helpful', 'helping', 'hepatitis', 'hidden', 'high', 'higher', 'highest', 'highly', 'hire', 'hired', 'hiring', 'history', 'hoc', 'hold', 'holder', 'holding', 'home', 'horizon', 'horizontal', 'hospital', 'host', 'hosted', 'hosting', 'hottest', 'hour', 'house', 'http', 'human', 'humanity', 'humility', 'hundred', 'hybrid', 'hyperparameter', 'hypothesis', 'idea', 'ideal', 'ideally', 'ideation', 'identifiable', 'identification', 'identified', 'identify', 'identifying', 'identity', 'illustrate', 'image', 'imagery', 'imagination', 'imagined', 'imaging', 'immediate', 'immediately', 'immigration', 'immune', 'impact', 'impactful', 'impacting', 'impairment', 'implement', 'implementation', 'implemented', 'implementing', 'implication', 'importance', 'important', 'importantly', 'importing', 'impossible', 'improve', 'improved', 'improvement', 'improving', 'imputation', 'inappropriate', 'incentive', 'include', 'included', 'includes', 'including', 'inclusion', 'inclusive', 'inclusivity', 'income', 'incompatible', 'incomplete', 'inconsistency', 'incorporate', 'increase', 'increasing', 'incrementally', 'incumbent', 'indeed', 'independence', 'independent', 'independently', 'indicate', 'indicator', 'individual', 'individualized', 'indoor', 'industry', 'inexplicable', 'infectious', 'inferential', 'inflammatory', 'influence', 'influencing', 'inform', 'informatica', 'informaticians', 'information', 'informed', 'informing', 'infrastructure', 'infused', 'ingenuity', 'ingest', 'ingestion', 'inhibited', 'initial', 'initiated', 'initiative', 'innovate', 'innovating', 'innovation', 'innovative', 'inordinate', 'input', 'inquired', 'inquiry', 'insanely', 'inside', 'insight', 'insightful', 'inspire', 'inspired', 'inspires', 'installation', 'instance', 'instinctive', 'institution', 'institutional', 'instruction', 'instructor', 'instrumental', 'insurance', 'insuranceDental', 'insurer', 
'integral', 'integrate', 'integrated', 'integrating', 'integration', 'integrative', 'integrity', 'intellectual', 'intelligence', 'intelligent', 'intended', 'intensive', 'interact', 'interacting', 'interaction', 'interactive', 'interdisciplinary', 'interest', 'interested', 'interesting', 'interim', 'intermediate', 'intermittent', 'intern', 'internal', 'internally', 'international', 'internationally', 'internet', 'internship', 'interpersonal', 'interpret', 'interpretation', 'interpreted', 'interpreting', 'interval', 'intervention', 'interventional', 'interview', 'intrigued', 'introduce', 'introducing', 'introductory', 'intuitive', 'invent', 'inventing', 'inventive', 'inventory', 'invest', 'investigate', 'investigates', 'investigating', 'investigation', 'investigator', 'investing', 'investment', 'invite', 'involve', 'involved', 'involvement', 'involves', 'involving', 'issue', 'issuesPresent', 'item', 'iterative', 'jeopardize', 'job', 'join', 'joining', 'journal', 'journey', 'judge', 'judgment', 'junior', 'justification', 'k', 'keen', 'key', 'kick', 'kind', 'king', 'know', 'knowledge', 'knowledgeable', 'known', 'lab', 'label', 'labor', 'laboratory', 'lake', 'lambda', 'landmark', 'landscape', 'language', 'large', 'largely', 'largest', 'last', 'lasting', 'latent', 'later', 'latest', 'latitude', 'latter', 'launch', 'launched', 'law', 'lawfully', 'le', 'lead', 'leader', 'leadership', 'leading', 'leaf', 'learn', 'learner', 'learning', 'leased', 'least', 'leave', 'led', 'left', 'legacy', 'legal', 'legally', 'lens', 'lesion', 'let', 'letter', 'level', 'leverage', 'leveraging', 'liability', 'liaising', 'liaison', 'library', 'lie', 'lieu', 'life', 'lifecycle', 'lifestyle', 'lift', 'like', 'likelihood', 'likely', 'limit', 'limitation', 'limited', 'line', 'lineage', 'linear', 'link', 'linking', 'list', 'listed', 'listen', 'listener', 'listing', 'literature', 'little', 'live', 'loan', 'local', 'localization', 'located', 'location', 'locker', 'logical', 'logically', 'logistic', 
'long', 'longitudinal', 'look', 'looking', 'loosely', 'love', 'low', 'lower', 'loyalty', 'machine', 'macro', 'mad', 'made', 'main', 'maintain', 'maintaining', 'maintains', 'maintenance', 'major', 'make', 'maker', 'making', 'manage', 'managed', 'management', 'manager', 'managing', 'manipulate', 'manipulating', 'manipulation', 'manipulationExperience', 'manner', 'mannerCreating', 'manufacturing', 'manuscript', 'many', 'mapping', 'margin', 'marital', 'market', 'marketing', 'marketplace', 'marshal', 'mart', 'massive', 'master', 'match', 'matching', 'material', 'maternity', 'math', 'mathematical', 'mathematician', 'mathematics', 'matplotlib', 'matrixed', 'matter', 'mature', 'maturity', 'maximize', 'may', 'mdm', 'mean', 'meaning', 'meaningful', 'measurable', 'measure', 'measurement', 'mechanism', 'mechanistic', 'medical', 'medicine', 'medium', 'meet', 'meeting', 'member', 'mental', 'mentality', 'mentor', 'mentored', 'mentoring', 'merge', 'merger', 'merit', 'message', 'metabolic', 'metabolite', 'metabolomics', 'metadata', 'method', 'methodological', 'methodology', 'methodsDoing', 'meticulous', 'metric', 'mgmt', 'micro', 'microbiome', 'microenvironment', 'microservices', 'might', 'migration', 'milestone', 'military', 'million', 'mind', 'mindset', 'mine', 'minimal', 'minimum', 'mining', 'miningSolid', 'minority', 'mission', 'mobile', 'modality', 'model', 'modeling', 'modelling', 'modellingAdvanced', 'moderate', 'moderately', 'modern', 'modified', 'module', 'molecular', 'molecule', 'moment', 'money', 'monitor', 'monitoring', 'month', 'motivated', 'motivation', 'move', 'moving', 'much', 'multi', 'multidisciplinary', 'multiple', 'multitask', 'multitude', 'multivariate', 'music', 'must', 'mutual', 'myriad', 'narrativeMeticulous', 'nation', 'national', 'natural', 'nature', 'navigating', 'naïve', 'near', 'nearly', 'necessarily', 'necessary', 'necessity', 'need', 'needed', 'neededSelecting', 'needing', 'negotiate', 'neighborhood', 'net', 'network', 'neural', 'neuroimaging', 
'neuroscience', 'never', 'new', 'newly', 'next', 'nimble', 'noise', 'noncitizen', 'nonpartisan', 'nontechnical', 'normalization', 'normalized', 'normalizing', 'normally', 'note', 'notebook', 'nothing', 'novel', 'nucleic', 'number', 'numerate', 'numerical', 'numerous', 'nurture', 'nurturing', 'ob', 'object', 'objective', 'objectivesMaintain', 'observational', 'observe', 'obstacle', 'obtain', 'obtaining', 'occasional', 'occur', 'offHealth', 'offer', 'offered', 'offering', 'office', 'offline', 'often', 'oil', 'onboard', 'oncology', 'one', 'ongoing', 'online', 'onsite', 'open', 'opening', 'openly', 'openness', 'operate', 'operates', 'operating', 'operation', 'operational', 'operationalize', 'opinion', 'opportunity', 'optical', 'optimal', 'optimally', 'optimization', 'optimize', 'optimized', 'optimizing', 'option', 'oral', 'orchestration', 'orchestrator', 'order', 'ordering', 'orderliness', 'ordinance', 'organization', 'organizationDeliver', 'organizational', 'organize', 'organized', 'organizing', 'orientation', 'oriented', 'origin', 'original', 'others', 'otherwise', 'out', 'outcome', 'outgoing', 'outlier', 'outlined', 'outlook', 'outperform', 'outside', 'outsourced', 'outstanding', 'overall', 'overarching', 'overcoming', 'overseeing', 'oversight', 'owned', 'ownership', 'owning', 'pace', 'paced', 'package', 'paid', 'paired', 'panda', 'panel', 'paper', 'paradigm', 'parallel', 'parameter', 'parameterization', 'part', 'participant', 'participate', 'participates', 'participating', 'participation', 'particularly', 'partner', 'partnered', 'partnering', 'partnership', 'party', 'passion', 'passionate', 'past', 'path', 'pathology', 'pathway', 'patient', 'pattern', 'pay', 'payer', 'paying', 'payment', 'pedagogical', 'peer', 'penalized', 'people', 'per', 'perception', 'perfect', 'perform', 'performance', 'performanceWorking', 'performed', 'performing', 'period', 'periodically', 'permanent', 'permitted', 'person', 'personal', 'personality', 'personalization', 'personalize', 
'personalized', 'personnel', 'perspective', 'pharmaceutical', 'pharmacist', 'pharmacology', 'phase', 'phenotyping', 'phone', 'physic', 'physical', 'physician', 'physiological', 'physiology', 'pick', 'picture', 'piece', 'pilot', 'piloting', 'pioneer', 'pioneering', 'pipeline', 'pivotal', 'place', 'plan', 'planet', 'planning', 'platform', 'play', 'player', 'playing', 'please', 'plot', 'plus', 'plusExperience', 'plusHigh', 'plusProficient', 'plusProven', 'point', 'poised', 'policy', 'political', 'population', 'portal', 'portfolio', 'position', 'positive', 'positively', 'posse', 'possessing', 'possibility', 'possible', 'post', 'postdoc', 'posting', 'potential', 'potentially', 'pound', 'power', 'powered', 'powerful', 'powering', 'practical', 'practice', 'pragmatic', 'preceptor', 'precise', 'predicting', 'prediction', 'predictive', 'preempt', 'pref', 'prefer', 'preferably', 'preference', 'preferred', 'pregnancy', 'preliminary', 'premier', 'preparation', 'prepare', 'prepared', 'preparing', 'preprocessing', 'prescribe', 'prescriptive', 'present', 'presentation', 'presenting', 'presently', 'pressure', 'preventing', 'prevention', 'previous', 'previously', 'price', 'pricing', 'pride', 'primarily', 'primary', 'principle', 'prior', 'prioritize', 'prioritized', 'prioritizing', 'priority', 'privacy', 'private', 'privately', 'proactive', 'proactively', 'probabilistic', 'probability', 'problem', 'procedure', 'proceeding', 'process', 'processed', 'processing', 'produce', 'produced', 'producing', 'product', 'production', 'productionExcellent', 'productionalizing', 'productively', 'productivity', 'profession', 'professional', 'professionally', 'professionnal', 'proficiency', 'proficient', 'proficiently', 'profile', 'profiling', 'profit', 'profitability', 'profitable', 'program', 'programmer', 'programming', 'progress', 'progression', 'progressive', 'prohibits', 'project', 'projection', 'projectsDemonstrates', 'projectsSoftware', 'promote', 'promotes', 'promoting', 'proof', 'property', 
'proposal', 'propose', 'proposed', 'proposition', 'proprietary', 'prospect', 'prospecting', 'prosper', 'protected', 'proteomic', 'proteomics', 'protocol', 'prototype', 'prototyping', 'proud', 'proudly', 'proven', 'provide', 'provided', 'provider', 'provides', 'providing', 'proving', 'psychology', 'public', 'publication', 'publicly', 'publish', 'published', 'publishes', 'publishing', 'pull', 'purchase', 'purchasing', 'purpose', 'pursue', 'pursuing', 'pursuit', 'push', 'put', 'python', 'qPCR', 'qualification', 'qualified', 'qualifying', 'qualitative', 'quality', 'quantitative', 'quarterly', 'query', 'querying', 'quest', 'question', 'quick', 'quickly', 'quit', 'quo', 'rShiny', 'race', 'racial', 'radar', 'random', 'range', 'ranging', 'ranking', 'rapid', 'rapidly', 'rare', 'rate', 'rather', 'rating', 'reach', 'reaction', 'read', 'readable', 'reading', 'reagent', 'real', 'reality', 'realize', 'really', 'reason', 'reasonable', 'reasoning', 'receive', 'received', 'recent', 'recently', 'reciprocally', 'recognition', 'recognize', 'recognized', 'recommend', 'recommendation', 'recommender', 'reconstruction', 'record', 'recordkeeping', 'recovery', 'recruit', 'recruiting', 'recruitment', 'recurring', 'redefine', 'redefining', 'redesigned', 'reduce', 'reducing', 'reduction', 'reference', 'refine', 'refinement', 'reflect', 'reflects', 'regard', 'regarding', 'regardless', 'region', 'regional', 'regression', 'regular', 'regularized', 'regularly', 'regulated', 'regulation', 'regulatory', 'reinforcement', 'reinsurers', 'related', 'relating', 'relational', 'relationship', 'release', 'relentless', 'relevance', 'relevant', 'reliability', 'relies', 'religion', 'religious', 'relocation', 'reltio', 'rely', 'relying', 'remain', 'remaining', 'remote', 'remotely', 'rendered', 'renewable', 'renewal', 'repertoire', 'repetitive', 'replying', 'report', 'reporting', 'repository', 'reposted', 'represent', 'representation', 'representing', 'represents', 'reproduce', 'reproducibility', 'reproducible', 
'repurposed', 'request', 'requested', 'require', 'required', 'requirement', 'requires', 'requisition', 'research', 'researcher', 'researching', 'reservist', 'reshape', 'reshaping', 'residence', 'resolution', 'resolve', 'resolving', 'resource', 'resourceful', 'respect', 'respond', 'responding', 'response', 'responsibility', 'responsible', 'rest', 'restore', 'result', 'resultsDesign', 'resume', 'retail', 'retain', 'retaliation', 'retention', 'retrain', 'return', 'reused', 'revenue', 'reversal', 'reverse', 'review', 'reviewed', 'reviewing', 'revolutionize', 'reward', 'rewarding', 'rich', 'richest', 'ride', 'right', 'rigor', 'rigorous', 'risk', 'roadmap', 'robust', 'rockstar', 'role', 'roll', 'room', 'root', 'rounded', 'routine', 'row', 'rule', 'run', 'running', 'runtime', 'rural', 'safe', 'safer', 'safety', 'salary', 'sale', 'sample', 'sampling', 'sanity', 'satellite', 'satisfaction', 'satisfy', 'saving', 'say', 'scalability', 'scalable', 'scale', 'scaling', 'scenario', 'scene', 'schedule', 'schema', 'scholar', 'scholarship', 'school', 'science', 'scientific', 'scientist', 'scope', 'screen', 'screening', 'scripting', 'scrum', 'seamlessly', 'search', 'searchable', 'searching', 'seasoned', 'secondary', 'sector', 'secure', 'security', 'see', 'seeing', 'seek', 'seeker', 'seeking', 'seen', 'segment', 'segmentation', 'segmenting', 'select', 'selected', 'selecting', 'selection', 'sell', 'send', 'senior', 'seniority', 'sense', 'sensing', 'sensitivity', 'sensor', 'sentiment', 'sequencing', 'serf', 'series', 'serious', 'serve', 'service', 'servicing', 'serving', 'set', 'setsKnowledge', 'setting', 'seventeen', 'several', 'sex', 'sexual', 'shape', 'shaping', 'share', 'shared', 'sharing', 'sharp', 'shift', 'ship', 'shipping', 'short', 'shot', 'shower', 'shown', 'shrinking', 'signal', 'signaling', 'significance', 'significant', 'silo', 'similar', 'simple', 'simplest', 'simplicity', 'simplifies', 'simplify', 'simplifying', 'simply', 'simulated', 'simulation', 'simultaneously', 
'since', 'single', 'singular', 'sit', 'site', 'sits', 'situation', 'size', 'skill', 'skilled', 'skillsClear', 'skillsTeam', 'sleep', 'sleeve', 'slide', 'small', 'smart', 'smarter', 'social', 'socially', 'sociology', 'software', 'solid', 'solution', 'solve', 'solved', 'solver', 'solves', 'solving', 'someone', 'something', 'soon', 'sophisticated', 'sophomore', 'sound', 'source', 'sourcing', 'space', 'span', 'spanning', 'speak', 'speaker', 'speaking', 'spearheading', 'spec', 'special', 'specialist', 'specialization', 'specialized', 'specializing', 'specialty', 'specific', 'specifically', 'specification', 'specified', 'specifying', 'speech', 'speed', 'spend', 'spending', 'spirit', 'sponsor', 'sponsorship', 'sport', 'spot', 'spotting', 'spouse', 'spreadsheet', 'stack', 'staff', 'stage', 'stakeholder', 'standard', 'standardized', 'standardizing', 'start', 'started', 'starting', 'startup', 'state', 'stateful', 'statement', 'statistic', 'statistical', 'statistician', 'status', 'stay', 'stayed', 'staying', 'steadfast', 'step', 'stepwise', 'stewardship', 'still', 'stimulating', 'stone', 'storage', 'store', 'story', 'storytelling', 'straightforward', 'strategic', 'strategy', 'stratification', 'streaming', 'streamline', 'streamlining', 'street', 'strength', 'strict', 'strictly', 'stride', 'strive', 'strong', 'strongest', 'strongly', 'structure', 'structured', 'stuck', 'student', 'studied', 'study', 'studying', 'style', 'subject', 'submit', 'substantial', 'substantially', 'subtypes', 'succeed', 'succeeding', 'success', 'successful', 'successfully', 'suffer', 'suggest', 'suggestion', 'sum', 'summarize', 'summarized', 'summarizes', 'summarizing', 'summary', 'summer', 'superior', 'supervised', 'supervising', 'supervision', 'supervisor', 'supplement', 'supplier', 'supply', 'support', 'supported', 'supporting', 'supportive', 'sure', 'surface', 'surprise', 'surrounded', 'survey', 'survival', 'sustainable', 'sustained', 'sustaining', 'switch', 'symbiosis', 'symbolic', 'sync', 
'synergy', 'synthesis', 'synthesize', 'synthesized', 'synthetic', 'system', 'systematic', 'systematization', 'systemsExtending', 'table', 'tackle', 'tackled', 'tactic', 'take', 'takeaway', 'taking', 'talent', 'talented', 'talk', 'talking', 'tandem', 'tangible', 'target', 'targeted', 'targeting', 'task', 'tasked', 'teach', 'teachable', 'teaching', 'team', 'teammate', 'teamwork', 'tech', 'technical', 'technique', 'techniquesData', 'technological', 'technologiesPractical', 'technologist', 'technology', 'technologyPrior', 'tedious', 'telecom', 'telephone', 'tell', 'temperature', 'ten', 'tenacious', 'term', 'territory', 'test', 'tested', 'testing', 'text', 'thanks', 'theoretical', 'theory', 'therapeutic', 'therapy', 'thereafter', 'therefore', 'thermodynamics', 'thesis', 'thing', 'think', 'thinker', 'thinking', 'third', 'thirty', 'thorough', 'though', 'thought', 'thoughtful', 'thousand', 'threat', 'three', 'threshold', 'thrive', 'thrives', 'throughout', 'tie', 'tight', 'time', 'timeline', 'timely', 'tissue', 'title', 'tobacco', 'today', 'together', 'tolerance', 'tool', 'toolbox', 'tooling', 'toolkits', 'toolsExperience', 'toolsets', 'top', 'topic', 'total', 'touch', 'toward', 'towards', 'toworking', 'track', 'tracking', 'traded', 'tradeoff', 'traditional', 'traffic', 'train', 'trained', 'trainee', 'training', 'trajectory', 'transaction', 'transfer', 'transform', 'transformation', 'transformative', 'transforming', 'transition', 'translate', 'translating', 'translational', 'transparency', 'traumatic', 'travel', 'travelling', 'treat', 'treatment', 'tree', 'tremendous', 'trend', 'trial', 'triangulate', 'trillion', 'troubleshoot', 'troubleshooting', 'trucking', 'truly', 'trust', 'trusted', 'truth', 'try', 'trying', 'tuberculosis', 'tumor', 'tuning', 'turn', 'turning', 'turnover', 'two', 'type', 'typical', 'typically', 'typo', 'u', 'ultimate', 'ultimately', 'unblock', 'uncover', 'undergoing', 'undergraduate', 'underlying', 'underpin', 'underpinnings', 'underrepresentation', 
'underrepresented', 'understand', 'understanding', 'understands', 'undertake', 'undertaking', 'underway', 'underwriting', 'unexpected', 'unifying', 'unique', 'unit', 'university', 'unlawful', 'unless', 'unlock', 'unlocking', 'unparalleled', 'unprecedented', 'unrelated', 'unrivaled', 'unsolved', 'unstructured', 'unsupervised', 'untapped', 'unturned', 'unusual', 'upcoming', 'updating', 'upfront', 'uphold', 'upkeep', 'upload', 'uploading', 'upon', 'upsell', 'urban', 'urgency', 'urgent', 'us', 'usability', 'usage', 'use', 'used', 'useful', 'user', 'usercentric', 'using', 'utility', 'utilization', 'utilize', 'utilized', 'utilizing', 'utmost', 'valid', 'validate', 'validated', 'validating', 'validation', 'valuable', 'value', 'valued', 'variable', 'variance', 'variation', 'varied', 'variety', 'various', 'vary', 'varying', 'vast', 'vector', 'velocity', 'vendor', 'venture', 'venue', 'verbal', 'verbally', 'verifiable', 'verification', 'verify', 'verifying', 'versed', 'version', 'vertical', 'veteran', 'via', 'vibrant', 'view', 'virtual', 'virtualization', 'virtualized', 'virtually', 'visibility', 'vision', 'visionary', 'visit', 'visual', 'visualization', 'visualize', 'visualizing', 'visually', 'vital', 'vitro', 'voice', 'volatility', 'volume', 'vulnerability', 'waiting', 'wallet', 'want', 'warehouse', 'waste', 'water', 'way', 'weakness', 'weapon', 'wear', 'web', 'weblog', 'website', 'week', 'weekend', 'weight', 'welcome', 'well', 'wellbeing', 'wherever', 'whole', 'whose', 'wide', 'widely', 'wider', 'willing', 'willingness', 'win', 'wind', 'winning', 'within', 'without', 'word', 'work', 'worked', 'workflow', 'workforce', 'working', 'workload', 'workplace', 'workshop', 'workstreams', 'world', 'worldIs', 'worldwide', 'would', 'wrangling', 'write', 'writing', 'written', 'year', 'yes', 'yet', 'yield']
MIT
module2-Bag-of-Words/LS_DS_422_BOW_Assignment.ipynb
brit228/AB-Demo
3) Use Scikit-Learn's CountVectorizer to get word counts for each listing.
def _top_terms(tokens):
    """Return the 20 most frequent (token, count) pairs for one tokenized posting."""
    return FreqDist(tokens).most_common(20)

# New column: per-posting top-20 token counts; trailing expression displays it.
df2["Job Description - Most Common"] = df2["Job Description"].apply(_top_terms)
df2["Job Description - Most Common"]
_____no_output_____
MIT
module2-Bag-of-Words/LS_DS_422_BOW_Assignment.ipynb
brit228/AB-Demo
4) Visualize the most common word counts
import matplotlib.pyplot as plt

# Flatten every tokenized job description into one corpus-wide token stream,
# then plot the 30 most frequent tokens (non-cumulative counts).
all_tokens = [token for doc in df2["Job Description"].values for token in doc]
fdist = FreqDist(all_tokens)
fdist.plot(30, cumulative=False)
plt.show()
_____no_output_____
MIT
module2-Bag-of-Words/LS_DS_422_BOW_Assignment.ipynb
brit228/AB-Demo
5) Use Scikit-Learn's TfidfVectorizer to get a TF-IDF feature matrix
# BUG FIX: the original cell imported CountVectorizer but then instantiated
# TfidfVectorizer, which was never imported -> NameError at runtime. The
# unused CountVectorizer instance is removed as dead code.
from sklearn.feature_extraction.text import TfidfVectorizer

# TF-IDF feature matrix over the job descriptions, limited to the 20
# highest-scoring unigrams across the corpus.
tfidf = TfidfVectorizer(ngram_range=(1, 1), max_features=20)

# Each row of df2["Job Description"] is a token list; join tokens back into a
# single string because the vectorizer expects raw document strings.
bag_of_words = tfidf.fit_transform([" ".join(v) for v in df2["Job Description"].values])

# Dense DataFrame: one row per posting, one column per selected term.
# NOTE(review): get_feature_names() was removed in scikit-learn >= 1.2; on
# modern versions use get_feature_names_out().
df_vec = pd.DataFrame(bag_of_words.toarray(), columns=tfidf.get_feature_names())
df_vec.head()
_____no_output_____
MIT
module2-Bag-of-Words/LS_DS_422_BOW_Assignment.ipynb
brit228/AB-Demo
Code stuff - not slides!
# IPython magic: execute the shared plotting notebook, which presumably defines
# the f_identify_*, f_complexity and f_bias_var figures used in later cells —
# confirm. NOTE(review): the recorded output shows a file-not-found error, so
# the relative path may be wrong for this checkout.
%run ../ML_plots.ipynb
ERROR:root:File `'../ML_plots.ipynb.py'` not found.
MIT
Test_karl/material/session_12/lecture_12.ipynb
karlbindslev/sds_group29
Session 12: Supervised learning, part 1*Andreas Bjerre-Nielsen* Agenda1. [Modelling data](Modelling-data)1. [A familiar regression model](A-familiar-regression-model)1. [The curse of overfitting](The-curse-of-overfitting)1. [Important details](Implementation-details) Vaaaamos
# Silence scikit-learn convergence warnings so lecture output stays readable.
import warnings
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings(action='ignore', category=ConvergenceWarning)

# Core numerics / plotting stack used throughout the lecture.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

plt.style.use('default') # set style (colors, background, size, gridlines etc.)
plt.rcParams['figure.figsize'] = 10, 4 # set default size of plots
plt.rcParams.update({'font.size': 18})  # larger font for slide projection
_____no_output_____
MIT
Test_karl/material/session_12/lecture_12.ipynb
karlbindslev/sds_group29
Supervised problems (1)*How do we distinguish between problems?*
# Display the pre-rendered "which kind of problem is this?" slide figure
# (presumably defined by `%run ../ML_plots.ipynb` above — confirm).
f_identify_question
_____no_output_____
MIT
Test_karl/material/session_12/lecture_12.ipynb
karlbindslev/sds_group29
Supervised problems (2)*The two canonical problems*
# Display the companion "answer" figure for the two canonical supervised
# problems (presumably from ../ML_plots.ipynb — confirm).
f_identify_answer
_____no_output_____
MIT
Test_karl/material/session_12/lecture_12.ipynb
karlbindslev/sds_group29
Supervised problems (3)*Which models have we seen for classification?*- .- .- . Modelling data Model complexity (1)*What does a model of low complexity look like?*
# Low-complexity model illustration (first figure in the complexity series).
f_complexity[0]
_____no_output_____
MIT
Test_karl/material/session_12/lecture_12.ipynb
karlbindslev/sds_group29
Model complexity (2)*What does medium model complexity look like?*
# Medium-complexity model illustration (second figure in the complexity series).
f_complexity[1]
_____no_output_____
MIT
Test_karl/material/session_12/lecture_12.ipynb
karlbindslev/sds_group29
Model complexity (3)*What does high model complexity look like?*
# High-complexity model illustration (third figure in the complexity series).
f_complexity[2]
_____no_output_____
MIT
Test_karl/material/session_12/lecture_12.ipynb
karlbindslev/sds_group29
Model fitting (1)*Quiz (1 min.): Which model fitted the data best?*
# Under/overfitting illustration for the regression case.
f_bias_var['regression'][2]
_____no_output_____
MIT
Test_karl/material/session_12/lecture_12.ipynb
karlbindslev/sds_group29
Model fitting (2)*What does underfitting and overfitting look like for classification?*
# Under/overfitting illustration for the classification case.
f_bias_var['classification'][2]
_____no_output_____
MIT
Test_karl/material/session_12/lecture_12.ipynb
karlbindslev/sds_group29
Two agendas (1)What are the objectives of empirical research? 1. *causation*: what is the effect of a particular variable on an outcome? 2. *prediction*: find some function that provides a good prediction of $y$ as a function of $x$ Two agendas (2)How might we express the agendas in a model?$$ y = \alpha + \beta x + \varepsilon $$- *causation*: interested in $\hat{\beta}$ - *prediction*: interested in $\hat{y}$ Two agendas (3)Might these two agendas be related at a deeper level? Can prediction quality inform us about how to make causal models? A familiar regression model Estimation (1)*Do we know already some ways to estimate regression models?* - Social scientists know all about Ordinary Least Squares (OLS). - OLS estimates both the parameters and their standard errors. - It is the best linear unbiased estimator under regularity conditions. *How is OLS estimated?* - $\beta=(\textbf{X}^T\textbf{X})^{-1}\textbf{X}^T\textbf{y}$- computation requires no perfect multicollinearity. Estimation (2)*How might we estimate a linear regression model?* - first order method (e.g. gradient descent)- second order method (e.g. Newton-Raphson) *So what the hell was gradient descent?* - compute errors, multiply with features and update Estimation (3)*Can you explain that in detail?* - Yes, like with Adaline, we minimize the sum of squared errors (SSE): \begin{align}SSE&=\boldsymbol{e}^{T}\boldsymbol{e}\\\boldsymbol{e}&=\textbf{y}-\textbf{X}\textbf{w}\end{align}
# Toy example: 3 observations, 2 features; w[0] plays the role of the bias.
X = np.random.normal(size=(3, 2))
y = np.random.normal(size=(3))
w = np.random.normal(size=(3))

# Residuals of the linear model y_hat = w0 + X·w[1:], and the sum of
# squared errors as their inner product.
e = y - (w[0] + X @ w[1:])
SSE = e @ e
_____no_output_____
MIT
Test_karl/material/session_12/lecture_12.ipynb
karlbindslev/sds_group29
Estimation (4)*And what about the updating? Wasn't it something about the first-order derivative?* \begin{align}\frac{\partial SSE}{\partial\hat{w}}=&\textbf{X}^T\textbf{e},\\ \Delta\hat{w}=&\eta\cdot\textbf{X}^T\textbf{e}=\eta\cdot\textbf{X}^T(\textbf{y}-\hat{\textbf{y}})\end{align}
eta = 0.001  # learning rate

# First-order derivative term X^T e (features weighted by residuals), then the
# gradient-descent step sizes for the feature weights and the bias.
fod = X.T @ e
update_vars = eta * fod
update_bias = eta * e.sum()
_____no_output_____
MIT
Test_karl/material/session_12/lecture_12.ipynb
karlbindslev/sds_group29
Estimation (5)*What might some advantages be relative to OLS?*- Works despite high multicollinearity- Speed - OLS has $\mathcal{O}(K^2N)$ computation time ([read more](https://math.stackexchange.com/questions/84495/computational-complexity-of-least-square-regression-operation)) - Quadratic scaling in number of variables ($K$). - Stochastic gradient descent - Likely to converge faster with many observations ($N$) Fitting a polynomial (1)Polyonomial: $f(x) = 2+8*x^4$Try models of increasing order polynomials. - Split data into train and test (50/50)- For polynomial order 0 to 9: - Iteration n: $y = \sum_{k=0}^{n}(\beta_k\cdot x^k)+\varepsilon$. - Estimate order n model on training data - Evaluate with on test data with RMSE: - $log RMSE = \log (\sqrt{MSE})$ Fitting a polynomial (2)We generate samples of data from true model.
# PolynomialFeatures / LinearRegression are imported here for the model
# fitting cell that follows.
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression

def true_fct(X):
    """Ground-truth data generating process: f(x) = 2 + x**4."""
    return 2+X**4

n_samples = 25
n_degrees = 15

# Fixed seed -> reproducible train/test draws across runs.
np.random.seed(0)
# Training set: standard-normal x, true f(x) plus unit-variance Gaussian noise.
X_train = np.random.normal(size=(n_samples,1))
y_train = true_fct(X_train).reshape(-1) + np.random.randn(n_samples)
# Independent test draw of the same size (the 50/50 split from the slides).
X_test = np.random.normal(size=(n_samples,1))
y_test = true_fct(X_test).reshape(-1) + np.random.randn(n_samples)
_____no_output_____
MIT
Test_karl/material/session_12/lecture_12.ipynb
karlbindslev/sds_group29
Fitting a polynomial (3)We estimate the polynomials
from sklearn.metrics import mean_squared_error as mse

# Fit polynomial regressions of increasing degree and record in- and
# out-of-sample mean squared error plus the estimated coefficients.
test_mse = []
train_mse = []
parameters = []
degrees = range(n_degrees+1)
for p in degrees:
    poly = PolynomialFeatures(degree=p)
    X_train_p = poly.fit_transform(X_train)
    # BUG FIX: the test design matrix must be built from X_test, not
    # X_train — the original evaluated "test" error on training features
    # paired with test targets.
    X_test_p = poly.fit_transform(X_test)
    reg = LinearRegression().fit(X_train_p, y_train)
    train_mse += [mse(reg.predict(X_train_p), y_train)]  # in-sample MSE
    test_mse += [mse(reg.predict(X_test_p), y_test)]     # out-of-sample MSE
    parameters.append(reg.coef_)                         # coefficients for degree p
_____no_output_____
MIT
Test_karl/material/session_12/lecture_12.ipynb
karlbindslev/sds_group29
Fitting a polynomial (4)*So what happens to the model performance in- and out-of-sample?*
# Plot train vs. test MSE as a function of polynomial degree.
degree_index = pd.Index(degrees, name='Polynomial degree ~ model complexity')
mse_df = pd.DataFrame({'Train set': train_mse, 'Test set': test_mse})
mse_df = mse_df.set_index(degree_index)
ax = mse_df.plot(figsize=(10, 4))
ax.set_ylabel('Mean squared error')
_____no_output_____
MIT
Test_karl/material/session_12/lecture_12.ipynb
karlbindslev/sds_group29
Fitting a polynomial (4)*Why does it go wrong?*- more spurious parameters- the coefficient size increases Fitting a polynomial (5)*What do you mean the coefficient size increases?*
# Show the mean absolute coefficient size per polynomial order (log scale).
order_idx = pd.Index(range(n_degrees + 1), name='Polynomial order')
coef_df = pd.DataFrame(parameters, index=order_idx)
mean_abs_size = coef_df.abs().mean(1)
ax = mean_abs_size.plot(logy=True)
ax.set_ylabel('Mean parameter size')
_____no_output_____
MIT
Test_karl/material/session_12/lecture_12.ipynb
karlbindslev/sds_group29
Fitting a polynomial (6)*How else could we visualize this problem?*
# Display a pre-built bias/variance illustration figure for regression.
# NOTE(review): f_bias_var is defined elsewhere in the notebook — presumably
# a dict mapping task name to a tuple of figures; confirm before reuse.
f_bias_var['regression'][2]
_____no_output_____
MIT
Test_karl/material/session_12/lecture_12.ipynb
karlbindslev/sds_group29
Colab FAQFor some basic overview and features offered in Colab notebooks, check out: [Overview of Colaboratory Features](https://colab.research.google.com/notebooks/basic_features_overview.ipynb)You need to use the colab GPU for this assignment by selecting:> **Runtime**   →   **Change runtime type**   →   **Hardware Accelerator: GPU** Setup PyTorchAll files are stored at /content/csc413/a4/ folder
###################################################################### # Setup python environment and change the current working directory ###################################################################### !pip install torch torchvision !pip install imageio !pip install matplotlib %mkdir -p ./content/csc413/a4/ %cd ./content/csc413/a4
Requirement already satisfied: imageio in /home/tommy/miniconda3/lib/python3.8/site-packages (2.9.0) Requirement already satisfied: pillow in /home/tommy/miniconda3/lib/python3.8/site-packages (from imageio) (8.1.2) Requirement already satisfied: numpy in /home/tommy/miniconda3/lib/python3.8/site-packages (from imageio) (1.19.2) Requirement already satisfied: matplotlib in /home/tommy/miniconda3/lib/python3.8/site-packages (3.3.4) Requirement already satisfied: numpy>=1.15 in /home/tommy/miniconda3/lib/python3.8/site-packages (from matplotlib) (1.19.2) Requirement already satisfied: kiwisolver>=1.0.1 in /home/tommy/miniconda3/lib/python3.8/site-packages (from matplotlib) (1.3.1) Requirement already satisfied: python-dateutil>=2.1 in /home/tommy/miniconda3/lib/python3.8/site-packages (from matplotlib) (2.8.1) Requirement already satisfied: cycler>=0.10 in /home/tommy/miniconda3/lib/python3.8/site-packages (from matplotlib) (0.10.0) Requirement already satisfied: pillow>=6.2.0 in /home/tommy/miniconda3/lib/python3.8/site-packages (from matplotlib) (8.1.2) Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.3 in /home/tommy/miniconda3/lib/python3.8/site-packages (from matplotlib) (2.4.7) Requirement already satisfied: six in /home/tommy/miniconda3/lib/python3.8/site-packages (from cycler>=0.10->matplotlib) (1.15.0) /mnt/c/Users/superhardcocksgamerp/Documents/Github/Final-Project/content/csc413/a4
MIT
WGAN.ipynb
tommyliu9/CSC413-Generative-Model-Comparisons
Helper code Utility functions
import os

import numpy as np
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms

from six.moves.urllib.request import urlretrieve
import tarfile

import imageio

from urllib.error import URLError
from urllib.error import HTTPError

# NOTE(review): "CUDA_VISABLE_DEVICES" is a misspelling of
# CUDA_VISIBLE_DEVICES, so this assignment currently has no effect.
# Deliberately left unchanged: setting the correctly-spelled variable to the
# placeholder string "GPU_ID" would hide every GPU from CUDA.
os.environ["CUDA_VISABLE_DEVICES"] = "GPU_ID"


def get_file(fname, origin, untar=False, extract=False, archive_format='auto', cache_dir='data'):
    """Download a file into *cache_dir*, creating the directory if needed.

    Arguments:
        fname: Name of the file on disk.
        origin: URL to download from when the file is not cached yet.
        untar: If True, the download is treated as '<fname>.tar.gz' and
            extracted into the cache directory.
        extract, archive_format: Accepted for interface compatibility;
            currently unused.
        cache_dir: Directory used to cache downloads.

    Returns:
        Path to the cached file, or to the extracted path when untar=True.
    """
    datadir = os.path.join(cache_dir)
    if not os.path.exists(datadir):
        os.makedirs(datadir)

    if untar:
        untar_fpath = os.path.join(datadir, fname)
        fpath = untar_fpath + '.tar.gz'
    else:
        fpath = os.path.join(datadir, fname)

    print(fpath)
    if not os.path.exists(fpath):
        print('Downloading data from', origin)

        error_msg = 'URL fetch failure on {}: {} -- {}'
        try:
            try:
                urlretrieve(origin, fpath)
            # BUG FIX: HTTPError is a subclass of URLError, so it must be
            # caught first; with the original order this branch was
            # unreachable.
            except HTTPError as e:
                raise Exception(error_msg.format(origin, e.code, e.msg))
            except URLError as e:
                raise Exception(error_msg.format(origin, e.errno, e.reason))
        except (Exception, KeyboardInterrupt) as e:
            # Remove a partial download before re-raising.
            if os.path.exists(fpath):
                os.remove(fpath)
            raise

    if untar:
        if not os.path.exists(untar_fpath):
            print('Extracting file.')
            with tarfile.open(fpath) as archive:
                archive.extractall(datadir)
        return untar_fpath

    return fpath


class AttrDict(dict):
    """Dictionary whose entries are also accessible as attributes."""

    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        self.__dict__ = self


def to_var(tensor, cuda=True):
    """Wraps a Tensor in a Variable, optionally placing it on the GPU.

        Arguments:
            tensor: A Tensor object.
            cuda: A boolean flag indicating whether to use the GPU.

        Returns:
            A Variable object, on the GPU if cuda==True.
    """
    if cuda:
        return Variable(tensor.cuda())
    else:
        return Variable(tensor)


def to_data(x):
    """Converts variable to numpy."""
    if torch.cuda.is_available():
        x = x.cpu()
    return x.data.numpy()


def create_dir(directory):
    """Creates a directory if it doesn't already exist."""
    if not os.path.exists(directory):
        os.makedirs(directory)


def gan_checkpoint(iteration, G, D, opts):
    """Saves the parameters of the generator G and discriminator D.

    `iteration` is accepted for interface compatibility; the checkpoint
    files are overwritten in place each time.
    """
    G_path = os.path.join(opts.checkpoint_dir, 'G.pkl')
    D_path = os.path.join(opts.checkpoint_dir, 'D.pkl')
    torch.save(G.state_dict(), G_path)
    torch.save(D.state_dict(), D_path)


def load_checkpoint(opts):
    """Loads the generator and discriminator models from checkpoints.

    Returns:
        (G, D) with weights restored from `opts.load`, moved to the GPU
        when one is available.
    """
    G_path = os.path.join(opts.load, 'G.pkl')
    # BUG FIX: gan_checkpoint saves the discriminator as 'D.pkl', but this
    # function previously read 'D_.pkl', so loading always failed.
    D_path = os.path.join(opts.load, 'D.pkl')

    G = DCGenerator(noise_size=opts.noise_size, conv_dim=opts.g_conv_dim, spectral_norm=opts.spectral_norm)
    D = DCDiscriminator(conv_dim=opts.d_conv_dim)

    G.load_state_dict(torch.load(G_path, map_location=lambda storage, loc: storage))
    D.load_state_dict(torch.load(D_path, map_location=lambda storage, loc: storage))

    if torch.cuda.is_available():
        G.cuda()
        D.cuda()
        print('Models moved to GPU.')

    return G, D


def merge_images(sources, targets, opts):
    """Creates a grid consisting of pairs of columns, where the first column in
    each pair contains images source images and the second column in each pair
    contains images generated by the CycleGAN from the corresponding images in
    the first column.
    """
    _, _, h, w = sources.shape
    row = int(np.sqrt(opts.batch_size))
    merged = np.zeros([3, row * h, row * w * 2])
    for (idx, s, t) in (zip(range(row ** 2), sources, targets, )):
        i = idx // row
        j = idx % row
        merged[:, i * h:(i + 1) * h, (j * 2) * h:(j * 2 + 1) * h] = s
        merged[:, i * h:(i + 1) * h, (j * 2 + 1) * h:(j * 2 + 2) * h] = t
    return merged.transpose(1, 2, 0)


def generate_gif(directory_path, keyword=None):
    """Assembles the PNGs in `directory_path` (optionally filtered by
    `keyword`) into an animated GIF saved in the same directory."""
    images = []
    for filename in sorted(os.listdir(directory_path)):
        if filename.endswith(".png") and (keyword is None or keyword in filename):
            img_path = os.path.join(directory_path, filename)
            print("adding image {}".format(img_path))
            images.append(imageio.imread(img_path))

    if keyword:
        imageio.mimsave(
            os.path.join(directory_path, 'anim_{}.gif'.format(keyword)), images)
    else:
        imageio.mimsave(os.path.join(directory_path, 'anim.gif'), images)


def create_image_grid(array, ncols=None):
    """Tiles a batch of images (N x C x H x W) into one H*rows x W*cols grid.

    Single-channel grids are squeezed to 2-D.
    """
    num_images, channels, cell_h, cell_w = array.shape
    if not ncols:
        ncols = int(np.sqrt(num_images))
    nrows = int(np.math.floor(num_images / float(ncols)))
    result = np.zeros((cell_h * nrows, cell_w * ncols, channels), dtype=array.dtype)
    for i in range(0, nrows):
        for j in range(0, ncols):
            result[i * cell_h:(i + 1) * cell_h, j * cell_w:(j + 1) * cell_w, :] = array[i * ncols + j].transpose(1, 2, 0)

    if channels == 1:
        result = result.squeeze()
    return result


def gan_save_samples(G, fixed_noise, iteration, opts):
    """Generates samples from G on `fixed_noise`, arranges them into a grid
    and writes the grid image to `opts.sample_dir`."""
    generated_images = G(fixed_noise)
    generated_images = to_data(generated_images)

    grid = create_image_grid(generated_images)

    # merged = merge_images(X, fake_Y, opts)
    path = os.path.join(opts.sample_dir, 'sample-{:06d}.png'.format(iteration))
    imageio.imwrite(path, grid)
    print('Saved {}'.format(path))
_____no_output_____
MIT
WGAN.ipynb
tommyliu9/CSC413-Generative-Model-Comparisons
Data loader
def get_emoji_loader(emoji_type, opts):
    """Creates training and test data loaders for the emoji image folders.

    Arguments:
        emoji_type: Sub-directory name under data/emojis to load from.
        opts: Options object providing image_size, batch_size, num_workers.
    """
    transform = transforms.Compose([
        # transforms.Scale was deprecated and removed in torchvision >= 0.9;
        # Resize is the drop-in replacement with identical behaviour.
        transforms.Resize(opts.image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, ), (0.5, )),
    ])

    train_path = os.path.join('data/emojis', emoji_type)
    test_path = os.path.join('data/emojis', 'Test_{}'.format(emoji_type))

    train_dataset = datasets.ImageFolder(train_path, transform)
    test_dataset = datasets.ImageFolder(test_path, transform)

    train_dloader = DataLoader(dataset=train_dataset, batch_size=opts.batch_size, shuffle=True, num_workers=opts.num_workers)
    test_dloader = DataLoader(dataset=test_dataset, batch_size=opts.batch_size, shuffle=False, num_workers=opts.num_workers)

    return train_dloader, test_dloader

def get_emnist_loader(emnist_type, opts):
    """Creates training and test data loaders for the EMNIST dataset,
    downloading it on first use.

    Arguments:
        emnist_type: EMNIST split name (e.g. 'letters').
        opts: Options object providing image_size, batch_size, num_workers.
    """
    transform = transforms.Compose([
        # Resize replaces the removed transforms.Scale (see above).
        transforms.Resize(opts.image_size),
        transforms.ToTensor(),
        # Normalize expects sequences of per-channel stats; (0.5) is a plain
        # float, so use one-element tuples for the grayscale channel.
        transforms.Normalize((0.5,), (0.5,)),
    ])
    train = datasets.EMNIST(".", split=emnist_type, train=True, download=True, transform=transform)
    test = datasets.EMNIST(".", split=emnist_type, train=False, download=True, transform=transform)
    train_dloader = DataLoader(dataset=train, batch_size=opts.batch_size, shuffle=True, num_workers=opts.num_workers)
    test_dloader = DataLoader(dataset=test, batch_size=opts.batch_size, shuffle=False, num_workers=opts.num_workers)
    return train_dloader, test_dloader
_____no_output_____
MIT
WGAN.ipynb
tommyliu9/CSC413-Generative-Model-Comparisons
Training and evaluation code
def print_models(G_XtoY, G_YtoX, D_X, D_Y):
    """Prints model information for the generators and discriminators.

    Only the first generator/discriminator is printed; the second pair is
    accepted for interface compatibility and ignored.
    """
    for header, model in ((" G ", G_XtoY), (" D ", D_X)):
        print(header)
        print("---------------------------------------")
        print(model)
        print("---------------------------------------")


def create_model(opts):
    """Builds the generators and discriminators.
    """
    ### GAN
    generator = DCGenerator(noise_size=opts.noise_size, conv_dim=opts.g_conv_dim, spectral_norm=opts.spectral_norm)
    discriminator = DCDiscriminator(conv_dim=opts.d_conv_dim, spectral_norm=opts.spectral_norm)
    print_models(generator, None, discriminator, None)

    if torch.cuda.is_available():
        generator.cuda()
        discriminator.cuda()
        print('Models moved to GPU.')
    return generator, discriminator


def train(opts):
    """Loads the data, creates checkpoint and sample directories, and starts the training loop.
    """
    # Train/test loaders for the EMNIST split named by opts.X.
    dataloader_X, test_dataloader_X = get_emnist_loader(opts.X, opts=opts)

    # Make sure the output directories exist before training writes to them.
    for directory in (opts.checkpoint_dir, opts.sample_dir):
        create_dir(directory)

    # Run the GAN training loop and hand back the trained networks.
    G, D = gan_training_loop(dataloader_X, test_dataloader_X, opts)
    return G, D


def print_opts(opts):
    """Prints the values of all command-line arguments.
    """
    print('=' * 80)
    print('Opts'.center(80))
    print('-' * 80)
    for key, value in opts.__dict__.items():
        if value:
            print('{:>30}: {:<30}'.format(key, value).center(80))
    print('=' * 80)
_____no_output_____
MIT
WGAN.ipynb
tommyliu9/CSC413-Generative-Model-Comparisons
Your code for generators and discriminators Helper modules
def sample_noise(batch_size, dim):
    """
    Generate a PyTorch Tensor of uniform random noise.

    Input:
    - batch_size: Integer giving the batch size of noise to generate.
    - dim: Integer giving the dimension of noise to generate.

    Output:
    - A PyTorch Tensor of shape (batch_size, dim, 1, 1) containing uniform
      random noise in the range (-1, 1).
    """
    uniform = torch.rand(batch_size, dim)   # values in [0, 1)
    scaled = uniform * 2 - 1                # map to (-1, 1)
    return to_var(scaled).unsqueeze(2).unsqueeze(3)


def upconv(in_channels, out_channels, kernel_size, stride=2, padding=2, batch_norm=True, spectral_norm=False):
    """Creates a upsample-and-convolution layer, with optional batch normalization.
    """
    modules = []
    if stride > 1:
        # Upsample spatially by `stride`, then convolve with stride 1.
        modules.append(nn.Upsample(scale_factor=stride))
    conv_layer = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                           kernel_size=kernel_size, stride=1, padding=padding, bias=False)
    modules.append(SpectralNorm(conv_layer) if spectral_norm else conv_layer)
    if batch_norm:
        modules.append(nn.BatchNorm2d(out_channels))
    return nn.Sequential(*modules)


def conv(in_channels, out_channels, kernel_size, stride=2, padding=2, batch_norm=True, init_zero_weights=False, spectral_norm=False):
    """Creates a convolutional layer, with optional batch normalization.
    """
    conv_layer = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                           kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
    if init_zero_weights:
        # Near-zero random initialization instead of the default scheme.
        conv_layer.weight.data = torch.randn(out_channels, in_channels, kernel_size, kernel_size) * 0.001
    modules = [SpectralNorm(conv_layer) if spectral_norm else conv_layer]
    if batch_norm:
        modules.append(nn.BatchNorm2d(out_channels))
    return nn.Sequential(*modules)


class ResnetBlock(nn.Module):
    """Residual block: output = input + conv(input), shape preserving."""

    def __init__(self, conv_dim):
        super(ResnetBlock, self).__init__()
        self.conv_layer = conv(in_channels=conv_dim, out_channels=conv_dim, kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        return x + self.conv_layer(x)
_____no_output_____
MIT
WGAN.ipynb
tommyliu9/CSC413-Generative-Model-Comparisons
DCGAN Spectral Norm class GAN generator
class DCGenerator(nn.Module):
    """DCGAN-style generator: maps a noise vector to a 1 x 32 x 32 image."""

    def __init__(self, noise_size, conv_dim, spectral_norm=False):
        super(DCGenerator, self).__init__()
        self.conv_dim = conv_dim
        self.relu = nn.ReLU()
        # FIX: use the noise_size parameter instead of a hard-coded 100 so the
        # module honors its constructor argument (default usage passed 100, so
        # behaviour is unchanged for existing callers).
        self.linear_bn = upconv(noise_size, conv_dim*4, 3)  # BS x noise_size x 1 x 1 -> BS x 128 x 4 x 4
        self.upconv1 = upconv(conv_dim*4, conv_dim*2, 5)    # 4x4 -> 8x8
        self.upconv2 = upconv(conv_dim*2, conv_dim, 5)      # 8x8 -> 16x16
        self.upconv3 = upconv(conv_dim, 1, 5, batch_norm=False)  # 16x16 -> 32x32, single channel
        self.tanh = nn.Tanh()

    def forward(self, z):
        """Generates an image given a sample of random noise.

            Input
            -----
                z: BS x noise_size x 1 x 1   --> BSx100x1x1 (during training)

            Output
            ------
                out: BS x channels x image_width x image_height  --> BSx1x32x32 (during training)
        """
        batch_size = z.size(0)

        out = self.relu(self.linear_bn(z))  # BS x 128 x 4 x 4  (conv_dim=32)
        out = out.view(-1, self.conv_dim*4, 4, 4)
        out = self.relu(self.upconv1(out))  # BS x 64 x 8 x 8
        out = self.relu(self.upconv2(out))  # BS x 32 x 16 x 16
        out = self.tanh(self.upconv3(out))  # BS x 1 x 32 x 32

        out_size = out.size()
        if out_size != torch.Size([batch_size, 1, 32, 32]):
            # FIX: message previously claimed "3 x 32 x 32" although the check
            # (and upconv3) use a single output channel.
            raise ValueError("expect {} x 1 x 32 x 32, but get {}".format(batch_size, out_size))
        return out
_____no_output_____
MIT
WGAN.ipynb
tommyliu9/CSC413-Generative-Model-Comparisons
GAN discriminator
class DCDiscriminator(nn.Module):
    """Defines the architecture of the discriminator network.
       Note: Both discriminators D_X and D_Y have the same architecture in this assignment.
    """
    def __init__(self, conv_dim=64, spectral_norm=False):
        super(DCDiscriminator, self).__init__()
        # Four strided conv layers halve the spatial size each time:
        # 32 -> 16 -> 8 -> 4 -> 1 (final layer has no batch norm).
        self.conv1 = conv(in_channels=1, out_channels=conv_dim, kernel_size=5, stride=2, spectral_norm=spectral_norm)
        self.conv2 = conv(in_channels=conv_dim, out_channels=conv_dim*2, kernel_size=5, stride=2, spectral_norm=spectral_norm)
        self.conv3 = conv(in_channels=conv_dim*2, out_channels=conv_dim*4, kernel_size=5, stride=2, spectral_norm=spectral_norm)
        self.conv4 = conv(in_channels=conv_dim*4, out_channels=1, kernel_size=5, stride=2, padding=1, batch_norm=False, spectral_norm=spectral_norm)

    def forward(self, x):
        """Returns one unbounded critic score per image (WGAN-style: no sigmoid)."""
        batch_size = x.size(0)

        out = F.relu(self.conv1(x))    # BS x 64 x 16 x 16
        out = F.relu(self.conv2(out))    # BS x 64 x 8 x 8
        out = F.relu(self.conv3(out))    # BS x 64 x 4 x 4

        # NOTE(review): squeeze() on a batch of size 1 collapses the batch
        # dimension too, which would then fail the size check below.
        out = self.conv4(out).squeeze()
        out_size = out.size()
        if out_size != torch.Size([batch_size,]):
            raise ValueError("expect {} x 1, but get {}".format(batch_size, out_size))
        return out


from torch.utils.tensorboard import SummaryWriter
import numpy as np

def log_to_tensorboard(iteration, losses):
    # Log the most recent value of every tracked loss series.
    # NOTE(review): a fresh SummaryWriter is created (and closed) per call;
    # reusing one writer would be cheaper, but behaviour is the same.
    writer = SummaryWriter("./runs/")
    for key in losses:
        arr = losses[key]
        writer.add_scalar(f'loss/{key}', arr[-1], iteration)
    writer.close()

def calculate_log_likelihood(model, opts):
    # Draw one EMNIST 'letters' batch and average log(model(x)) over it.
    # NOTE(review): transforms.Scale is deprecated (removed in torchvision
    # >= 0.9), and model outputs can be negative, making torch.log produce
    # NaNs — confirm the intended use before relying on this value.
    transform = transforms.Compose([
        transforms.Scale(opts.image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5), (0.5)),
    ])
    train = datasets.EMNIST(".", split="letters",train = True, download = True, transform= transform)
    train_dloader = DataLoader(dataset=train, batch_size=opts.batch_size, shuffle=True,num_workers=opts.num_workers)
    x = next(iter(train_dloader))[0]
    print(x)
    return torch.log(model(x)).mean()
_____no_output_____
MIT
WGAN.ipynb
tommyliu9/CSC413-Generative-Model-Comparisons
GAN training loop
def gan_training_loop(dataloader, test_dataloader, opts): """Runs the training loop. * Saves checkpoint every opts.checkpoint_every iterations * Saves generated samples every opts.sample_every iterations """ # Create generators and discriminators G, D = create_model(opts) g_params = G.parameters() # Get generator parameters d_params = D.parameters() # Get discriminator parameters # Create optimizers for the generators and discriminators g_optimizer = optim.RMSprop(g_params, opts.lr) d_optimizer = optim.RMSprop(d_params, opts.lr) train_iter = iter(dataloader) test_iter = iter(test_dataloader) # Get some fixed data from domains X and Y for sampling. These are images that are held # constant throughout training, that allow us to inspect the model's performance. fixed_noise = sample_noise(100, opts.noise_size) # # 100 x noise_size x 1 x 1 iter_per_epoch = len(train_iter) total_train_iters = opts.train_iters losses = {"iteration": [], "D_fake_loss": [], "D_real_loss": [], "G_loss": [], "D_loss": [], "W_loss": []} # adversarial_loss = torch.nn.BCEWithLogitsLoss() gp_weight = 1 epoch = 0 total_iters = 0 try: for iteration in range(1, opts.train_iters + 1): # Reset data_iter for each epoch # ones = Variable(torch.Tensor(real_images.shape[0]).float().cuda().fill_(1.0), requires_grad=False) if total_iters % iter_per_epoch == 0: epoch +=1 train_iter = iter(dataloader) print("EPOCH:", epoch) b = opts.batch_size for i in range(opts.n_critic): real_images, real_labels = train_iter.next() real_images, real_labels = to_var(real_images), to_var(real_labels).long().squeeze() m = b noise = sample_noise(m, opts.noise_size) fake_images = G(noise) D_real_loss = D(real_images).mean() D_fake_loss = D(fake_images).mean() D_loss = -(D_real_loss - D_fake_loss) #Minimize D_real_loss - D_fake_loss D_loss.backward() d_optimizer.step() Wasserstein_Distance = D_real_loss - D_fake_loss total_iters += 1 for param in D.parameters(): param.data.clamp_(-opts.clip, opts.clip) D.zero_grad() 
G.zero_grad() # z = sample_noise(m, opts.noise_size) G_z = G(z) G_loss = -torch.mean(D(G_z)) G_loss.backward() g_optimizer.step() D.zero_grad() G.zero_grad() if iteration % opts.log_step == 0: w_loss = Wasserstein_Distance losses['iteration'].append(iteration) losses['D_real_loss'].append(D_real_loss.item()) losses['D_loss'].append(D_loss.item()) losses['D_fake_loss'].append(D_fake_loss.item()) losses['W_loss'].append(w_loss.item()) losses['G_loss'].append(G_loss.item()) print('Iteration [{:4d}/{:4d}] | D_real_loss: {:6.4f} | D_fake_loss: {:6.4f} | G_loss: {:6.4f} | D_loss: {:6.4f} | Wasserstein_Distance: {:6.4f}'.format( iteration, total_train_iters, D_real_loss.item(), D_fake_loss.item(), G_loss.item(), D_loss.item(), Wasserstein_Distance.item() )) log_to_tensorboard(iteration, losses) # Save the generated samples if iteration % opts.sample_every == 0: gan_save_samples(G, fixed_noise, iteration, opts) # Save the model parameters if iteration % opts.checkpoint_every == 0: gan_checkpoint(iteration, G, D, opts) except KeyboardInterrupt: print('Exiting early from training.') return G, D plt.figure() plt.plot(losses['iteration'], losses['D_real_loss'], label='D_real') plt.plot(losses['iteration'], losses['D_fake_loss'], label='D_fake') plt.plot(losses['iteration'], losses['G_loss'], label='G') plt.plot(losses['iteration'], losses['D_loss'], label='D') plt.legend() plt.savefig(os.path.join(opts.sample_dir, 'losses.png')) plt.close() return G, D
_____no_output_____
MIT
WGAN.ipynb
tommyliu9/CSC413-Generative-Model-Comparisons
Training Download dataset WGAN
SEED = 11

# Set the random seed manually for reproducibility.
np.random.seed(SEED)
torch.manual_seed(SEED)
if torch.cuda.is_available():
    torch.cuda.manual_seed(SEED)

# Training configuration for the WGAN run.
args = AttrDict()
args_dict = {
    'clip': .01,          # critic weight-clipping bound
    'n_critic': 5,        # critic updates per generator update
    'image_size': 32,
    'g_conv_dim': 32,
    'd_conv_dim': 64,
    'noise_size': 100,
    'num_workers': 0,
    'train_iters': 300000,
    'X': 'letters',       # EMNIST split to train on
    'Y': None,
    'lr': 5e-5,
    'beta1': 0.5,
    'beta2': 0.999,
    'batch_size': 64,
    'checkpoint_dir': './results/checkpoints_gan_gp1_lr3e-5',
    'sample_dir': './results/samples_gan_gp1_lr3e-5',
    'load': None,
    'log_step': 200,
    'sample_every': 200,
    'checkpoint_every': 1000,
    'spectral_norm': False,
    'gradient_penalty': False,
    'd_train_iters': 1
}
args.update(args_dict)

print_opts(args)
G, D = train(args)

generate_gif("results/samples_gan_gp1_lr3e-5")

# CUDA diagnostics (bare expressions; notebook displays the last value only).
torch.cuda.is_available()
torch.cuda.device(0)
torch.cuda.get_device_name(0)
torch.version.cuda
torch.cuda.FloatTensor()

# Reload the trained models from the saved checkpoints.
load_args = AttrDict()
args_dict = {
    'clip': .01,
    'n_critic': 5,
    'image_size': 32,
    'g_conv_dim': 32,
    'd_conv_dim': 64,
    'noise_size': 100,
    'num_workers': 0,
    'train_iters': 300000,
    'X': 'letters',
    'Y': None,
    'lr': 5e-5,
    'beta1': 0.5,
    'beta2': 0.999,
    'batch_size': 64,
    'checkpoint_dir': './results/checkpoints_gan_gp1_lr3e-5',
    'sample_dir': './results/samples_gan_gp1_lr3e-5',
    # BUG FIX: checkpoints are written to checkpoint_dir by gan_checkpoint,
    # so 'load' must point there, not at the samples directory.
    'load': './results/checkpoints_gan_gp1_lr3e-5',
    'log_step': 200,
    'sample_every': 200,
    'checkpoint_every': 1000,
    'spectral_norm': False,
    'gradient_penalty': False,
    'd_train_iters': 1
}
load_args.update(args_dict)
# BUG FIX: load_checkpoint returns (G, D); the original assignment
# `D,G = ...` swapped the generator and discriminator.
G, D = load_checkpoint(load_args)
_____no_output_____
MIT
WGAN.ipynb
tommyliu9/CSC413-Generative-Model-Comparisons
Load dataset and have a peek: This cell is required in SageMaker Studio, otherwise the download of the dataset will throw an error. After running this cell, the kernel needs to be restarted. After restarting the kernel, continue with the cell below (loading the dataset)
%%capture
import IPython
# Install ipywidgets (required for the dataset-download progress widgets);
# %%capture suppresses the noisy conda output.
!conda install -c conda-forge ipywidgets -y
# Restart the kernel so the freshly installed package is picked up.
IPython.Application.instance().kernel.do_shutdown(True)

# Load the ADE corpus v2 (adverse-drug-effect classification) training split
# and display a small reproducible sample.
from datasets import load_dataset
import pandas as pd

dataset = load_dataset('ade_corpus_v2', 'Ade_corpus_v2_classification')
df = pd.DataFrame(dataset['train'])
df.sample(5, random_state=124)  # fixed random_state -> same 5 rows every run
_____no_output_____
MIT
1_data_prep.ipynb
marshmellow77/adverse-drug-effect-detection