repo_name
stringlengths 6
77
| path
stringlengths 8
215
| license
stringclasses 15
values | cells
list | types
list |
|---|---|---|---|---|
h-mayorquin/time_series_basic
|
presentations/2016-03-13(Nexa Wall Street Columns High Resolution - Next Letter Prediction).ipynb
|
bsd-3-clause
|
[
"Nexa Wall Street Columns High Resolution (30 x 30) - Next letter prediction\nIn this notebook we used the features extract from Nexa from letters to try to predict the next letter.",
"import numpy as np\nfrom sklearn import svm, cross_validation\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nimport h5py\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline\n\nimport sys\nsys.path.append(\"../\")",
"Using constant number of data clusters (Ndata = 20)\nCover all policy\nLoad the data",
"# Data to use\nNdata = 10000\nNside = 30\n\n# First we load the file \nfile_location = '../results_database/text_wall_street_columns_30_Ndata20.hdf5'\n\n# Now we need to get the letters and align them\ntext_directory = '../data/wall_street_letters_30.npy'\nletters_sequence = np.load(text_directory)\nNletters = len(letters_sequence)\nsymbols = set(letters_sequence)\n\ntargets = []\n\nfor index in range(Ndata):\n letter_index = index // Nside\n targets.append(letters_sequence[letter_index + 1])\n\n# Transform to array\ntargets = np.array(targets)\nprint(list(enumerate(targets[0:40])))",
"Do the loop",
"# Calculate the predictions \n\nscores_mixed = []\nscores_indp = []\n\nmax_lags = np.arange(2, 17, 2)\n\n# Nexa parameters\nNtime_clusters = 20\nNembedding = 3\n\nfor max_lag in max_lags:\n \n print(max_lag)\n Nspatial_clusters = max_lag\n # Here calculate the scores for the mixes\n run_name = '/test' + str(max_lag)\n f = h5py.File(file_location, 'r')\n\n parameters_string = '/' + str(Nspatial_clusters)\n parameters_string += '-' + str(Ntime_clusters)\n parameters_string += '-' + str(Nembedding)\n\n nexa = f[run_name + parameters_string]\n cluster_to_index = nexa['cluster_to_index']\n code_vectors_softmax = np.array(nexa['code-vectors-softmax'])\n\n # Now we need to classify\n X = code_vectors_softmax[:Ndata]\n y = targets\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.10)\n\n clf_linear = LinearDiscriminantAnalysis()\n clf_linear.fit(X_train, y_train)\n score = clf_linear.score(X_test, y_test) * 100.0\n scores_mixed.append(score)\n\n # Here calculate the scores for the independent\n run_name = '/indep' + str(max_lag)\n f = h5py.File(file_location, 'r')\n \n parameters_string = '/' + str(Nspatial_clusters)\n parameters_string += '-' + str(Ntime_clusters)\n parameters_string += '-' + str(Nembedding)\n\n nexa = f[run_name + parameters_string]\n cluster_to_index = nexa['cluster_to_index']\n code_vectors_softmax = np.array(nexa['code-vectors-softmax'])\n\n # Now we need to classify\n X = code_vectors_softmax[:Ndata]\n y = targets\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.10)\n\n clf_linear = LinearDiscriminantAnalysis()\n clf_linear.fit(X_train, y_train)\n score = clf_linear.score(X_test, y_test) * 100.0\n scores_indp.append(score)\n\nfig = plt.figure(figsize=(16, 12))\nax = fig.add_subplot(111)\nax.plot(max_lags, scores_indp, 'o-', label='independent', lw=2, markersize=10)\nax.plot(max_lags, scores_mixed, 'o-', label='mixed', lw=2, markersize=10)\n\nax.set_ylim(0, 
105)\nax.set_ylabel('Accuracy')\nax.set_xlabel('Max lags')\nax.set_title('Accuracy vs Max Lags (Inclusive Policy)')\nax.legend()",
"Cover Exclusively Policy\nLoad data",
"# Data to use\nNdata = 10000\nNside = 30\n\n# Calculate the predictions \n\nscores_mixed = []\nscores_indp = []\n\nmax_lags = np.arange(2, 17, 2)\n\n# Nexa parameters\nNembedding = 3\nNtime_clusters = 20\n\n# First we load the file \nfile_location = '../results_database/text_wall_street_columns_30_Ndata20.hdf5'\n\n# Now we need to get the letters and align them\ntext_directory = '../data/wall_street_letters_30.npy'\nletters_sequence = np.load(text_directory)\nNletters = len(letters_sequence)\nsymbols = set(letters_sequence)",
"Do the Loop and Calculate the Predictions",
"for max_lag in max_lags:\n targets = []\n \n aux = Nside - max_lag\n for index in range(Ndata):\n letter_index = index // aux\n targets.append(letters_sequence[letter_index + 1])\n \n # Transform to array\n targets = np.array(targets)\n \n print(max_lag)\n Nspatial_clusters = max_lag\n # Here calculate the scores for the mixes\n run_name = '/test' + str(max_lag)\n f = h5py.File(file_location, 'r')\n\n parameters_string = '/' + str(Nspatial_clusters)\n parameters_string += '-' + str(Ntime_clusters)\n parameters_string += '-' + str(Nembedding)\n\n nexa = f[run_name + parameters_string]\n cluster_to_index = nexa['cluster_to_index']\n code_vectors_softmax = np.array(nexa['code-vectors-softmax'])\n \n # Take only the ondes you need\n index = np.arange(Ndata * 3)\n index = index[(index % 30) < aux]\n code_vectors_softmax = code_vectors_softmax[index, :]\n \n if False:\n print(index[0:40])\n print(code_vectors_softmax.shape)\n print(list(enumerate(targets[0:40])))\n print(targets.size)\n print(code_vectors_softmax.shape)\n print(Ndata)\n \n # Now we need to classify\n X = code_vectors_softmax[:Ndata]\n y = targets\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.10)\n\n clf_linear = LinearDiscriminantAnalysis()\n clf_linear.fit(X_train, y_train)\n score = clf_linear.score(X_test, y_test) * 100.0\n scores_mixed.append(score)\n\n # Here calculate the scores for the independent\n run_name = '/indep' + str(max_lag)\n f = h5py.File(file_location, 'r')\n \n parameters_string = '/' + str(Nspatial_clusters)\n parameters_string += '-' + str(Ntime_clusters)\n parameters_string += '-' + str(Nembedding)\n\n nexa = f[run_name + parameters_string]\n cluster_to_index = nexa['cluster_to_index']\n code_vectors_softmax = np.array(nexa['code-vectors-softmax'])\n \n # Take only the ondes you need\n index = np.arange(Ndata * 3)\n index = index[(index % 30) < aux]\n code_vectors_softmax = code_vectors_softmax[index, :]\n\n # Now we need to classify\n X 
= code_vectors_softmax[:Ndata]\n y = targets\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.10)\n\n clf_linear = LinearDiscriminantAnalysis()\n clf_linear.fit(X_train, y_train)\n score = clf_linear.score(X_test, y_test) * 100.0\n scores_indp.append(score)\n\nfig = plt.figure(figsize=(16, 12))\nax = fig.add_subplot(111)\nax.plot(max_lags, scores_indp, 'o-', label='independent', lw=2, markersize=10)\nax.plot(max_lags, scores_mixed, 'o-', label='mixed', lw=2, markersize=10)\n\nax.set_ylim(0, 105)\nax.set_ylabel('Accuracy')\nax.set_xlabel('Max lags')\nax.set_title('Accuracy vs Max Lags (Exclusive Policy)')\nax.legend()",
"Using constant number of features\nCover all policy\nLoad the data",
"# Data to use\nNdata = 10000\nNside = 30\n\n# First we load the file \nfile_location = '../results_database/text_wall_street_columns_30_semi_constantNdata.hdf5'\n\n# Now we need to get the letters and align them\ntext_directory = '../data/wall_street_letters_30.npy'\nletters_sequence = np.load(text_directory)\nNletters = len(letters_sequence)\nsymbols = set(letters_sequence)\n\ntargets = []\n\nfor index in range(Ndata):\n letter_index = index // Nside\n targets.append(letters_sequence[letter_index + 1])\n\n# Transform to array\ntargets = np.array(targets)\nprint(list(enumerate(targets[0:40])))",
"Do the loop",
"# Calculate the predictions \n\nscores_mixed = []\nscores_indp = []\n\nmax_lags = np.arange(2, 17, 2)\n\n# Nexa parameters\nNtime_clusters = 20\nNembedding = 3\n\nfor max_lag in max_lags:\n \n print(max_lag)\n Nspatial_clusters = max_lag\n Ntime_clusters = 60 // max_lag\n \n # Here calculate the scores for the mixes\n run_name = '/test' + str(max_lag)\n f = h5py.File(file_location, 'r')\n\n parameters_string = '/' + str(Nspatial_clusters)\n parameters_string += '-' + str(Ntime_clusters)\n parameters_string += '-' + str(Nembedding)\n\n nexa = f[run_name + parameters_string]\n cluster_to_index = nexa['cluster_to_index']\n code_vectors_softmax = np.array(nexa['code-vectors-softmax'])\n\n # Now we need to classify\n X = code_vectors_softmax[:Ndata]\n y = targets\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.10)\n\n clf_linear = LinearDiscriminantAnalysis()\n clf_linear.fit(X_train, y_train)\n score = clf_linear.score(X_test, y_test) * 100.0\n scores_mixed.append(score)\n\n # Here calculate the scores for the independent\n run_name = '/indep' + str(max_lag)\n f = h5py.File(file_location, 'r')\n \n parameters_string = '/' + str(Nspatial_clusters)\n parameters_string += '-' + str(Ntime_clusters)\n parameters_string += '-' + str(Nembedding)\n\n nexa = f[run_name + parameters_string]\n cluster_to_index = nexa['cluster_to_index']\n code_vectors_softmax = np.array(nexa['code-vectors-distance'])\n\n # Now we need to classify\n X = code_vectors_softmax[:Ndata]\n y = targets\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.10)\n\n clf_linear = LinearDiscriminantAnalysis()\n clf_linear.fit(X_train, y_train)\n score = clf_linear.score(X_test, y_test) * 100.0\n scores_indp.append(score)\n\nfig = plt.figure(figsize=(16, 12))\nax = fig.add_subplot(111)\nax.plot(max_lags, scores_indp, 'o-', label='independent', lw=2, markersize=10)\nax.plot(max_lags, scores_mixed, 'o-', label='mixed', lw=2, 
markersize=10)\n\nax.set_ylim(0, 105)\nax.set_ylabel('Accuracy')\nax.set_xlabel('Max lags')\nax.set_title('Accuracy vs Max Lags (Inclusive Policy)')\nax.legend()",
"Cover Exclusively Policy\nLoad the Data",
"# Data to use\nNdata = 10000\nNside = 30\n\n# Calculate the predictions \n\nscores_mixed = []\nscores_indp = []\n\nmax_lags = np.arange(2, 17, 2)\n\n# Nexa parameters\nNtime_clusters = 20\nNembedding = 3\n\n# First we load the file \nfile_location = '../results_database/text_wall_street_columns_30_semi_constantNdata.hdf5'\n\n# Now we need to get the letters and align them\ntext_directory = '../data/wall_street_letters_30.npy'\nletters_sequence = np.load(text_directory)\nNletters = len(letters_sequence)\nsymbols = set(letters_sequence)",
"Do the Loop",
"for max_lag in max_lags:\n targets = []\n \n aux = Nside - max_lag\n \n #Take the corresponding letters\n for index in range(Ndata):\n letter_index = index // aux\n targets.append(letters_sequence[letter_index + 1])\n \n # Transform to array\n targets = np.array(targets)\n \n print('lags', max_lag)\n Nspatial_clusters = max_lag\n Ntime_clusters = 60 // max_lag\n print('Ndata clusters', Ntime_clusters)\n \n # Here calculate the scores for the mixes\n run_name = '/test' + str(max_lag)\n f = h5py.File(file_location, 'r')\n\n parameters_string = '/' + str(Nspatial_clusters)\n parameters_string += '-' + str(Ntime_clusters)\n parameters_string += '-' + str(Nembedding)\n \n nexa = f[run_name + parameters_string]\n cluster_to_index = nexa['cluster_to_index']\n code_vectors_softmax = np.array(nexa['code-vectors-softmax'])\n \n # Take only the ondes you need\n index = np.arange(Ndata * 3)\n index = index[(index % 30) < aux]\n code_vectors_softmax = code_vectors_softmax[index, :]\n \n if False:\n print(index[0:40])\n print(code_vectors_softmax.shape)\n print(list(enumerate(targets[0:40])))\n print(targets.size)\n print(code_vectors_softmax.shape)\n print(Ndata)\n \n # Now we need to classify\n X = code_vectors_softmax[:Ndata]\n y = targets\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.10)\n\n clf_linear = LinearDiscriminantAnalysis()\n clf_linear.fit(X_train, y_train)\n score = clf_linear.score(X_test, y_test) * 100.0\n scores_mixed.append(score)\n\n # Here calculate the scores for the independent\n run_name = '/indep' + str(max_lag)\n f = h5py.File(file_location, 'r')\n \n parameters_string = '/' + str(Nspatial_clusters)\n parameters_string += '-' + str(Ntime_clusters)\n parameters_string += '-' + str(Nembedding)\n\n nexa = f[run_name + parameters_string]\n cluster_to_index = nexa['cluster_to_index']\n code_vectors_softmax = np.array(nexa['code-vectors-softmax'])\n \n # Take only the ondes you need\n index = np.arange(Ndata * 3)\n 
index = index[(index % 30) < aux]\n code_vectors_softmax = code_vectors_softmax[index, :]\n\n # Now we need to classify\n X = code_vectors_softmax[:Ndata]\n y = targets\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.10)\n\n clf_linear = LinearDiscriminantAnalysis()\n clf_linear.fit(X_train, y_train)\n score = clf_linear.score(X_test, y_test) * 100.0\n scores_indp.append(score)\n\nfig = plt.figure(figsize=(16, 12))\nax = fig.add_subplot(111)\nax.plot(max_lags, scores_indp, 'o-', label='independent', lw=2, markersize=10)\nax.plot(max_lags, scores_mixed, 'o-', label='mixed', lw=2, markersize=10)\n\nax.set_ylim(0, 105)\nax.set_ylabel('Accuracy')\nax.set_xlabel('Max lags')\nax.set_title('Accuracy vs Max Lags (Exclusive Policy)')\nax.legend()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
tritemio/multispot_paper
|
realtime kinetics/Simulated Kinetic Curve Fit - Template.ipynb
|
mit
|
[
"sigma = 0.016\n\ntime_window = 30\ntime_step = 5\ntime_start = -900\ntime_stop = 900\ndecimation = 20\nt0_vary = True\n\ntrue_params = dict(\n tau = 60, # time constant\n init_value = 0.3, # initial value (for t < t0) \n final_value = 0.8, # final value (for t -> +inf)\n t0 = 0) # time origin\n\nnum_sim_cycles = 1000\ntaus = (30, 60)",
"Notebook arguments\n\nsigma (float): standard deviation of additive Gaussian noise to be simulated\ntime_window (float): seconds, integration window duration\ntime_step (float): seconds, time step for the moving integration window\ntime_start (float): seconds, start of time axis (kinetics starts at t = t0).\ntime_stop (float): seconds, stop of time axis (kinetics starts at t = t0).\nt0_vary (bool): whether models should vary the curve origin (t0) during the fit\ntrue_params (dict): parameters used to generate simulated kinetic curves\nnum_sim_cycles (int): number of times fit is repeated (Monte-Carlo)\ntaus (tuple): list of values for time-costant tau simulated during repeated fits (Monte-Carlo).\n\n\nSimulated Kinetic Curve Fit\n<p class=lead>This notebook fits simulated exponential transients with additive Gaissian noise in order to study time-constant fitting accuracy.\nIn particular we compare a simple exponential model with a more realistic model \nwith integration window, checking the effect on the fit results.\n<p>\n\nYou can either run this notebook directly, or run it through the [master notebook](Simulated Kinetic Curve Fit - Run-All.ipynb) for batch processing.\n\n## Imports",
"%matplotlib inline\nimport numpy as np\nimport lmfit\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport models # custom module",
"Fitting models\nModels used to fit the data.\n1. Simple Exponential\nIn this model, we define the model function as an exponential transient:\n$$ y = f(t) = A \\cdot e^{-t/\\tau} + K$$\nThe python function implementing it is:\n\nmodels.exp_func().\n\nNext cell defines and initializes the fitting model (lmfit.model.Model) including parameters' constrains:",
"labels = ('tau', 'init_value', 'final_value')\nmodel = models.factory_model_exp(t0_vary=True)",
"2. Integrated Exponential\nA more realistic model needs to take into account that each data point \nis the result of an integration over a time window $w$:\n$$f(t) = A \\cdot e^{-t/\\tau} + K$$\n$$y(t) = \\int_{t}^{t+w} f(t')\\;dt'$$\nIn other words, when we process a measurement in time chunks, we are integrating\na non-stationary signal $f(t)$ over a time window $w$. This integration causes\na smoothing of $f(t)$, regardless of the fact that time is binned or \nis swiped-through with a moving windows (overlapping chunks).\nNumerically, $t$ is discretized with step equal to (time_step / decimation).\nThe python function implementing this model function is:\n\nmodels.expwindec_func().\n\nAnd, finally, we define and initialize the fitting model parameters' constrains:",
"modelw = models.factory_model_expwin(t_window=time_window, decimation=decimation, t0_vary=t0_vary)",
"Generative model\nThese are the models used to generate the simulates (noisy) data.\n1. Simple Exponential + Noise\nIn this simple model, we simulate random data $Y$ as an exponential decay plus\nadditive Gaussian noise:\n$$ Y(t_k) = f(t_k) + N_k $$\n$$ {N_k} \\sim {\\rm Normal}{\\mu=0; \\sigma}$$\n$$ \\Delta t = t_k - t_{k-1} = \\texttt{time_step}$$\n2. Integrated Exponential + Noise\nFor the \"integrating window\" model, we first define a finer time axis $\\theta_i$ \nwhich oversamples $t_k$ by a factor $n$. Then we define the function $Y_f$ \nadding Gaussian noise $\\sqrt{n}\\,N_i$, with $n$ times larger variance:\n$$ Y_f(\\theta_i) = f(\\theta_i) + \\sqrt{n}\\,N_i $$\n$$ \\Delta \\theta = \\theta_i - \\theta_{i-1} = \\texttt{time_step} \\;/\\; n$$\nFinally, by averaging each time window, we compute the data on the coarse time axis $t_k$:\n$$ Y_w(t_k) = \\frac{1}{m}\\sum_{i} Y_f(\\theta_i)$$\nHere, for each $t_k$, we compute the mean of $m$ consecutive $Y_f$ values. The number $m$ \nis chosen so that $m\\, \\Delta \\theta$ is equal to the time window.\nNoise amplitude\nThe amplitude of the additive noise ($\\sigma$) is estimated from the experimental kinetic curves.\nIn particular we take the variance from the POST period (i.e. the steady state period after the transient).\nThe POST period has been chosen because it exhibits higher variance than the PRE period (i.e. the steady state period\nbefore the transient). 
These values have been calculated in 8-spot bubble-bubble kinetics - Summary.\nIn both models we define the noise amplitude as sigma (see first cell):\nsigma = 0.016\n\nTime axis\nWe also define the parameters for the time axis $t$:\ntime_start = -900 # seconds \ntime_stop = 900 # seconds\ntime_step = 5 # seconds\n\nKinetic curve paramenters\nThe simulated kinetic curve has the following parameters:\ntrue_params = dict(\n tau = 60, # time constant\n init_value = 0.3, # initial value (for t < t0) \n final_value = 0.8, # final value (for t -> +inf)\n t0 = 0) # time origin\n\n<div class=\"alert alert-info\">\n**NOTE**: All previous parameters are defined in the first notebook cell. \n</div>\n\nSingle kinetic curve fit\nHere we simulate one kinetic curve and fit it with the two models (simple exponential and integrated exponential).\nDraw simulated data\nTime axis for simulated data:",
"t = np.arange(time_start, time_stop-time_window, time_step).astype(float)\nt.size",
"An ideal transient (no noise, no integration):",
"y = models.expwindec_func(t, t_window=time_window, **true_params)\ny.shape",
"A simulated transient (including noise + integration):",
"time_window, time_step\n\nyr = models.expwindec_func(t, t_window=time_window, sigma=sigma, **true_params)\nyr.shape",
"Plot the computed curves:",
"plt.plot(t, y, '-', label='model')\nplt.plot(t, yr, 'o', label='model + noise')",
"Fit data\nFit the \"Integrated Exponential\" model:",
"#%%timeit\nresw = modelw.fit(yr, t=t, tau=10, init_value=0.1, final_value=0.9, verbose=False)",
"Fit the \"Simple Exponential\" model:",
"#%%timeit\nres = model.fit(yr, t=t + 0.5*time_window, tau=10, init_value=0.1, final_value=0.9, verbose=False)",
"Print and plot fit results:",
"fig = plt.figure(figsize=(14, 8))\nres.plot(fig=fig)\nci = lmfit.conf_interval(res, res)\nlmfit.report_fit(res)\nprint(lmfit.ci_report(ci, with_offset=False))\n#plt.xlim(-300, 300)\n\nfig = plt.figure(figsize=(14, 8))\nresw.plot(fig=fig)\nci = lmfit.conf_interval(resw, resw)\nlmfit.report_fit(resw)\nprint(lmfit.ci_report(ci, with_offset=False))\n#plt.xlim(-300, 300)",
"Monte-Carlo Simulation\nHere, fixed the model paramenters, we generate and fit several noisy datasets. Then, by plotting the distribution of the fitted parameters, we assess the stability and accuracy of the fit.\nParameters\nThe number simulation cycles is defined by num_sim_cycles. Current value is:",
"num_sim_cycles",
"The fixed kinetic curve parameters are:",
"{k: v for k, v in true_params.items() if k is not \"tau\"}",
"While tau is varied, taking the following values:",
"taus\n\nt0_vary",
"<div class=\"alert alert-info\">\n**NOTE**: All previous parameters are defined in the first notebook cell. \n</div>\n\nFunctions\nHere we define two functions:\n\n\ndraw_samples_and_fit() draws a set of data and fits it with both models\n\n\nmonte_carlo_sim() run the Monte-Carlo simulation: calls draw_samples_and_fit() many times.\n\n\n\nNOTE: Global variables are used by previous functions.",
"def draw_samples_and_fit(true_params):\n # Create the data\n t = np.arange(time_start, time_stop-time_window, time_step).astype(float)\n yr = models.expwindec_func(t, t_window=time_window, sigma=sigma, decimation=100, **true_params)\n\n # Fit the model\n tc = t + 0.5*time_window\n kws = dict(fit_kws=dict(nan_policy='omit'), verbose=False)\n res = model.fit(yr, t=tc, tau=90, method='nelder', **kws)\n res = model.fit(yr, t=tc, **kws)\n resw = modelw.fit(yr, t=t, tau=400, decimation=decimation, method='nelder', **kws)\n resw = modelw.fit(yr, t=t, decimation=decimation, **kws)\n return res, resw\n\ndef monte_carlo_sim(true_params, N):\n df1 = pd.DataFrame(index=range(N), columns=labels)\n df2 = df1.copy()\n for i in range(N):\n res1, res2 = draw_samples_and_fit(true_params)\n for var in labels:\n df1.loc[i, var] = res1.values[var]\n df2.loc[i, var] = res2.values[var]\n return df1, df2",
"Run Monte-Carlo simulation\nRun the Monte-Carlo fit for a set of different time-constants (taus)\nand save results in two DataFrames, one for each model.",
"mc_results1, mc_results2 = [], []\n\n%%timeit -n1 -r1 # <-- prints execution time\nfor tau in taus:\n true_params['tau'] = tau\n df1, df2 = monte_carlo_sim(true_params, num_sim_cycles)\n mc_results1.append(df1)\n mc_results2.append(df2)",
"<div class=\"alert alert-danger\">\n**WARNING**: The previous cell can take a long to execute. Execution time scales with **`num_sim_cycles * len(taus)`**. \n</div>\n\nResults1 - Simple Exponential",
"for tau, df in zip(taus, mc_results1):\n true_params['tau'] = tau\n fig, ax = plt.subplots(1, 3, figsize=(16, 4))\n for i, var in enumerate(labels):\n std = df[var].std()\n df[var].hist(bins=30, ax=ax[i])\n ax[i].set_title(\"%s = %.1f (%.3f)\" % (var, true_params[var], std), fontsize=18)\n ax[i].axvline(true_params[var], color='r', ls='--')\n #print('True parameters: %s' % true_params)",
"Results2 - Integrated Exponential",
"for tau, df in zip(taus, mc_results2):\n true_params['tau'] = tau\n fig, ax = plt.subplots(1, 3, figsize=(16, 4))\n for i, var in enumerate(labels):\n std = df[var].std()\n df[var].hist(bins=30, ax=ax[i])\n ax[i].set_title(\"%s = %.1f (%.3f)\" % (var, true_params[var], std), fontsize=18)\n ax[i].axvline(true_params[var], color='r', ls='--')\n #print('True parameters: %s' % true_params)",
"Conclusions\nThe last two multipanel figures compare the fitting accuracy\nof the model parameter for the simple-exponential and integrated-exponential models.\nWe note that, in particular for the tau parameter, \nthe integrated exponential model is significantly more accurate,\nproviding good estimates at much smaller integration times.\nThis comparison demonstrates empirically the strong advantage \nin using the theoretically more correct\nintegrated exponential model."
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
SchwaZhao/networkproject1
|
04_Twitter_Sentiment_Analysis.ipynb
|
mit
|
[
"Tweet sentiment analysis\nIn this section we will see how to extract features from tweets and use a classifier to classify the tweet as positive or negative.\nWe will use a pandas DataFrames (http://pandas.pydata.org/) to store tweets and process them.\nPandas DataFrames are very powerful python data-structures, like excel spreadsheets with the power of python.",
"# Let's create a DataFrame with each tweet using pandas\nimport pandas as pd\nimport json\nimport numpy as np\n\n\ndef getTweetID(tweet):\n \"\"\" If properly included, get the ID of the tweet \"\"\"\n return tweet.get('id')\n \ndef getUserIDandScreenName(tweet):\n \"\"\" If properly included, get the tweet \n user ID and Screen Name \"\"\"\n user = tweet.get('user')\n if user is not None:\n uid = user.get('id')\n screen_name = user.get('screen_name')\n return uid, screen_name\n else:\n return (None, None)\n \n\n \nfilename = 'AI2.txt'\n\n# create a list of dictionaries with the data that interests us\ntweet_data_list = []\nwith open(filename, 'r') as fopen:\n # each line correspond to a tweet\n for line in fopen:\n if line != '\\n':\n tweet = json.loads(line.strip('\\n'))\n tweet_id = getTweetID(tweet)\n user_id = getUserIDandScreenName(tweet)[0]\n text = tweet.get('text')\n if tweet_id is not None:\n tweet_data_list.append({'tweet_id' : tweet_id,\n 'user_id' : user_id,\n 'text' : text})\n\n# put everything in a dataframe\ntweet_df = pd.DataFrame.from_dict(tweet_data_list)\n\n\n\nprint(tweet_df.shape)\nprint(tweet_df.columns)\n\n#print 5 first element of one of the column\nprint(tweet_df.text.iloc[:5])\n# or\nprint(tweet_df['text'].iloc[:5])\n\n\n#show the first 10 rows\ntweet_df.head(10)",
"Extracting features from the tweets\n1) Tokenize the tweet in a list of words\nThis part uses concepts from Naltural Langage Processing.\nWe will use a tweet tokenizer I built based on TweetTokenizer from NLTK (http://www.nltk.org/).\nYou can see how it works by opening the file TwSentiment.py. The goal is to process any tweets and extract a list of words taking into account usernames, hashtags, urls, emoticons and all the informal text we can find in tweets. We also want to reduce the number of features by doing some transformations such as putting all the words in lower cases.",
"from TwSentiment import CustomTweetTokenizer\n\ntokenizer = CustomTweetTokenizer(preserve_case=False, # keep Upper cases\n reduce_len=True, # reduce repetition of letter to a maximum of three\n strip_handles=False, # remove usernames (@mentions)\n normalize_usernames=True, # replace all mentions to \"@USER\"\n normalize_urls=True, # replace all urls to \"URL\"\n keep_allupper=True) # keep upercase for words that are all in uppercase\n\n# example\ntweet_df.text.iloc[0]\n\ntokenizer.tokenize(tweet_df.text.iloc[0])\n\n# other examples\ntokenizer.tokenize('Hey! This is SO cooooooooooooooooool! :)')\n\ntokenizer.tokenize('Hey! This is so cooooooool! :)')",
"2) Define the features that will represent the tweet\nWe will use the occurrence of words and pair of words (bigrams) as features.\nThis corresponds to a bag-of-words representation (https://en.wikipedia.org/wiki/Bag-of-words_model): we just count each words (or n-grams) without taking account their order. For document classification, the frequency of occurence of each words is usually taken as a feature. In the case of tweets, they are so short that we can just count each words once.\nUsing pair of words allows to capture some of the context in which each words appear. This helps capturing the correct meaning of words.",
"from TwSentiment import bag_of_words_and_bigrams\n\n# this will return a dictionary of features,\n# we just list the features present in this tweet\nbag_of_words_and_bigrams(tokenizer.tokenize(tweet_df.text.iloc[0]))",
"Download the logistic regression classifier\nhttps://www.dropbox.com/s/09rw6a85f7ezk31/sklearn_SGDLogReg_.pickle.zip?dl=1\nI trained this classifier on this dataset: http://help.sentiment140.com/for-students/, following the approach from this paper: http://cs.stanford.edu/people/alecmgo/papers/TwitterDistantSupervision09.pdf\nThis is a set of 14 million tweets with emoticons. Tweets containing \"sad\" emoticons (7 million) are considered negative and tweets with \"happy\" emoticons (7 million) are considered positive.\nI used a Logistic Regression classifier with L2 regularization that I optimized with a 10 fold cross-validation using $F_1$ score as a metric.",
"# the classifier is saved in a \"pickle\" file\nimport pickle\n\nwith open('sklearn_SGDLogReg_.pickle', 'rb') as fopen:\n classifier_dict = pickle.load(fopen)\n\n\n\n# classifier_dict contain the classifier and label mappers\n# that I added so that we remember how the classes are \n# encoded\nclassifier_dict",
"The classifier is in fact contained in a pipeline.\nA sklearn pipeline allows to assemble several transformation of your data (http://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html)",
"pipline = classifier_dict['sklearn_pipeline']",
"In our case we have two steps: \n\nVectorize the textual features (using http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.DictVectorizer.html)\nClassify the vectorized features (using http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDClassifier.html)",
"pipline.steps\n\n# this the step that will transform a list of textual features to a vector of zeros and ones\ndict_vect = pipline.steps[0][1]\n\ndict_vect.feature_names_\n\n# number of features\nlen(dict_vect.feature_names_)\n\n# a little example\ntext = 'Hi all, I am very happy today'\n# first tokenize\ntokens = tokenizer.tokenize(text)\nprint(tokens)\n\n# list features\nfeatures = bag_of_words_and_bigrams(tokens)\nprint(features)\n\n# vectorize features\nX = dict_vect.transform(features)\n\nprint(X.shape)\n\n# X is a special kind of numpy array. beacause it is extremely sparse\n# it can be encoded to take less space in memory\n# if we want to see it fully, we can use .toarray()\n\n# number of non-zero values in X:\nX.toarray().sum()\n",
"The mapping between the list of features and the vector of zeros and ones is done when you train the pipeline with its .fit method.\nClassifing the tweet\nNow that we have vector representing the presence of features in a tweet, we can apply our logistic regression classifier to compute the probability that a tweet belong to the \"sad\" or \"happy\" category",
"classifier = pipline.steps[1][1]\n\nclassifier\n\n# access the weights of the logistic regression\nclassifier.coef_\n\n# we have as many weights as features\nclassifier.coef_.shape\n\n# plus the intrecept \nclassifier.intercept_\n\n# let's check the weight associated with a given feature\nx = dict_vect.transform({'bad': True})\n_, ind = np.where(x.todense())\nclassifier.coef_[0,ind]\n\n\n\n# find the probability for a specific tweet\nclassifier.predict_proba(X)",
"Using the sklearn pipeline to group the two last steps:",
"pipline.predict_proba(features)",
"We see to numbers, the first one is the probability of the tweet being sad, the second one is the probability of the tweet being happy.",
"# note that:\npipline.predict_proba(features).sum()",
"Putting it all together:\nWe will use the class TweetClassifier from TwSentiment.py that puts together this process for us:",
"from TwSentiment import TweetClassifier\n\ntwClassifier = TweetClassifier(pipline,\n tokenizer=tokenizer,\n feature_extractor=bag_of_words_and_bigrams)\n\n# example\ntext = 'Hi all, I am very happy today'\ntwClassifier.classify_text(text)\n\n# the classify text method also accepts a list of text as input\ntwClassifier.classify_text(['great day today!', 'bad day today...'])\n# the classify text method also accepts a list of text as input\n# twClassifier.classify_text(['not sad', 'not happy'])",
"We can now classify our tweets:",
"emo_clas, prob = twClassifier.classify_text(tweet_df.text.tolist())\n\n\n# add the result to the dataframe\n\ntweet_df['pos_class'] = (emo_clas == 'pos')\ntweet_df['pos_prob'] = prob[:,1]\n\ntweet_df.head()\n\n# plot the distribution of probability\nimport matplotlib.pyplot as plt\n%matplotlib inline\nh = plt.hist(tweet_df.pos_prob, bins=50)\n",
"We want to classify users based on the class of their tweets.\nPandas allows to easily group tweets per users using the groupy method of DataFrames:",
"user_group = tweet_df.groupby('user_id')\n\nprint(type(user_group))\n\n# let's look at one of the group\ngroups = user_group.groups\nuid = list(groups.keys())[5]\nuser_group.get_group(uid)\n\n# we need to make a function that takes the dataframe of tweets grouped by users and return the class of the users\ndef get_user_emo(group):\n num_pos = group.pos_class.sum()\n num_tweets = group.pos_class.size\n if num_pos/num_tweets > 0.5:\n return 'pos'\n elif num_pos/num_tweets < 0.5:\n return 'neg'\n else:\n return 'NA'\n\n# apply the function to each group\nuser_df = user_group.apply(get_user_emo)\n\n# This is a pandas Series where the index are the user_id\nuser_df.head(10)",
"Let's add this information to the graph we created earlier",
"import networkx as nx\n\nG = nx.read_graphml('twitter_lcc_AI2.graphml', node_type=int)\n\nfor n in G.nodes_iter():\n if n in user_df.index:\n # here we look at the value of the user_df series at the position where the index \n # is equal to the user_id of the node\n G.node[n]['emotion'] = user_df.loc[user_df.index == n].values[0]\n\n# we have added an attribute 'emotion' to the nodes\nG.node[n]\n\n# save the graph to open it with Gephi\nnx.write_graphml(G, 'twitter_lcc_emo_AI2.graphml')"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
GoogleCloudPlatform/training-data-analyst
|
courses/machine_learning/deepdive/09_sequence_keras/word2vec.ipynb
|
apache-2.0
|
[
"<h1> Creating a custom Word2Vec embedding on your data </h1>\n\nThis notebook illustrates:\n<ol>\n<li> Creating a training dataset\n<li> Running word2vec\n<li> Examining the created embedding\n<li> Export the embedding into a file you can use in other models\n<li> Training the text classification model of [txtcls2.ipynb](txtcls2.ipynb) with this custom embedding.\n</ol>",
"# change these to try this notebook out\nBUCKET = 'alexhanna-dev-ml'\nPROJECT = 'alexhanna-dev'\nREGION = 'us-central1'\n\nimport os\nos.environ['BUCKET'] = BUCKET\nos.environ['PROJECT'] = PROJECT\nos.environ['REGION'] = REGION",
"Creating a training dataset\nThe training dataset simply consists of a bunch of words separated by spaces extracted from your documents. The words are simply in the order that they appear in the documents and words from successive documents are simply appended together. In other words, there is not \"document separator\".\n<p>\nThe only preprocessing that I do is to replace anything that is not a letter or hyphen by a space.\n<p>\nRecall that word2vec is unsupervised. There is no label.",
"import google.datalab.bigquery as bq\n\nquery=\"\"\"\nSELECT\n CONCAT( LOWER(REGEXP_REPLACE(title, '[^a-zA-Z $-]', ' ')), \n \" \", \n LOWER(REGEXP_REPLACE(text, '[^a-zA-Z $-]', ' '))) AS text\nFROM\n `bigquery-public-data.hacker_news.stories`\nWHERE\n LENGTH(title) > 100\n AND LENGTH(text) > 100\n\"\"\"\n\ndf = bq.Query(query).execute().result().to_dataframe()\n\ndf[:5]\n\nwith open('word2vec/words.txt', 'w') as ofp:\n for txt in df['text']:\n ofp.write(txt + \" \")",
"This is what the resulting file looks like:",
"!cut -c-1000 word2vec/words.txt",
"Running word2vec\nWe can run the existing tutorial code as-is.",
"%%bash\ncd word2vec\nTF_CFLAGS=( $(python -c 'import tensorflow as tf; print(\" \".join(tf.sysconfig.get_compile_flags()))') )\nTF_LFLAGS=( $(python -c 'import tensorflow as tf; print(\" \".join(tf.sysconfig.get_link_flags()))') )\ng++ -std=c++11 \\\n -shared word2vec_ops.cc word2vec_kernels.cc \\\n -o word2vec_ops.so -fPIC ${TF_CFLAGS[@]} ${TF_LFLAGS[@]} \\\n -O2 -D_GLIBCXX_USE_CXX11_ABI=0\n\n# -I/usr/local/lib/python2.7/dist-packages/tensorflow/include/external/nsync/public \\",
"The actual evaluation dataset doesn't matter. Let's just make sure to have some words in the input also in the eval. The analogy dataset is of the form \n<pre>\nAthens Greece Cairo Egypt\nBaghdad Iraq Beijing China\n</pre>\ni.e. four words per line where the model is supposed to predict the fourth given the first three. But we'll just make up a junk file.",
"%%writefile word2vec/junk.txt\n: analogy-questions-ignored\nthe user plays several levels\nof the game puzzle\nvote down the negative\n\n%%bash\ncd word2vec\nrm -rf trained\npython word2vec.py \\\n --train_data=./words.txt --eval_data=./junk.txt --save_path=./trained \\\n --min_count=1 --embedding_size=10 --window_size=2",
"Examine the created embedding\nLet's load up the embedding file in TensorBoard. Start up TensorBoard, switch to the \"Projector\" tab and then click on the button to \"Load data\". Load the vocab.txt that is in the output directory of the model.",
"from google.datalab.ml import TensorBoard\nTensorBoard().start('word2vec/trained')",
"Here, for example, is the word \"founders\" in context -- it's near doing, creative, difficult, and fight, which sounds about right ... The numbers next to the words reflect the count -- we should try to get a large enough vocabulary that we can use --min_count=10 when training word2vec, but that would also take too long for a classroom situation. <img src=\"embeds.png\" />",
"for pid in TensorBoard.list()['pid']:\n TensorBoard().stop(pid)\n print('Stopped TensorBoard with pid {}'.format(pid))",
"Export the embedding vectors into a text file\nLet's export the embedding into a text file, so that we can use it the way we used the Glove embeddings in txtcls2.ipynb.\nNotice that we have written out our vocabulary and vectors into two files. We just have to merge them now.",
"!wc word2vec/trained/*.txt\n\n!head -3 word2vec/trained/*.txt\n\nimport pandas as pd\nvocab = pd.read_csv(\"word2vec/trained/vocab.txt\", sep=\"\\s+\", header=None, names=('word', 'count'))\nvectors = pd.read_csv(\"word2vec/trained/vectors.txt\", sep=\"\\s+\", header=None)\nvectors = pd.concat([vocab, vectors], axis=1)\ndel vectors['count']\nvectors.to_csv(\"word2vec/trained/embedding.txt.gz\", sep=\" \", header=False, index=False, index_label=False, compression='gzip')\n\n!zcat word2vec/trained/embedding.txt.gz | head -3",
"Training model with custom embedding\nNow, you can use this embedding file instead of the Glove embedding used in txtcls2.ipynb",
"%%bash\ngsutil cp word2vec/trained/embedding.txt.gz gs://${BUCKET}/txtcls2/custom_embedding.txt.gz\n\n%%bash\nOUTDIR=gs://${BUCKET}/txtcls2/trained_model\nJOBNAME=txtcls_$(date -u +%y%m%d_%H%M%S)\necho $OUTDIR $REGION $JOBNAME\ngsutil -m rm -rf $OUTDIR\ngsutil cp txtcls1/trainer/*.py $OUTDIR\ngcloud ml-engine jobs submit training $JOBNAME \\\n --region=$REGION \\\n --module-name=trainer.task \\\n --package-path=$(pwd)/txtcls1/trainer \\\n --job-dir=$OUTDIR \\\n --staging-bucket=gs://$BUCKET \\\n --scale-tier=BASIC_GPU \\\n --runtime-version=1.4 \\\n -- \\\n --bucket=${BUCKET} \\\n --output_dir=${OUTDIR} \\\n --glove_embedding=gs://${BUCKET}/txtcls2/custom_embedding.txt.gz \\\n --train_steps=36000",
"Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
kunaltyagi/SDES
|
notes/python/p_norvig/word/Fred Buns.ipynb
|
gpl-3.0
|
[
"<div style=\"float:right\"><i>Peter Norvig, 15 June 2015</i></div>\n\nLet's Code About Bike Locks\nThe June 15, 2015 post on Bike Snob NYC leads with \"Let's talk about bike locks.\" Here's what I want to talk about: in a local bike shop, I saw a combination lock called WordLock®,\nwhich replaces digits with letters. I classified this as a Fred lock,\n\"Fred\" being the term (championed by\nBike Snob NYC) for an amateurish bicyclist with the wrong equipment.\nI tried the combination \"FRED,\" and was amused with the result:\n<p><center><img src=\"http://norvig.com/ipython/fredbuns.jpg\"></center>\n<p>\n\n\nFRED BUNS! Naturally I bought the lock (and set the other ones on the rack to FRED BUNS as well). Unfortunately, it turns out the combination on each lock is pre-set; it can't be changed to FRED BUNS (some other models of WordLock® are user-settable).\nBut we can still have fun writing code to answer some questions about\nthe WordLock®:\n\nQuestions\n====\n\n1. How many words can the WordLock® make?\n3. Can a lock with different letters on the tumblers make more words? \n4. How many words can be made simultaneously? For example, with the tumbler set to \"FRED\", the lock\nabove also makes \"BUNS\" in the next line, but with \"SOMN\", fails to make a word in the third line.\nCould different letters make words in every horizontal line?\n5. Is it a coincidence that the phrase \"FRED BUNS\" appears, or was it planted there by mischievous WordLock® designers? \n\nVocabulary\n===\n\nBefore we can answer the questions, we'll need to be clear about the vocabulary of the problem and how to represent concepts in code:\n\n* **Lock**: For our purposes a lock can be modeled as a `list` of 4 **tumblers**. \n* **Tumbler:** Each tumbler has letters on it; in the lock above there are 4 tumblers, each with 10 distinct letters. 
I will represent a tumbler as a `str` of 10 letters.\n* **Combination**: Choosing a letter from each tumbler gives a combination, such as \"FRED\" or \"BUNS\". There are 4<sup>10</sup> = 10,000 combinations.\n* **Word**: Some combinations (such as \"BUNS\") are *words*; others (such as \"SOMN\") are not words. We'll need a list of dictionary words.\n\nNow on to the code! I took all the `import`s that were scattered throughout this notebook as I developed it and moved them here:",
"%matplotlib inline\nimport matplotlib.pyplot as plt\nfrom __future__ import division, print_function\nfrom collections import Counter, defaultdict\nimport itertools\nimport random \nrandom.seed(42)",
"I will define fredbuns to be a lock with four tumblers. For now, each tumbler will consist of not all ten letters, but only the two letters that spell \"FRED BUNS\":",
"fredbuns = ['FB', 'RU', 'EN', 'DS'] # A lock with two letters on each of four tumblers",
"We need a way to get the combinations that can be made from this lock. It turns out that the built-in function itertools.product does the job; it generates the product of all 2 × 2 × 2 × 2 = 16 combinations of letters:",
"list(itertools.product(*fredbuns))",
"Note: product(*fredbuns) means to apply the function product to the list fredbuns; this is equivalent to itertools.product('FB', 'RU', 'EN', 'DS').\nI would prefer to deal with the string 'BUNS' rather than the tuple ('B', 'U', 'N', 'S'), so I will define a function, combinations, that takes a lock as input and returns a list of strings representing the combinations:",
"def combinations(lock):\n \"Return a list of all combinations that can be made by this lock.\"\n return [cat(c) for c in itertools.product(*lock)]\n\ncat = ''.join # Function to concatenate strings together.\n\ncombinations(fredbuns)",
"Dictionary Words\nI happen to have handy a file of four-letter words (no, not that kind of four-letter word). It is the union of an official Scrabble® word list and a list of proper names. The following shell command tests if the file has already been downloaded to our local directory and if not, fetches it from the web:",
"! [ -e words4.txt ] || curl -O http://norvig.com/ngrams/words4.txt",
"Here are the first few lines of the file:",
"! head words4.txt",
"Python can make a set of words:",
"WORDS = set(open('words4.txt').read().split())\n\nlen(WORDS)",
"So that means that no lock could ever make more than 4,360 words. Let's define words_from(lock):",
"def words_from(lock): \n \"A list of words that can be made by lock.\"\n return [c for c in combinations(lock) if c in WORDS]\n\nwords_from(fredbuns)",
"Note: An alternative is to represent a collection of words as a set, not a list; then words_from could be implemented as: return combinations(lock) & WORDS. \nI will also introduce the function show to print out a lock and its words:",
"def show(lock):\n \"Show a lock and the words it makes.\"\n words = words_from(lock)\n N = len(lock[0]) ** len(lock)\n print('Lock: {}\\n\\nWords: {}\\n\\nNumber of Words: {} / {}'\n .format(space(lock), space(sorted(words)), len(words), N))\n \nspace = ' '.join # Function to concatenate strings with a space between each one.\n\nshow(fredbuns)",
"For this tiny lock with just two letters on each tumbler, we find that 6 out of the 16 possible combinations are words. We're now ready to answer the real questions.\nQuestion 1: How Many Words?\nHere is the answer:",
"wordlock = ['SPHMTWDLFB', 'LEYHNRUOAI', 'ENMLRTAOSK', 'DSNMPYLKTE']\n\nshow(wordlock)",
"How Secure is WordLock?\nThe lock makes 1118 words (according to my word list). You might say that an attacker who knows the combination is a word would find this lock to be only 11.18% as secure as a 4-digit lock with 10,000 combinations. But in reality, every cable lock is vulnerable to an attacker with wire cutters, or with a knowledge of lock-picking, so security is equally poor for WordLock® and for an equivalent lock with numbers. (Get a hardened steel U-lock.)\nRandom Locks\nQuestion 2 asks if a different lock can make more words. As a baseline, before we get to improved locks, I will start with completely random locks, as produced by the function random_lock. Note that I use d=4 to say that by default there are 4 tumblers, and c=10 to indicate 10 letters on each tumbler (you can think of \"c\" for \"circumference\" of the tumbler); I will use d and c throughout, and even though I won't stray from the default values, it is comforting to know I could if I wanted to.",
"def random_lock(d=4, c=10):\n \"Make a lock by sampling randomly and uniformly from the alphabet.\"\n return Lock(Tumbler(random.sample(alphabet, c))\n for i in range(d))\n\nLock = list # A Lock is a list of tumblers\nTumbler = cat # A Tumbler is 10 characters joined into a str\nalphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' # The 26 letters\n\nshow(random_lock())",
"Wow, that's not very many words. Let's repeat 100 times and take the best one, with \"best\" determined by the function word_count:",
"def word_count(lock): return len(words_from(lock))\n\nrandom_locks = [random_lock() for _ in range(100)]\n\nshow(max(random_locks, key=word_count))",
"Still not very good. We will need a more systematic approach.\nQuestion 2: More Words (via Greedy Locks)\nMy first idea for a lock with more words is this: consider each tumbler, one at a time, and fill the tumbler with the letters that make the most words. How do I determine what letters make the most words? A Counter does most of the work; we feed it a list of the first letter of each word, and then ask it for the ten most common letters (and their counts):",
"first_letters = [w[0] for w in WORDS]\n\nCounter(first_letters).most_common(10)",
"In other words, the letters SPTBDCLMAR are the most common ways to start a word. Let's add up those counts:",
"def n_most_common(counter, n): return sum(n for (_, n) in counter.most_common(n))\n\nn_most_common(Counter(first_letters), 10)",
"This means that SPTBDCLMAR covers 2,599 words. We don't know for sure that these are the best 10 letters to put on the first tumbler, but we do know that whatever letters are best, they can't form more than 2,599 words, so we have an upper bound on the number of words in a lock (and the 1,118 from wordlock is a lower bound).\nWhat letters should we put on the second tumbler? We will do the same thing, but this time don't consider all the words in the dictionary; just consider the 2,599 words that start with one of the ten letters on the first tumbler. Continue this way until we fill in all four tumblers. This is called a greedy approach, because when we consider each tumbler, we pick the solution that looks best right then, for that tumbler, without consideration for future tumblers.",
"def greedy_lock(d=4, c=10, words=WORDS):\n \"Make a lock with d tumblers, each consisting of the c letters that cover the most words at that position.\"\n lock = Lock('?' * d)\n for i in range(d):\n # Make a tumbler of c letters, to be used in position i, such that the tumbler covers the most words.\n counter = Counter(word[i] for word in words)\n lock[i] = Tumbler(L for (L, _) in counter.most_common(10))\n # Keep only the words whose ith letter is one of the letters in the ith tumbler\n words = {w for w in words if w[i] in lock[i]}\n return lock\n\nshow(greedy_lock())",
"Remember that the wordlock gave 1118 words, so the greedy lock is better, but not by much (only 5%). Is it possible to do better still? \nQuestion 2: More Words (via Improved Locks)\nHere's another idea to get more words from a lock:\n\nStart with some lock.\nPick, at random, one letter on one tumbler and change it to a new letter.\nIf the change yields more words, keep the change; otherwise discard the change.\nRepeat.\n\nWe can implement this strategy with the function improved_lock:",
"def improved_lock(lock, num_changes=2000):\n \"Randomly change letters in lock, keeping changes that improve the score.\"\n score = word_count(lock)\n for i in range(num_changes):\n lock2 = changed_lock(lock)\n score2 = word_count(lock2)\n if score2 >= score: \n lock, score = lock2, score2\n return lock",
"We'll need a way to produce a new lock with a random change:",
"def changed_lock(lock): \n \"Change one letter in one tumbler.\"\n i = random.randrange(len(lock))\n other_letters = [L for L in alphabet if L not in lock[i]]\n new_tumbler = lock[i].replace(random.choice(lock[i]), \n random.choice(other_letters))\n return lock[:i] + [new_tumbler] + lock[i+1:]",
"(Note: I use the participle form (improved, changed) in analogy to the built-in function sorted, to indicate that the function creates a new object. I would tend to use the verb form (improve, change, sort) to indicate a function that mutates its argument.)\nLet's see how this does to improve the best lock we've seen so far, the greedy lock:",
"%time show(improved_lock(greedy_lock()))",
"Could we do better starting from wordlock?",
"show(improved_lock(wordlock))",
"How about starting from a random lock?",
"show(improved_lock(random_lock()))",
"It seems to be easy to generate a lock with about 1230 words, and it doesn't seem to matter much where you start. How hard is it to get more than about 1240 words? I'll improve 50 random locks and see (this will take around 10 minutes):",
"%%time \n\nimproved_locks = [improved_lock(random_lock(), 5000) \n for i in range(50)]",
"Let's get some basic stats on the scores:",
"def mean(numbers): \n \"The mean, or average, of a sequence of numbers.\"\n return sum(numbers) / len(numbers)\n\nscores = [word_count(lock) for lock in improved_locks]\n\nmin(scores), mean(scores), max(scores)",
"The scores are tightly grouped from 1232 to 1240, with the majority of them scoring 1240. We can also visualize the scores as a histogram:",
"plt.hist(scores); ",
"And see the best lock:",
" show(max(improved_locks, key=word_count))",
"Is 1240 the maximum?\nAt first I thought that there are probably locks with more than 1240 words. Bit after a discussion with Matt Chisholm, I now think that 1240 might be the maximum (given my 4360 word dictionary). I came to this realization after investigating how many different locks there are in improved_locks. The locks all look different, but I will define the function alock to put each tumbler into alphabetical order:",
"def alock(lock):\n \"Canonicalize lock by alphabetizing the letters in each tumbler.\"\n return tuple(cat(sorted(tumbler)) for tumbler in lock)\n\nunique_locks = {alock(lock): word_count(lock)\n for lock in improved_locks}\nunique_locks",
"So out of the 50 improved_locks there are actually only 6 distinct ones. And only two have a score of 1240.",
"L1240 = {lock for lock in unique_locks if unique_locks[lock] == 1240}\nL1240",
"These two differ in just one letter (a P or a W in the second tumbler). \nThis discovery changes my whole thinking about the space of scores for locks. Previously I thought there were many distinct locks with score 1240, and I imagined a spiky \"porcupine-shaped\" landscape with many peaks at 1240, and therefore it seemed likely that there were other peaks, not yet discovered, at 1240 or higher. But now I have a different picture of the landscape: it now looks like a single peak consisting of a tiny plateau (just large enough to fit a P and a W) with rolling hills leading up to the plateau. However, I haven't proven this yet.\nQuestion 3: Simultaneous Words\nCan we make a lock that spells 10 words simultaneously? One possible approach would be to start with any lock and randomly change it (just as we did with improved_lock), but measure improvements by the number of words formed. My intuition is that this approach would work, eventually, but that progress would be very slow, because most random changes to a letter would not make a word.\nAn alternative approach is to think of the lock not as a list of 4 vertical tumblers (each with 10 letters), but rather as a list of 10 horizontal words (each with 4 letters). I'll call this the word list representation, and note that a lock and a word list are matrix transposes of each other—they swap rows for columns. There is an old trick to compute the transpose of a matrix M with the expression zip(*M). But zip returns a list of tuples; we want strings, so we can define transpose as:",
"def transpose(strings): return [cat(letters) for letters in zip(*strings)]",
"And we can see the transpose of the wordlock is a list of words:",
"transpose(['SPHMTWDLFB', \n 'LEYHNRUOAI', \n 'ENMLRTAOSK', \n 'DSNMPYLKTE'])",
"The first row of the word list has the letters SLED, because those are the letters in the first column of the lock. You can see that the WordLock® is designed to spell out LOOK FAST BIKE, among other words.\nNow we're ready to find a good word list with this strategy:\n\nStart with some word list (e.g., a random sample of 10 words from WORDS).\nPick, at random, one word and change it to a new word.\nIf the change is an improvement, keep the change; otherwise discard the change.\nRepeat.\n\nBut what counts as an improvement? We can't improve the number of words, because we are only dealing with words. Rather, we will try to improve the number of duplicate letters on any tumbler (of the lock that corresponds to the word list). We improve by reducing the number of duplicate letters, and stop when there are no duplicates.\nThe following code implements this approach:",
"def improved_wordlist(wordlist):\n \"Find a wordlist that has no duplicate letters, via random changes to wordlist.\"\n score = duplicates(wordlist)\n while score > 0:\n wordlist2 = changed_wordlist(wordlist)\n score2 = duplicates(wordlist2)\n if score2 < score: \n wordlist, score = wordlist2, score2\n return wordlist\n \ndef duplicates(wordlist):\n \"The number of duplicate letters across all the tumblers of the lock that corresponds to this wordlist.\"\n lock = transpose(wordlist) \n def duplicates(tumbler): return len(tumbler) - len(set(tumbler))\n return sum(duplicates(tumbler) for tumbler in lock)\n\ndef changed_wordlist(wordlist, words=list(WORDS)):\n \"Make a copy of wordlist and replace one of the words.\"\n copy = list(wordlist)\n i = random.randrange(len(wordlist))\n copy[i] = random.choice(words)\n return copy",
"The structure of improved_wordlist is similar to improved_lock, with a few differences:\n1. We are minimizing duplicates, not maximizing word count. \n2. We stop when the score is 0, rather than continuing for a given number of iterations.\n3. We want to make a random.choice from WORDS. But random.choice can't operate on a set, so we\nhave to introduce words=list(WORDS).\nNow we can find some wordlists:",
"improved_wordlist(random.sample(WORDS, 10))",
"That was easy! Can we go to 11?",
"improved_wordlist(random.sample(WORDS, 11))",
"Improving Anything\nWe now have two similar functions, improved_lock and improved_wordlist. Could (and should?) we replace them by a single function, say, improved, that could improve locks, wordlists, and anything else?\nThe answer is: yes we could, and maybe we should.\nIt is nice to form an abstraction for the idea of improvement. (Traditionally, the method we have used for improvement has been called hill-climbing, because of the analogy that the score is like the elevation on a topological map, and we are trying to find our way to a peak.)\nHowever, there are many variations on the theme of improvement: maximizing or minimizing? Repeat for a given number of iterations, or continue until we meet a goal? I don't want improved to have an argument list a mile long, and I felt that five arguments is right on the border of acceptable. The arguments are:\n1. item: The object to start with; this is what we will try to improve.\n2. changed: a function that generates a new item.\n3. scorer: a function that evaluates the quality of an item.\n4. stop: a predicate with args (i, score, item), where i is the iteration number, and score is scorer(item). Return True to stop.\n5. extremum: By default, this is max, meaning we are trying to maximize score; it could also be min.",
"def improved(item, changed, scorer, stop, extremum=max):\n \"\"\"Apply the function changed to item and evaluate with the function scorer;\n When stop(i, score, item) is true, return item.\"\"\"\n score = scorer(item)\n for i in itertools.count(0):\n if stop(i, score, item):\n return item\n item2 = changed(item)\n score2 = scorer(item2)\n if score2 == extremum(score, score2):\n item, score = item2, score2",
"Now we can re-implement improved_lock and improved_wordlist using improved:",
"def improved_lock(lock, num_changes=2000):\n \"Randomly change letters in lock, keeping changes that improve the score.\"\n def stop_after_num_changes(i, _, __): return i == num_changes\n return improved(lock, changed_lock, word_count, stop_after_num_changes, max)\n\ndef improved_wordlist(wordlist):\n \"Find a wordlist that has no duplicate letters, via random changes to wordlist.\"\n def zero_score(_, score, __): return score == 0\n return improved(wordlist, changed_wordlist, duplicates, zero_score, min)\n\nshow(improved_lock(random_lock()))\n\nimproved_wordlist(random.sample(WORDS, 10))",
"Question 4: Coincidence?\nThere is still one unanswered question: did the designers of WordLock® deliberately put \"FRED BUNS\" in, or was it a coincidence? Astute Hacker News reader emhart commented that he had found the patent assigned to WordLock; it describes an algorithm similar to my greedy_lock.\nAfter seeing that, I'm inclined to believe that \"FRED BUNS\" is the coincidental result of running the algorithm. On\nthe other hand, there is a followup patent that discusses a refinement\n\"wherein the letters on the wheels are configured to spell a first word displayed on a first row of letters and a second word displayed on a second row of letters.\" So the possibility of a two-word phrase was somthing that Wordlock LLc. was aware of.\nWe see below that the procedure described in the patent is not quite as good as greedy_lock, because the patent states that at each tumbler position \"the entire word list is scanned\" to produce the letter frequencies, whereas greedy_lock scans only the words that are consistent with the previous tumblers, and thus greedy_lock produces more words, 1177 to 1161.",
"def patented_lock(d=4, c=10, words=WORDS):\n \"Make a lock with d tumblers, each consisting of the c letters that cover the most words at that position.\"\n lock = Lock('?' * d)\n for i in range(d):\n # Make a tumbler of c letters, to be used in position i, such that the tumbler covers the most words.\n counter = Counter(word[i] for word in words)\n lock[i] = Tumbler(L for (L, _) in counter.most_common(10))\n #words = {w for w in words if w[i] in lock[i]} ## The patent skips this step\n return lock\n\nword_count(greedy_lock()), word_count(patented_lock())",
"Tests\nIt is a \ngood idea to have some tests, in case you want to change some code and see if you have introduced an error. Also, tests serve as examples of usage of functions. The following tests have poor coverage, because it is harder to test non-deterministic functions, and I didn't attempt that here.",
"def tests():\n assert 'WORD' in WORDS\n assert 'FRED' in WORDS\n assert 'BUNS' in WORDS\n assert 'XYZZ' not in WORDS\n assert 'word' not in WORDS\n assert 'FIVER' not in WORDS\n assert len(WORDS) == 4360\n \n assert fredbuns == ['FB', 'RU', 'EN', 'DS']\n assert combinations(fredbuns) == ['FRED','FRES','FRND','FRNS','FUED','FUES','FUND','FUNS',\n 'BRED','BRES','BRND','BRNS','BUED','BUES','BUND','BUNS']\n assert words_from(fredbuns) == ['FRED', 'FUND', 'FUNS', 'BRED', 'BUND', 'BUNS']\n\n assert wordlock == ['SPHMTWDLFB', 'LEYHNRUOAI', 'ENMLRTAOSK', 'DSNMPYLKTE']\n assert len(combinations(wordlock)) == 10000\n assert word_count(wordlock) == 1118\n \n assert transpose(['HIE', 'BYE']) == ['HB', 'IY', 'EE']\n assert transpose(transpose(wordlock)) == wordlock\n assert mean([3, 4, 5]) == 4\n assert mean([True, False, False, False]) == 0.25\n \n return 'tests pass'\n \ntests()",
"One More Question\nI wonder if @BIKESNOBNYC would appreciate this notebook? On the one hand, he is the kind of guy who, in discussing the fact that bicycling is the seventh most popular recreational activity, wrote \"the number seven is itself a highly significant number. It is the lowest number that cannot be represented as the sum of the square of three integers,\" so it seems he has some interest in mathematical oddities. On the other hand, he followed that up by writing \"I have no idea what that means, but it's true,\" so maybe not.",
"def not_sum_3_squares(N):\n \"Positive integers < N that are not the sum of three squares.\"\n squares = [i ** 2 for i in range(int(N ** 0.5)+1)]\n sums = {A + B + C for A in squares for B in squares for C in squares}\n return set(range(N)) - sums\n\nnot_sum_3_squares(100)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
chetnapriyadarshini/deep-learning
|
batch-norm/Batch_Normalization_Lesson.ipynb
|
mit
|
[
"Batch Normalization – Lesson\n\nWhat is it?\nWhat are it's benefits?\nHow do we add it to a network?\nLet's see it work!\nWhat are you hiding?\n\nWhat is Batch Normalization?<a id='theory'></a>\nBatch normalization was introduced in Sergey Ioffe's and Christian Szegedy's 2015 paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift. The idea is that, instead of just normalizing the inputs to the network, we normalize the inputs to layers within the network. It's called \"batch\" normalization because during training, we normalize each layer's inputs by using the mean and variance of the values in the current mini-batch.\nWhy might this help? Well, we know that normalizing the inputs to a network helps the network learn. But a network is a series of layers, where the output of one layer becomes the input to another. That means we can think of any layer in a neural network as the first layer of a smaller network.\nFor example, imagine a 3 layer network. Instead of just thinking of it as a single network with inputs, layers, and outputs, think of the output of layer 1 as the input to a two layer network. This two layer network would consist of layers 2 and 3 in our original network. \nLikewise, the output of layer 2 can be thought of as the input to a single layer network, consistng only of layer 3.\nWhen you think of it like that - as a series of neural networks feeding into each other - then it's easy to imagine how normalizing the inputs to each layer would help. It's just like normalizing the inputs to any other neural network, but you're doing it at every layer (sub-network).\nBeyond the intuitive reasons, there are good mathematical reasons why it helps the network learn better, too. It helps combat what the authors call internal covariate shift. This discussion is best handled in the paper and in Deep Learning a book you can read online written by Ian Goodfellow, Yoshua Bengio, and Aaron Courville. 
Specifically, check out the batch normalization section of Chapter 8: Optimization for Training Deep Models.\nBenefits of Batch Normalization<a id=\"benefits\"></a>\nBatch normalization optimizes network training. It has been shown to have several benefits:\n1. Networks train faster – Each training iteration will actually be slower because of the extra calculations during the forward pass and the additional hyperparameters to train during back propagation. However, it should converge much more quickly, so training should be faster overall. \n2. Allows higher learning rates – Gradient descent usually requires small learning rates for the network to converge. And as networks get deeper, their gradients get smaller during back propagation so they require even more iterations. Using batch normalization allows us to use much higher learning rates, which further increases the speed at which networks train. \n3. Makes weights easier to initialize – Weight initialization can be difficult, and it's even more difficult when creating deeper networks. Batch normalization seems to allow us to be much less careful about choosing our initial starting weights.\n4. Makes more activation functions viable – Some activation functions do not work well in some situations. Sigmoids lose their gradient pretty quickly, which means they can't be used in deep networks. And ReLUs often die out during training, where they stop learning completely, so we need to be careful about the range of values fed into them. Because batch normalization regulates the values going into each activation function, non-linearlities that don't seem to work well in deep networks actually become viable again.\n5. Simplifies the creation of deeper networks – Because of the first 4 items listed above, it is easier to build and faster to train deeper neural networks when using batch normalization. And it's been shown that deeper networks generally produce better results, so that's great.\n6. 
Provides a bit of regularlization – Batch normalization adds a little noise to your network. In some cases, such as in Inception modules, batch normalization has been shown to work as well as dropout. But in general, consider batch normalization as a bit of extra regularization, possibly allowing you to reduce some of the dropout you might add to a network. \n7. May give better results overall – Some tests seem to show batch normalization actually improves the train.ing results. However, it's really an optimization to help train faster, so you shouldn't think of it as a way to make your network better. But since it lets you train networks faster, that means you can iterate over more designs more quickly. It also lets you build deeper networks, which are usually better. So when you factor in everything, you're probably going to end up with better results if you build your networks with batch normalization.\nBatch Normalization in TensorFlow<a id=\"implementation_1\"></a>\nThis section of the notebook shows you one way to add batch normalization to a neural network built in TensorFlow. \nThe following cell imports the packages we need in the notebook and loads the MNIST dataset to use in our experiments. However, the tensorflow package contains all the code you'll actually need for batch normalization.",
"# Import necessary packages\nimport tensorflow as tf\nimport tqdm\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\n# Import MNIST data so we have something for our experiments\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)",
"Neural network classes for testing\nThe following class, NeuralNet, allows us to create identical neural networks with and without batch normalization. The code is heavily documented, but there is also some additional discussion later. You do not need to read through it all before going through the rest of the notebook, but the comments within the code blocks may answer some of your questions.\nAbout the code:\n\nThis class is not meant to represent TensorFlow best practices – the design choices made here are to support the discussion related to batch normalization.\nIt's also important to note that we use the well-known MNIST data for these examples, but the networks we create are not meant to be good for performing handwritten character recognition. We chose this network architecture because it is similar to the one used in the original paper, which is complex enough to demonstrate some of the benefits of batch normalization while still being fast to train.",
"class NeuralNet:\n def __init__(self, initial_weights, activation_fn, use_batch_norm):\n \"\"\"\n Initializes this object, creating a TensorFlow graph using the given parameters.\n \n :param initial_weights: list of NumPy arrays or Tensors\n Initial values for the weights for every layer in the network. We pass these in\n so we can create multiple networks with the same starting weights to eliminate\n training differences caused by random initialization differences.\n The number of items in the list defines the number of layers in the network,\n and the shapes of the items in the list define the number of nodes in each layer.\n e.g. Passing in 3 matrices of shape (784, 256), (256, 100), and (100, 10) would \n create a network with 784 inputs going into a hidden layer with 256 nodes,\n followed by a hidden layer with 100 nodes, followed by an output layer with 10 nodes.\n :param activation_fn: Callable\n The function used for the output of each hidden layer. The network will use the same\n activation function on every hidden layer and no activate function on the output layer.\n e.g. 
Pass tf.nn.relu to use ReLU activations on your hidden layers.\n :param use_batch_norm: bool\n Pass True to create a network that uses batch normalization; False otherwise\n Note: this network will not use batch normalization on layers that do not have an\n activation function.\n \"\"\"\n # Keep track of whether or not this network uses batch normalization.\n self.use_batch_norm = use_batch_norm\n self.name = \"With Batch Norm\" if use_batch_norm else \"Without Batch Norm\"\n\n # Batch normalization needs to do different calculations during training and inference,\n # so we use this placeholder to tell the graph which behavior to use.\n self.is_training = tf.placeholder(tf.bool, name=\"is_training\")\n\n # This list is just for keeping track of data we want to plot later.\n # It doesn't actually have anything to do with neural nets or batch normalization.\n self.training_accuracies = []\n\n # Create the network graph, but it will not actually have any real values until after you\n # call train or test\n self.build_network(initial_weights, activation_fn)\n \n def build_network(self, initial_weights, activation_fn):\n \"\"\"\n Build the graph. The graph still needs to be trained via the `train` method.\n \n :param initial_weights: list of NumPy arrays or Tensors\n See __init__ for description. \n :param activation_fn: Callable\n See __init__ for description. \n \"\"\"\n self.input_layer = tf.placeholder(tf.float32, [None, initial_weights[0].shape[0]])\n layer_in = self.input_layer\n for weights in initial_weights[:-1]:\n layer_in = self.fully_connected(layer_in, weights, activation_fn) \n self.output_layer = self.fully_connected(layer_in, initial_weights[-1])\n \n def fully_connected(self, layer_in, initial_weights, activation_fn=None):\n \"\"\"\n Creates a standard, fully connected layer. Its number of inputs and outputs will be\n defined by the shape of `initial_weights`, and its starting weight values will be\n taken directly from that same parameter. 
If `self.use_batch_norm` is True, this\n layer will include batch normalization, otherwise it will not. \n \n :param layer_in: Tensor\n The Tensor that feeds into this layer. It's either the input to the network or the output\n of a previous layer.\n :param initial_weights: NumPy array or Tensor\n Initial values for this layer's weights. The shape defines the number of nodes in the layer.\n e.g. Passing in 3 matrix of shape (784, 256) would create a layer with 784 inputs and 256 \n outputs. \n :param activation_fn: Callable or None (default None)\n The non-linearity used for the output of the layer. If None, this layer will not include \n batch normalization, regardless of the value of `self.use_batch_norm`. \n e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.\n \"\"\"\n # Since this class supports both options, only use batch normalization when\n # requested. However, do not use it on the final layer, which we identify\n # by its lack of an activation function.\n if self.use_batch_norm and activation_fn:\n # Batch normalization uses weights as usual, but does NOT add a bias term. This is because \n # its calculations include gamma and beta variables that make the bias term unnecessary.\n # (See later in the notebook for more details.)\n weights = tf.Variable(initial_weights)\n linear_output = tf.matmul(layer_in, weights)\n\n # Apply batch normalization to the linear combination of the inputs and weights\n batch_normalized_output = tf.layers.batch_normalization(linear_output, training=self.is_training)\n\n # Now apply the activation function, *after* the normalization.\n return activation_fn(batch_normalized_output)\n else:\n # When not using batch normalization, create a standard layer that multiplies\n # the inputs and weights, adds a bias, and optionally passes the result \n # through an activation function. 
\n weights = tf.Variable(initial_weights)\n biases = tf.Variable(tf.zeros([initial_weights.shape[-1]]))\n linear_output = tf.add(tf.matmul(layer_in, weights), biases)\n return linear_output if not activation_fn else activation_fn(linear_output)\n\n def train(self, session, learning_rate, training_batches, batches_per_sample, save_model_as=None):\n \"\"\"\n Trains the model on the MNIST training dataset.\n \n :param session: Session\n Used to run training graph operations.\n :param learning_rate: float\n Learning rate used during gradient descent.\n :param training_batches: int\n Number of batches to train.\n :param batches_per_sample: int\n How many batches to train before sampling the validation accuracy.\n :param save_model_as: string or None (default None)\n Name to use if you want to save the trained model.\n \"\"\"\n # This placeholder will store the target labels for each mini batch\n labels = tf.placeholder(tf.float32, [None, 10])\n\n # Define loss and optimizer\n cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=self.output_layer))\n \n # Define operations for testing\n correct_prediction = tf.equal(tf.argmax(self.output_layer, 1), tf.argmax(labels, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n if self.use_batch_norm:\n # If we don't include the update ops as dependencies on the train step, the \n # tf.layers.batch_normalization layers won't update their population statistics,\n # which will cause the model to fail at inference time\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)\n else:\n train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)\n \n # Train for the appropriate number of batches. 
(tqdm is only for a nice timing display)\n for i in tqdm.tqdm(range(training_batches)):\n # We use batches of 60 just because the original paper did. You can use any size batch you like.\n batch_xs, batch_ys = mnist.train.next_batch(60)\n session.run(train_step, feed_dict={self.input_layer: batch_xs, \n labels: batch_ys, \n self.is_training: True})\n \n # Periodically test accuracy against the 5k validation images and store it for plotting later.\n if i % batches_per_sample == 0:\n test_accuracy = session.run(accuracy, feed_dict={self.input_layer: mnist.validation.images,\n labels: mnist.validation.labels,\n self.is_training: False})\n self.training_accuracies.append(test_accuracy)\n\n # After training, report accuracy against test data\n test_accuracy = session.run(accuracy, feed_dict={self.input_layer: mnist.validation.images,\n labels: mnist.validation.labels,\n self.is_training: False})\n print('{}: After training, final accuracy on validation set = {}'.format(self.name, test_accuracy))\n\n # If you want to use this model later for inference instead of having to retrain it,\n # just construct it with the same parameters and then pass this file to the 'test' function\n if save_model_as:\n tf.train.Saver().save(session, save_model_as)\n\n def test(self, session, test_training_accuracy=False, include_individual_predictions=False, restore_from=None):\n \"\"\"\n Trains a trained model on the MNIST testing dataset.\n\n :param session: Session\n Used to run the testing graph operations.\n :param test_training_accuracy: bool (default False)\n If True, perform inference with batch normalization using batch mean and variance;\n if False, perform inference with batch normalization using estimated population mean and variance.\n Note: in real life, *always* perform inference using the population mean and variance.\n This parameter exists just to support demonstrating what happens if you don't.\n :param include_individual_predictions: bool (default True)\n This function 
always performs an accuracy test against the entire test set. But if this parameter\n is True, it performs an extra test, doing 200 predictions one at a time, and displays the results\n and accuracy.\n :param restore_from: string or None (default None)\n Name of a saved model if you want to test with previously saved weights.\n \"\"\"\n # This placeholder will store the true labels for each mini batch\n labels = tf.placeholder(tf.float32, [None, 10])\n\n # Define operations for testing\n correct_prediction = tf.equal(tf.argmax(self.output_layer, 1), tf.argmax(labels, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n # If provided, restore from a previously saved model\n if restore_from:\n tf.train.Saver().restore(session, restore_from)\n\n # Test against all of the MNIST test data\n test_accuracy = session.run(accuracy, feed_dict={self.input_layer: mnist.test.images,\n labels: mnist.test.labels,\n self.is_training: test_training_accuracy})\n print('-'*75)\n print('{}: Accuracy on full test set = {}'.format(self.name, test_accuracy))\n\n # If requested, perform tests predicting individual values rather than batches\n if include_individual_predictions:\n predictions = []\n correct = 0\n\n # Do 200 predictions, 1 at a time\n for i in range(200):\n # This is a normal prediction using an individual test case. However, notice\n # we pass `test_training_accuracy` to `feed_dict` as the value for `self.is_training`.\n # Remember that will tell it whether it should use the batch mean & variance or\n # the population estimates that were calucated while training the model.\n pred, corr = session.run([tf.arg_max(self.output_layer,1), accuracy],\n feed_dict={self.input_layer: [mnist.test.images[i]],\n labels: [mnist.test.labels[i]],\n self.is_training: test_training_accuracy})\n correct += corr\n\n predictions.append(pred[0])\n\n print(\"200 Predictions:\", predictions)\n print(\"Accuracy on 200 samples:\", correct/200)\n",
"There are quite a few comments in the code, so those should answer most of your questions. However, let's take a look at the most important lines.\nWe add batch normalization to layers inside the fully_connected function. Here are some important points about that code:\n1. Layers with batch normalization do not include a bias term.\n2. We use TensorFlow's tf.layers.batch_normalization function to handle the math. (We show lower-level ways to do this later in the notebook.)\n3. We tell tf.layers.batch_normalization whether or not the network is training. This is an important step we'll talk about later.\n4. We add the normalization before calling the activation function.\nIn addition to that code, the training step is wrapped in the following with statement:\npython\nwith tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\nThis line actually works in conjunction with the training parameter we pass to tf.layers.batch_normalization. Without it, TensorFlow's batch normalization layer will not operate correctly during inference.\nFinally, whenever we train the network or perform inference, we use the feed_dict to set self.is_training to True or False, respectively, like in the following line:\npython\nsession.run(train_step, feed_dict={self.input_layer: batch_xs, \n labels: batch_ys, \n self.is_training: True})\nWe'll go into more details later, but next we want to show some experiments that use this code and test networks with and without batch normalization.\nBatch Normalization Demos<a id='demos'></a>\nThis section of the notebook trains various networks with and without batch normalization to demonstrate some of the benefits mentioned earlier. \nWe'd like to thank the author of this blog post Implementing Batch Normalization in TensorFlow. 
That post provided the idea of - and some of the code for - plotting the differences in accuracy during training, along with the idea for comparing multiple networks using the same initial weights.\nCode to support testing\nThe following two functions support the demos we run in the notebook. \nThe first function, plot_training_accuracies, simply plots the values found in the training_accuracies lists of the NeuralNet objects passed to it. If you look at the train function in NeuralNet, you'll see that while it's training the network, it periodically measures validation accuracy and stores the results in that list. It does that just to support these plots.\nThe second function, train_and_test, creates two neural nets - one with and one without batch normalization. It then trains them both and tests them, calling plot_training_accuracies to plot how their accuracies changed over the course of training. The really important thing about this function is that it initializes the starting weights for the networks outside of the networks and then passes them in. This lets it train both networks from the exact same starting weights, which eliminates performance differences that might result from (un)lucky initial weights.",
"def plot_training_accuracies(*args, **kwargs):\n \"\"\"\n Displays a plot of the accuracies calculated during training to demonstrate\n how many iterations it took for the model(s) to converge.\n \n :param args: One or more NeuralNet objects\n You can supply any number of NeuralNet objects as unnamed arguments \n and this will display their training accuracies. Be sure to call `train` \n the NeuralNets before calling this function.\n :param kwargs: \n You can supply any named parameters here, but `batches_per_sample` is the only\n one we look for. It should match the `batches_per_sample` value you passed\n to the `train` function.\n \"\"\"\n fig, ax = plt.subplots()\n\n batches_per_sample = kwargs['batches_per_sample']\n \n for nn in args:\n ax.plot(range(0,len(nn.training_accuracies)*batches_per_sample,batches_per_sample),\n nn.training_accuracies, label=nn.name)\n ax.set_xlabel('Training steps')\n ax.set_ylabel('Accuracy')\n ax.set_title('Validation Accuracy During Training')\n ax.legend(loc=4)\n ax.set_ylim([0,1])\n plt.yticks(np.arange(0, 1.1, 0.1))\n plt.grid(True)\n plt.show()\n\ndef train_and_test(use_bad_weights, learning_rate, activation_fn, training_batches=50000, batches_per_sample=500):\n \"\"\"\n Creates two networks, one with and one without batch normalization, then trains them\n with identical starting weights, layers, batches, etc. Finally tests and plots their accuracies.\n \n :param use_bad_weights: bool\n If True, initialize the weights of both networks to wildly inappropriate weights;\n if False, use reasonable starting weights.\n :param learning_rate: float\n Learning rate used during gradient descent.\n :param activation_fn: Callable\n The function used for the output of each hidden layer. The network will use the same\n activation function on every hidden layer and no activate function on the output layer.\n e.g. 
Pass tf.nn.relu to use ReLU activations on your hidden layers.\n :param training_batches: (default 50000)\n Number of batches to train.\n :param batches_per_sample: (default 500)\n How many batches to train before sampling the validation accuracy.\n \"\"\"\n # Use identical starting weights for each network to eliminate differences in\n # weight initialization as a cause for differences seen in training performance\n #\n # Note: The networks will use these weights to define the number of and shapes of\n # its layers. The original batch normalization paper used 3 hidden layers\n # with 100 nodes in each, followed by a 10 node output layer. These values\n # build such a network, but feel free to experiment with different choices.\n # However, the input size should always be 784 and the final output should be 10.\n if use_bad_weights:\n # These weights should be horrible because they have such a large standard deviation\n weights = [np.random.normal(size=(784,100), scale=5.0).astype(np.float32),\n np.random.normal(size=(100,100), scale=5.0).astype(np.float32),\n np.random.normal(size=(100,100), scale=5.0).astype(np.float32),\n np.random.normal(size=(100,10), scale=5.0).astype(np.float32)\n ]\n else:\n # These weights should be good because they have such a small standard deviation\n weights = [np.random.normal(size=(784,100), scale=0.05).astype(np.float32),\n np.random.normal(size=(100,100), scale=0.05).astype(np.float32),\n np.random.normal(size=(100,100), scale=0.05).astype(np.float32),\n np.random.normal(size=(100,10), scale=0.05).astype(np.float32)\n ]\n\n # Just to make sure the TensorFlow's default graph is empty before we start another\n # test, because we don't bother using different graphs or scoping and naming \n # elements carefully in this sample code.\n tf.reset_default_graph()\n\n # build two versions of same network, 1 without and 1 with batch normalization\n nn = NeuralNet(weights, activation_fn, False)\n bn = NeuralNet(weights, activation_fn, True)\n 
\n # train and test the two models\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n\n nn.train(sess, learning_rate, training_batches, batches_per_sample)\n bn.train(sess, learning_rate, training_batches, batches_per_sample)\n \n nn.test(sess)\n bn.test(sess)\n \n # Display a graph of how validation accuracies changed during training\n # so we can compare how the models trained and when they converged\n plot_training_accuracies(nn, bn, batches_per_sample=batches_per_sample)\n",
"Comparisons between identical networks, with and without batch normalization\nThe next series of cells train networks with various settings to show the differences with and without batch normalization. They are meant to clearly demonstrate the effects of batch normalization. We include a deeper discussion of batch normalization later in the notebook.\nThe following creates two networks using a ReLU activation function, a learning rate of 0.01, and reasonable starting weights.",
"train_and_test(False, 0.01, tf.nn.relu)",
"As expected, both networks train well and eventually reach similar test accuracies. However, notice that the model with batch normalization converges slightly faster than the other network, reaching accuracies over 90% almost immediately and nearing its max accuracy in 10 or 15 thousand iterations. The other network takes about 3 thousand iterations to reach 90% and doesn't near its best accuracy until 30 thousand or more iterations.\nIf you look at the raw speed, you can see that without batch normalization we were computing over 1100 batches per second, whereas with batch normalization that goes down to just over 500. However, batch normalization allows us to perform fewer iterations and converge in less time over all. (We only trained for 50 thousand batches here so we could plot the comparison.)\nThe following creates two networks with the same hyperparameters used in the previous example, but only trains for 2000 iterations.",
"train_and_test(False, 0.01, tf.nn.relu, 2000, 50)",
"As you can see, using batch normalization produces a model with over 95% accuracy in only 2000 batches, and it was above 90% at somewhere around 500 batches. Without batch normalization, the model takes 1750 iterations just to hit 80% – the network with batch normalization hits that mark after around 200 iterations! (Note: if you run the code yourself, you'll see slightly different results each time because the starting weights - while the same for each model - are different for each run.)\nIn the above example, you should also notice that the networks trained fewer batches per second than what you saw in the previous example. That's because much of the time we're tracking is actually spent periodically performing inference to collect data for the plots. In this example we perform that inference every 50 batches instead of every 500, so generating the plot for this example requires 10 times the overhead for the same 2000 iterations.\nThe following creates two networks using a sigmoid activation function, a learning rate of 0.01, and reasonable starting weights.",
"train_and_test(False, 0.01, tf.nn.sigmoid)",
"With the number of layers we're using and this small learning rate, using a sigmoid activation function takes a long time to start learning. It eventually starts making progress, but it took over 45 thousand batches just to get over 80% accuracy. Using batch normalization gets to 90% in around one thousand batches. \nThe following creates two networks using a ReLU activation function, a learning rate of 1, and reasonable starting weights.",
"train_and_test(False, 1, tf.nn.relu)",
"Now we're using ReLUs again, but with a larger learning rate. The plot shows how training started out pretty normally, with the network with batch normalization starting out faster than the other. But the higher learning rate bounces the accuracy around a bit more, and at some point the accuracy in the network without batch normalization just completely crashes. It's likely that too many ReLUs died off at this point because of the high learning rate.\nThe next cell shows the same test again. The network with batch normalization performs the same way, and the other suffers from the same problem again, but it manages to train longer before it happens.",
"train_and_test(False, 1, tf.nn.relu)",
"In both of the previous examples, the network with batch normalization manages to get over 98% accuracy, and get near that result almost immediately. The higher learning rate allows the network to train extremely fast.\nThe following creates two networks using a sigmoid activation function, a learning rate of 1, and reasonable starting weights.",
"train_and_test(False, 1, tf.nn.sigmoid)",
"In this example, we switched to a sigmoid activation function. It appears to handle the higher learning rate well, with both networks achieving high accuracy.\nThe cell below shows a similar pair of networks trained for only 2000 iterations.",
"train_and_test(False, 1, tf.nn.sigmoid, 2000, 50)",
"As you can see, even though these parameters work well for both networks, the one with batch normalization gets over 90% in 400 or so batches, whereas the other takes over 1700. When training larger networks, these sorts of differences become more pronounced.\nThe following creates two networks using a ReLU activation function, a learning rate of 2, and reasonable starting weights.",
"train_and_test(False, 2, tf.nn.relu)",
"With this very large learning rate, the network with batch normalization trains fine and almost immediately manages 98% accuracy. However, the network without normalization doesn't learn at all.\nThe following creates two networks using a sigmoid activation function, a learning rate of 2, and reasonable starting weights.",
"train_and_test(False, 2, tf.nn.sigmoid)",
"Once again, using a sigmoid activation function with the larger learning rate works well both with and without batch normalization.\nHowever, look at the plot below where we train models with the same parameters but only 2000 iterations. As usual, batch normalization lets it train faster.",
"train_and_test(False, 2, tf.nn.sigmoid, 2000, 50)",
"In the rest of the examples, we use really bad starting weights. That is, normally we would use very small values close to zero. However, in these examples we choose random values with a standard deviation of 5. If you were really training a neural network, you would not want to do this. But these examples demonstrate how batch normalization makes your network much more resilient. \nThe following creates two networks using a ReLU activation function, a learning rate of 0.01, and bad starting weights.",
"train_and_test(True, 0.01, tf.nn.relu)",
"As the plot shows, without batch normalization the network never learns anything at all. But with batch normalization, it actually learns pretty well and gets to almost 80% accuracy. The starting weights obviously hurt the network, but you can see how well batch normalization does in overcoming them. \nThe following creates two networks using a sigmoid activation function, a learning rate of 0.01, and bad starting weights.",
"train_and_test(True, 0.01, tf.nn.sigmoid)",
"Using a sigmoid activation function works better than the ReLU in the previous example, but without batch normalization it would take a tremendously long time to train the network, if it ever trained at all. \nThe following creates two networks using a ReLU activation function, a learning rate of 1, and bad starting weights.<a id=\"successful_example_lr_1\"></a>",
"train_and_test(True, 1, tf.nn.relu)",
"The higher learning rate used here allows the network with batch normalization to surpass 90% in about 30 thousand batches. The network without it never gets anywhere.\nThe following creates two networks using a sigmoid activation function, a learning rate of 1, and bad starting weights.",
"train_and_test(True, 1, tf.nn.sigmoid)",
"Using sigmoid works better than ReLUs for this higher learning rate. However, you can see that without batch normalization, the network takes a long time to train, bounces around a lot, and spends a long time stuck at 90%. The network with batch normalization trains much more quickly, seems to be more stable, and achieves a higher accuracy.\nThe following creates two networks using a ReLU activation function, a learning rate of 2, and bad starting weights.<a id=\"successful_example_lr_2\"></a>",
"train_and_test(True, 2, tf.nn.relu)",
"We've already seen that ReLUs do not do as well as sigmoids with higher learning rates, and here we are using an extremely high rate. As expected, without batch normalization the network doesn't learn at all. But with batch normalization, it eventually achieves 90% accuracy. Notice, though, how its accuracy bounces around wildly during training - that's because the learning rate is really much too high, so the fact that this worked at all is a bit of luck.\nThe following creates two networks using a sigmoid activation function, a learning rate of 2, and bad starting weights.",
"train_and_test(True, 2, tf.nn.sigmoid)",
"In this case, the network with batch normalization trained faster and reached a higher accuracy. Meanwhile, the high learning rate makes the network without normalization bounce around erratically and have trouble getting past 90%.\nFull Disclosure: Batch Normalization Doesn't Fix Everything\nBatch normalization isn't magic and it doesn't work every time. Weights are still randomly initialized and batches are chosen at random during training, so you never know exactly how training will go. Even for these tests, where we use the same initial weights for both networks, we still get different weights each time we run.\nThis section includes two examples that show runs when batch normalization did not help at all.\nThe following creates two networks using a ReLU activation function, a learning rate of 1, and bad starting weights.",
"train_and_test(True, 1, tf.nn.relu)",
"When we used these same parameters earlier, we saw the network with batch normalization reach 92% validation accuracy. This time we used different starting weights, initialized using the same standard deviation as before, and the network doesn't learn at all. (Remember, an accuracy around 10% is what the network gets if it just guesses the same value all the time.)\nThe following creates two networks using a ReLU activation function, a learning rate of 2, and bad starting weights.",
"train_and_test(True, 2, tf.nn.relu)",
"When we trained with these parameters and batch normalization earlier, we reached 90% validation accuracy. However, this time the network almost starts to make some progress in the beginning, but it quickly breaks down and stops learning. \nNote: Both of the above examples use extremely bad starting weights, along with learning rates that are too high. While we've shown batch normalization can overcome bad values, we don't mean to encourage actually using them. The examples in this notebook are meant to show that batch normalization can help your networks train better. But these last two examples should remind you that you still want to try to use good network design choices and reasonable starting weights. It should also remind you that the results of each attempt to train a network are a bit random, even when using otherwise identical architectures.\nBatch Normalization: A Detailed Look<a id='implementation_2'></a>\nThe layer created by tf.layers.batch_normalization handles all the details of implementing batch normalization. Many students will be fine just using that and won't care about what's happening at the lower levels. However, some students may want to explore the details, so here is a short explanation of what's really happening, starting with the equations you're likely to come across if you ever read about batch normalization. \nIn order to normalize the values, we first need to find the average value for the batch. 
If you look at the code, you can see that this is not the average value of the batch inputs, but the average value coming out of any particular layer before we pass it through its non-linear activation function and then feed it as an input to the next layer.\nWe represent the average as $\\mu_B$, which is simply the sum of all of the values $x_i$ divided by the number of values, $m$ \n$$\n\\mu_B \\leftarrow \\frac{1}{m}\\sum_{i=1}^m x_i\n$$\nWe then need to calculate the variance, or mean squared deviation, represented as $\\sigma_{B}^{2}$. If you aren't familiar with statistics, that simply means for each value $x_i$, we subtract the average value (calculated earlier as $\\mu_B$), which gives us what's called the \"deviation\" for that value. We square the result to get the squared deviation. Sum up the results of doing that for each of the values, then divide by the number of values, again $m$, to get the average, or mean, squared deviation.\n$$\n\\sigma_{B}^{2} \\leftarrow \\frac{1}{m}\\sum_{i=1}^m (x_i - \\mu_B)^2\n$$\nOnce we have the mean and variance, we can use them to normalize the values with the following equation. For each value, it subtracts the mean and divides by the (almost) standard deviation. (You've probably heard of standard deviation many times, but if you have not studied statistics you might not know that the standard deviation is actually the square root of the mean squared deviation.)\n$$\n\\hat{x_i} \\leftarrow \\frac{x_i - \\mu_B}{\\sqrt{\\sigma_{B}^{2} + \\epsilon}}\n$$\nAbove, we said \"(almost) standard deviation\". That's because the real standard deviation for the batch is calculated by $\\sqrt{\\sigma_{B}^{2}}$, but the above formula adds the term epsilon, $\\epsilon$, before taking the square root. The epsilon can be any small, positive constant - in our code we use the value 0.001. It is there partially to make sure we don't try to divide by zero, but it also acts to increase the variance slightly for each batch. 
\nWhy increase the variance? Statistically, this makes sense because even though we are normalizing one batch at a time, we are also trying to estimate the population distribution – the total training set, which is itself an estimate of the larger population of inputs your network wants to handle. The variance of a population is higher than the variance for any sample taken from that population, so increasing the variance a little bit for each batch helps take that into account. \nAt this point, we have a normalized value, represented as $\\hat{x_i}$. But rather than use it directly, we multiply it by a gamma value, $\\gamma$, and then add a beta value, $\\beta$. Both $\\gamma$ and $\\beta$ are learnable parameters of the network and serve to scale and shift the normalized value, respectively. Because they are learnable just like weights, they give your network some extra knobs to tweak during training to help it learn the function it is trying to approximate. \n$$\ny_i \\leftarrow \\gamma \\hat{x_i} + \\beta\n$$\nWe now have the final batch-normalized output of our layer, which we would then pass to a non-linear activation function like sigmoid, tanh, ReLU, Leaky ReLU, etc. In the original batch normalization paper (linked in the beginning of this notebook), they mention that there might be cases when you'd want to perform the batch normalization after the non-linearity instead of before, but it is difficult to find any uses like that in practice.\nIn NeuralNet's implementation of fully_connected, all of this math is hidden inside the following line, where linear_output serves as the $x_i$ from the equations:\npython\nbatch_normalized_output = tf.layers.batch_normalization(linear_output, training=self.is_training)\nThe next section shows you how to implement the math directly. 
\nBatch normalization without the tf.layers package\nOur implementation of batch normalization in NeuralNet uses the high-level abstraction tf.layers.batch_normalization, found in TensorFlow's tf.layers package.\nHowever, if you would like to implement batch normalization at a lower level, the following code shows you how.\nIt uses tf.nn.batch_normalization from TensorFlow's neural net (nn) package.\n1) You can replace the fully_connected function in the NeuralNet class with the below code and everything in NeuralNet will still work like it did before.",
"def fully_connected(self, layer_in, initial_weights, activation_fn=None):\n \"\"\"\n Creates a standard, fully connected layer. Its number of inputs and outputs will be\n defined by the shape of `initial_weights`, and its starting weight values will be\n taken directly from that same parameter. If `self.use_batch_norm` is True, this\n layer will include batch normalization, otherwise it will not. \n \n :param layer_in: Tensor\n The Tensor that feeds into this layer. It's either the input to the network or the output\n of a previous layer.\n :param initial_weights: NumPy array or Tensor\n Initial values for this layer's weights. The shape defines the number of nodes in the layer.\n e.g. Passing in 3 matrix of shape (784, 256) would create a layer with 784 inputs and 256 \n outputs. \n :param activation_fn: Callable or None (default None)\n The non-linearity used for the output of the layer. If None, this layer will not include \n batch normalization, regardless of the value of `self.use_batch_norm`. \n e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.\n \"\"\"\n if self.use_batch_norm and activation_fn:\n # Batch normalization uses weights as usual, but does NOT add a bias term. This is because \n # its calculations include gamma and beta variables that make the bias term unnecessary.\n weights = tf.Variable(initial_weights)\n linear_output = tf.matmul(layer_in, weights)\n\n num_out_nodes = initial_weights.shape[-1]\n\n # Batch normalization adds additional trainable variables: \n # gamma (for scaling) and beta (for shifting).\n gamma = tf.Variable(tf.ones([num_out_nodes]))\n beta = tf.Variable(tf.zeros([num_out_nodes]))\n\n # These variables will store the mean and variance for this layer over the entire training set,\n # which we assume represents the general population distribution.\n # By setting `trainable=False`, we tell TensorFlow not to modify these variables during\n # back propagation. 
Instead, we will assign values to these variables ourselves. \n        pop_mean = tf.Variable(tf.zeros([num_out_nodes]), trainable=False)\n        pop_variance = tf.Variable(tf.ones([num_out_nodes]), trainable=False)\n\n        # Batch normalization requires a small constant epsilon, used to ensure we don't divide by zero.\n        # This is the default value TensorFlow uses.\n        epsilon = 1e-3\n\n        def batch_norm_training():\n            # Calculate the mean and variance for the data coming out of this layer's linear-combination step.\n            # The [0] defines an array of axes to calculate over.\n            batch_mean, batch_variance = tf.nn.moments(linear_output, [0])\n\n            # Calculate a moving average of the training data's mean and variance while training.\n            # These will be used during inference.\n            # Decay should be some number less than 1. tf.layers.batch_normalization uses the parameter\n            # \"momentum\" to accomplish this and defaults it to 0.99\n            decay = 0.99\n            train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))\n            train_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay))\n\n            # The 'tf.control_dependencies' context tells TensorFlow it must calculate 'train_mean' \n            # and 'train_variance' before it calculates the 'tf.nn.batch_normalization' layer.\n            # This is necessary because those two operations are not actually in the graph\n            # connecting the linear_output and batch_normalization layers, \n            # so TensorFlow would otherwise just skip them.\n            with tf.control_dependencies([train_mean, train_variance]):\n                return tf.nn.batch_normalization(linear_output, batch_mean, batch_variance, beta, gamma, epsilon)\n \n        def batch_norm_inference():\n            # During inference, use our estimated population mean and variance to normalize the layer\n            return tf.nn.batch_normalization(linear_output, pop_mean, pop_variance, beta, gamma, epsilon)\n\n        # Use `tf.cond` as a sort of if-check. 
When self.is_training is True, TensorFlow will execute \n # the operation returned from `batch_norm_training`; otherwise it will execute the graph\n # operation returned from `batch_norm_inference`.\n batch_normalized_output = tf.cond(self.is_training, batch_norm_training, batch_norm_inference)\n \n # Pass the batch-normalized layer output through the activation function.\n # The literature states there may be cases where you want to perform the batch normalization *after*\n # the activation function, but it is difficult to find any uses of that in practice.\n return activation_fn(batch_normalized_output)\n else:\n # When not using batch normalization, create a standard layer that multiplies\n # the inputs and weights, adds a bias, and optionally passes the result \n # through an activation function. \n weights = tf.Variable(initial_weights)\n biases = tf.Variable(tf.zeros([initial_weights.shape[-1]]))\n linear_output = tf.add(tf.matmul(layer_in, weights), biases)\n return linear_output if not activation_fn else activation_fn(linear_output)\n",
"This version of fully_connected is much longer than the original, but once again has extensive comments to help you understand it. Here are some important points:\n\nIt explicitly creates variables to store gamma, beta, and the population mean and variance. These were all handled for us in the previous version of the function.\nIt initializes gamma to one and beta to zero, so they start out having no effect in this calculation: $y_i \\leftarrow \\gamma \\hat{x_i} + \\beta$. However, during training the network learns the best values for these variables using back propagation, just like networks normally do with weights.\nUnlike gamma and beta, the variables for population mean and variance are marked as untrainable. That tells TensorFlow not to modify them during back propagation. Instead, the lines that call tf.assign are used to update these variables directly.\nTensorFlow won't automatically run the tf.assign operations during training because it only evaluates operations that are required based on the connections it finds in the graph. To get around that, we add this line: with tf.control_dependencies([train_mean, train_variance]): before we run the normalization operation. This tells TensorFlow it needs to run those operations before running anything inside the with block. \nThe actual normalization math is still mostly hidden from us, this time using tf.nn.batch_normalization.\ntf.nn.batch_normalization does not have a training parameter like tf.layers.batch_normalization did. However, we still need to handle training and inference differently, so we run different code in each case using the tf.cond operation.\nWe use the tf.nn.moments function to calculate the batch mean and variance.\n\n2) The current version of the train function in NeuralNet will work fine with this new version of fully_connected. 
However, it uses these lines to ensure population statistics are updated when using batch normalization: \npython\nif self.use_batch_norm:\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)\nelse:\n train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)\nOur new version of fully_connected handles updating the population statistics directly. That means you can also simplify your code by replacing the above if/else condition with just this line:\npython\ntrain_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)\n3) And just in case you want to implement every detail from scratch, you can replace this line in batch_norm_training:\npython\nreturn tf.nn.batch_normalization(linear_output, batch_mean, batch_variance, beta, gamma, epsilon)\nwith these lines:\npython\nnormalized_linear_output = (linear_output - batch_mean) / tf.sqrt(batch_variance + epsilon)\nreturn gamma * normalized_linear_output + beta\nAnd replace this line in batch_norm_inference:\npython\nreturn tf.nn.batch_normalization(linear_output, pop_mean, pop_variance, beta, gamma, epsilon)\nwith these lines:\npython\nnormalized_linear_output = (linear_output - pop_mean) / tf.sqrt(pop_variance + epsilon)\nreturn gamma * normalized_linear_output + beta\nAs you can see in each of the above substitutions, the two lines of replacement code simply implement the following two equations directly. 
The first line calculates the following equation, with linear_output representing $x_i$ and normalized_linear_output representing $\\hat{x_i}$: \n$$\n\\hat{x_i} \\leftarrow \\frac{x_i - \\mu_B}{\\sqrt{\\sigma_{B}^{2} + \\epsilon}}\n$$\nAnd the second line is a direct translation of the following equation:\n$$\ny_i \\leftarrow \\gamma \\hat{x_i} + \\beta\n$$\nWe still use the tf.nn.moments operation to implement the other two equations from earlier – the ones that calculate the batch mean and variance used in the normalization step. If you really wanted to do everything from scratch, you could replace that line, too, but we'll leave that to you. \nWhy the difference between training and inference?\nIn the original function that uses tf.layers.batch_normalization, we tell the layer whether or not the network is training by passing a value for its training parameter, like so:\npython\nbatch_normalized_output = tf.layers.batch_normalization(linear_output, training=self.is_training)\nAnd that forces us to provide a value for self.is_training in our feed_dict, like we do in this example from NeuralNet's train function:\npython\nsession.run(train_step, feed_dict={self.input_layer: batch_xs, \n                                   labels: batch_ys, \n                                   self.is_training: True})\nIf you looked at the low level implementation, you probably noticed that, just like with tf.layers.batch_normalization, we need to do slightly different things during training and inference. But why is that?\nFirst, let's look at what happens when we don't. The following function is similar to train_and_test from earlier, but this time we are only testing one network and instead of plotting its accuracy, we perform 200 predictions on test inputs, 1 input at a time. We can use the test_training_accuracy parameter to test the network in training or inference modes (the equivalent of passing True or False to the feed_dict for is_training).",
"def batch_norm_test(test_training_accuracy):\n \"\"\"\n :param test_training_accuracy: bool\n If True, perform inference with batch normalization using batch mean and variance;\n if False, perform inference with batch normalization using estimated population mean and variance.\n \"\"\"\n\n weights = [np.random.normal(size=(784,100), scale=0.05).astype(np.float32),\n np.random.normal(size=(100,100), scale=0.05).astype(np.float32),\n np.random.normal(size=(100,100), scale=0.05).astype(np.float32),\n np.random.normal(size=(100,10), scale=0.05).astype(np.float32)\n ]\n\n tf.reset_default_graph()\n\n # Train the model\n bn = NeuralNet(weights, tf.nn.relu, True)\n \n # First train the network\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n\n bn.train(sess, 0.01, 2000, 2000)\n\n bn.test(sess, test_training_accuracy=test_training_accuracy, include_individual_predictions=True)",
"In the following cell, we pass True for test_training_accuracy, which performs the same batch normalization that we normally perform during training.",
"batch_norm_test(True)",
"As you can see, the network guessed the same value every time! But why? Because during training, a network with batch normalization adjusts the values at each layer based on the mean and variance of that batch. The \"batches\" we are using for these predictions have a single input each time, so their values are the means, and their variances will always be 0. That means the network will normalize the values at any layer to zero. (Review the equations from before to see why a value that is equal to the mean would always normalize to zero.) So we end up with the same result for every input we give the network, because it's the value the network produces when it applies its learned weights to zeros at every layer. \nNote: If you re-run that cell, you might get a different value from what we showed. That's because the specific weights the network learns will be different every time. But whatever value it is, it should be the same for all 200 predictions.\nTo overcome this problem, the network does not just normalize the batch at each layer. It also maintains an estimate of the mean and variance for the entire population. So when we perform inference, instead of letting it \"normalize\" all the values using their own means and variance, it uses the estimates of the population mean and variance that it calculated while training. \nSo in the following example, we pass False for test_training_accuracy, which tells the network that we want it to perform inference with the population statistics it calculates during training.",
"batch_norm_test(False)",
"As you can see, now that we're using the estimated population mean and variance, we get a 97% accuracy. That means it guessed correctly on 194 of the 200 samples – not too bad for something that trained in under 4 seconds. :)\nConsiderations for other network types\nThis notebook demonstrates batch normalization in a standard neural network with fully connected layers. You can also use batch normalization in other types of networks, but there are some special considerations.\nConvNets\nConvolution layers consist of multiple feature maps. (Remember, the depth of a convolutional layer refers to its number of feature maps.) And the weights for each feature map are shared across all the inputs that feed into the layer. Because of these differences, batch normalizing convolutional layers requires batch/population mean and variance per feature map rather than per node in the layer.\nWhen using tf.layers.batch_normalization, be sure to pay attention to the order of your convolutional dimensions.\nSpecifically, you may want to set a different value for the axis parameter if your layers have their channels first instead of last. \nIn our low-level implementations, we used the following line to calculate the batch mean and variance:\npython\nbatch_mean, batch_variance = tf.nn.moments(linear_output, [0])\nIf we were dealing with a convolutional layer, we would calculate the mean and variance with a line like this instead:\npython\nbatch_mean, batch_variance = tf.nn.moments(conv_layer, [0,1,2], keep_dims=False)\nThe second parameter, [0,1,2], tells TensorFlow to calculate the batch mean and variance over each feature map. (The three axes are the batch, height, and width.) And setting keep_dims to False tells tf.nn.moments not to return values with the same size as the inputs. 
Specifically, it ensures we get one mean/variance pair per feature map.\nRNNs\nBatch normalization can work with recurrent neural networks, too, as shown in the 2016 paper Recurrent Batch Normalization. It's a bit more work to implement, but basically involves calculating the means and variances per time step instead of per layer. You can find an example where someone extended tf.nn.rnn_cell.RNNCell to include batch normalization in this GitHub repo."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
alfkjartan/nvgimu
|
notebooks/Get started.ipynb
|
gpl-3.0
|
[
"Getting started with the analysis of nvg data\nThis notebook assumes that data exists in a database in the hdf5 format. For instructions how to set up the database with data see [../readme.md].\nThe approximate orientation of the IMUs\nThe IMUs has its local z-axis pointing out of the box, and its local x-axis pointing in the long direction. The IMUs were strapped to the body with the positive local x-direction pointing downwards along the main axis of the segment. The z-axis was approximately pointing outwards from the body, normal to the sagittal plane. \nImport modules",
"import numpy as np\nimport matplotlib.pyplot as plt\nimport nvg.ximu.ximudata as ximudata\n%matplotlib notebook",
"Load the database",
"reload(ximudata)\ndbfilename = \"/home/kjartan/Dropbox/Public/nvg201209.hdf5\"\ndb = ximudata.NVGData(dbfilename);",
"Explore contents of the database file",
"dbfile = db.hdfFile;\nprint \"Subjects: \", dbfile.keys()\nprint \"Trials: \", dbfile['S5'].keys()\nprint \"IMUs: \", dbfile['S5/B'].keys()\nprint \"Attributes of example trial\", dbfile['S5/B'].attrs.keys()\nprint \"Shape of example IMU data entry\", dbfile['S5/B/N'].shape\n",
"The content of the raw IMU file\nThe columns of the IMU data contain: \n<ol start=\"0\">\n <li>Packet number</li>\n <li>Gyroscope X (deg/s)</li>\n <li>Gyroscope Y (deg/s)</li>\n <li>Gyroscope Z (deg/s)</li>\n <li>Accelerometer X (g)</li>\n <li>Accelerometer Y (g)</li>\n <li>Accelerometer Z (g)</li>\n <li>Magnetometer X (G)</li>\n <li>Magnetometer Y (G)</li>\n <li>Magnetometer Z (G)</li>\n</ol>\n\nPlot example data",
"db.plot_imu_data(\"S12\", \"D\", \"RA\")",
"Implemented analysis methods",
"print [s for s in dir(db) if s.startswith(\"get\")]",
"Try an analysis",
"db.get_angle_to_vertical_markers(subject=\"S4\")\n\nres = db.apply_to_all_trials(db.get_RoM_angle_to_vertical, {'imu':'LH'},\n subjlist=['S4', 'S6'], triallist=['B', 'N'])\n\nres = db.apply_to_all_trials(db.get_angle_to_vertical_markers, {'imu':'LH'},\n subjlist=['S4', 'S6'], triallist=['B', 'N'])"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
jinntrance/MOOC
|
coursera/ml-clustering-and-retrieval/assignments/4_em-with-text-data_blank.ipynb
|
cc0-1.0
|
[
"Fitting a diagonal covariance Gaussian mixture model to text data\nIn a previous assignment, we explored k-means clustering for a high-dimensional Wikipedia dataset. We can also model this data with a mixture of Gaussians, though with increasing dimension we run into two important issues associated with using a full covariance matrix for each component.\n * Computational cost becomes prohibitive in high dimensions: score calculations have complexity cubic in the number of dimensions M if the Gaussian has a full covariance matrix.\n * A model with many parameters requires more data: observe that a full covariance matrix for an M-dimensional Gaussian will have M(M+1)/2 parameters to fit. With the number of parameters growing roughly as the square of the dimension, it may quickly become impossible to find a sufficient amount of data to make good inferences.\nBoth of these issues are avoided if we require the covariance matrix of each component to be diagonal, as then it has only M parameters to fit and the score computation decomposes into M univariate score calculations. Recall from the lecture that the M-step for the full covariance is:\n\\begin{align}\n\\hat{\\Sigma}_k &= \\frac{1}{N_k^{soft}} \\sum_{i=1}^N r_{ik} (x_i-\\hat{\\mu}_k)(x_i - \\hat{\\mu}_k)^T\n\\end{align}\nNote that this is a square matrix with M rows and M columns, and the above equation implies that the (v, w) element is computed by\n\\begin{align}\n\\hat{\\Sigma}_{k, v, w} &= \\frac{1}{N_k^{soft}} \\sum_{i=1}^N r_{ik} (x_{iv}-\\hat{\\mu}_{kv})(x_{iw} - \\hat{\\mu}_{kw})\n\\end{align}\nWhen we assume that this is a diagonal matrix, then non-diagonal elements are assumed to be zero and we only need to compute each of the M elements along the diagonal independently using the following equation. 
\n\\begin{align}\n\\hat{\\sigma}^2_{k, v} &= \\hat{\\Sigma}_{k, v, v} \\\\\n&= \\frac{1}{N_k^{soft}} \\sum_{i=1}^N r_{ik} (x_{iv}-\\hat{\\mu}_{kv})^2\n\\end{align}\nIn this section, we will use an EM implementation to fit a Gaussian mixture model with diagonal covariances to a subset of the Wikipedia dataset. The implementation uses the above equation to compute each variance term. \nWe'll begin by importing the dataset and coming up with a useful representation for each article. After running our algorithm on the data, we will explore the output to see whether we can give a meaningful interpretation to the fitted parameters in our model.\nNote to Amazon EC2 users: To conserve memory, make sure to stop all the other notebooks before running this notebook.\nImport necessary packages\nThe following code block will check if you have the correct version of GraphLab Create. Any version later than 1.8.5 will do. To upgrade, read this page.",
"import graphlab\n\n'''Check GraphLab Create version'''\nfrom distutils.version import StrictVersion\nassert (StrictVersion(graphlab.version) >= StrictVersion('1.8.5')), 'GraphLab Create must be version 1.8.5 or later.'",
"We also have a Python file containing implementations for several functions that will be used during the course of this assignment.",
"from em_utilities import *",
"Load Wikipedia data and extract TF-IDF features\nLoad Wikipedia data and transform each of the first 5000 document into a TF-IDF representation.",
"wiki = graphlab.SFrame('people_wiki.gl/').head(5000)\nwiki['tf_idf'] = graphlab.text_analytics.tf_idf(wiki['text'])",
"Using a utility we provide, we will create a sparse matrix representation of the documents. This is the same utility function you used during the previous assignment on k-means with text data.",
"tf_idf, map_index_to_word = sframe_to_scipy(wiki, 'tf_idf')",
"As in the previous assignment, we will normalize each document's TF-IDF vector to be a unit vector.",
"tf_idf = normalize(tf_idf)",
"We can check that the length (Euclidean norm) of each row is now 1.0, as expected.",
"for i in range(5):\n doc = tf_idf[i]\n print(np.linalg.norm(doc.todense()))",
"EM in high dimensions\nEM for high-dimensional data requires some special treatment:\n * E step and M step must be vectorized as much as possible, as explicit loops are dreadfully slow in Python.\n * All operations must be cast in terms of sparse matrix operations, to take advantage of computational savings enabled by sparsity of data.\n * Initially, some words may be entirely absent from a cluster, causing the M step to produce zero mean and variance for those words. This means any data point with one of those words will have 0 probability of being assigned to that cluster since the cluster allows for no variability (0 variance) around that count being 0 (0 mean). Since there is a small chance for those words to later appear in the cluster, we instead assign a small positive variance (~1e-10). Doing so also prevents numerical overflow.\nWe provide the complete implementation for you in the file em_utilities.py. For those who are interested, you can read through the code to see how the sparse matrix implementation differs from the previous assignment. \nYou are expected to answer some quiz questions using the results of clustering.\nInitializing mean parameters using k-means\nRecall from the lectures that EM for Gaussian mixtures is very sensitive to the choice of initial means. With a bad initial set of means, EM may produce clusters that span a large area and are mostly overlapping. To eliminate such bad outcomes, we first produce a suitable set of initial means by using the cluster centers from running k-means. That is, we first run k-means and then take the final set of means from the converged solution as the initial means in our EM algorithm.",
"from sklearn.cluster import KMeans\n\nnp.random.seed(5)\nnum_clusters = 25\n\n# Use scikit-learn's k-means to simplify workflow\nkmeans_model = KMeans(n_clusters=num_clusters, n_init=5, max_iter=400, random_state=1, n_jobs=-1)\nkmeans_model.fit(tf_idf)\ncentroids, cluster_assignment = kmeans_model.cluster_centers_, kmeans_model.labels_\n\nmeans = [centroid for centroid in centroids]\n\n(cluster_assignment == 2).sum()",
"Initializing cluster weights\nWe will initialize each cluster weight to be the proportion of documents assigned to that cluster by k-means above.",
"num_docs = tf_idf.shape[0]\nweights = []\nfor i in xrange(num_clusters):\n # Compute the number of data points assigned to cluster i:\n num_assigned = (cluster_assignment == i).sum() # YOUR CODE HERE\n w = float(num_assigned) / num_docs\n weights.append(w)",
"Initializing covariances\nTo initialize our covariance parameters, we compute $\\hat{\\sigma}_{k, j}^2 = \\sum_{i=1}^{N}(x_{i,j} - \\hat{\\mu}_{k, j})^2$ for each feature $j$. For features with really tiny variances, we assign 1e-8 instead to prevent numerical instability. We do this computation in a vectorized fashion in the following code block.",
"covs = []\nfor i in xrange(num_clusters):\n member_rows = tf_idf[cluster_assignment==i]\n cov = (member_rows.power(2) - 2*member_rows.dot(diag(means[i]))).sum(axis=0).A1 / member_rows.shape[0] \\\n + means[i]**2\n cov[cov < 1e-8] = 1e-8\n covs.append(cov)",
"Running EM\nNow that we have initialized all of our parameters, run EM.",
"out = EM_for_high_dimension(tf_idf, means, covs, weights, cov_smoothing=1e-10)\n\nout['loglik']",
"Interpret clustering results\nIn contrast to k-means, EM is able to explicitly model clusters of varying sizes and proportions. The relative magnitude of variances in the word dimensions tell us much about the nature of the clusters.\nWrite yourself a cluster visualizer as follows. Examining each cluster's mean vector, list the 5 words with the largest mean values (5 most common words in the cluster). For each word, also include the associated variance parameter (diagonal element of the covariance matrix). \nA sample output may be:\n```\n==========================================================\nCluster 0: Largest mean parameters in cluster \nWord Mean Variance \nfootball 1.08e-01 8.64e-03\nseason 5.80e-02 2.93e-03\nclub 4.48e-02 1.99e-03\nleague 3.94e-02 1.08e-03\nplayed 3.83e-02 8.45e-04\n...\n```",
"# Fill in the blanks\ndef visualize_EM_clusters(tf_idf, means, covs, map_index_to_word):\n print('')\n print('==========================================================')\n\n num_clusters = len(means)\n for c in xrange(num_clusters):\n print('Cluster {0:d}: Largest mean parameters in cluster '.format(c))\n print('\\n{0: <12}{1: <12}{2: <12}'.format('Word', 'Mean', 'Variance'))\n \n # The k'th element of sorted_word_ids should be the index of the word \n # that has the k'th-largest value in the cluster mean. Hint: Use np.argsort().\n sorted_word_ids = np.argsort(means[c])[::-1] # make it descenting\n\n for i in sorted_word_ids[:5]:\n print '{0: <12}{1:<10.2e}{2:10.2e}'.format(map_index_to_word['category'][i], \n means[c][i],\n covs[c][i])\n print '\\n=========================================================='\n\n'''By EM'''\nvisualize_EM_clusters(tf_idf, out['means'], out['covs'], map_index_to_word)",
"Quiz Question. Select all the topics that have a cluster in the model created above. [multiple choice]\nComparing to random initialization\nCreate variables for randomly initializing the EM algorithm. Complete the following code block.",
"np.random.seed(5) # See the note below to see why we set seed=5.\nnum_clusters = len(means)\nnum_docs, num_words = tf_idf.shape\n\nrandom_means = []\nrandom_covs = []\nrandom_weights = []\n\nfor k in range(num_clusters):\n \n # Create a numpy array of length num_words with random normally distributed values.\n # Use the standard univariate normal distribution (mean 0, variance 1).\n # YOUR CODE HERE\n mean = np.random.standard_normal(num_words)\n \n # Create a numpy array of length num_words with random values uniformly distributed between 1 and 5.\n # YOUR CODE HERE\n cov = np.random.uniform(1, 5, num_words)\n\n # Initially give each cluster equal weight.\n # YOUR CODE HERE\n weight = 1.0 / num_clusters\n \n random_means.append(mean)\n random_covs.append(cov)\n random_weights.append(weight)\n ",
"Quiz Question: Try fitting EM with the random initial parameters you created above. (Use cov_smoothing=1e-5.) Store the result to out_random_init. What is the final loglikelihood that the algorithm converges to?",
"out_random_init = EM_for_high_dimension(tf_idf, random_means, random_covs, random_weights, cov_smoothing=1e-5)\n\nout_random_init['loglik']",
"Quiz Question: Is the final loglikelihood larger or smaller than the final loglikelihood we obtained above when initializing EM with the results from running k-means?",
"out_random_init['loglik'][-1] > out['loglik'][-1]",
"Quiz Question: For the above model, out_random_init, use the visualize_EM_clusters method you created above. Are the clusters more or less interpretable than the ones found after initializing using k-means?",
"# YOUR CODE HERE. Use visualize_EM_clusters, which will require you to pass in tf_idf and map_index_to_word.\nvisualize_EM_clusters(tf_idf, out_random_init['means'], out_random_init['covs'], map_index_to_word)",
"Note: Random initialization may sometimes produce a superior fit than k-means initialization. We do not claim that random initialization is always worse. However, this section does illustrate that random initialization often produces much worse clustering than k-means counterpart. This is the reason why we provide the particular random seed (np.random.seed(5)).\nTakeaway\nIn this assignment we were able to apply the EM algorithm to a mixture of Gaussians model of text data. This was made possible by modifying the model to assume a diagonal covariance for each cluster, and by modifying the implementation to use a sparse matrix representation. In the second part you explored the role of k-means initialization on the convergence of the model as well as the interpretability of the clusters."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
hunterherrin/phys202-2015-work
|
assignments/midterm/InteractEx06.ipynb
|
mit
|
[
"Interact Exercise 6\nImports\nPut the standard imports for Matplotlib, Numpy and the IPython widgets in the following cell.",
"%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom IPython.display import Image\nfrom IPython.html.widgets import interact, interactive, fixed",
"Exploring the Fermi distribution\nIn quantum statistics, the Fermi-Dirac distribution is related to the probability that a particle will be in a quantum state with energy $\\epsilon$. The equation for the distribution $F(\\epsilon)$ is:",
"Image('fermidist.png')",
"In this equation:\n\n$\\epsilon$ is the single particle energy.\n$\\mu$ is the chemical potential, which is related to the total number of particles.\n$k$ is the Boltzmann constant.\n$T$ is the temperature in Kelvin.\n\nIn the cell below, typeset this equation using LaTeX:\n\\begin{equation}\nF(\\epsilon)=\\frac{1}{e^{(\\epsilon-\\mu)/kT}+1}\n\\end{equation}\nDefine a function fermidist(energy, mu, kT) that computes the distribution function for a given value of energy, chemical potential mu and temperature kT. Note here, kT is a single variable with units of energy. Make sure your function works with an array and don't use any for or while loops in your code.",
"np.exp(2)\n\ndef fermidist(energy, mu, kT):\n \"\"\"Compute the Fermi distribution at energy, mu and kT.\"\"\"\n x=np.exp((energy-mu)/(kT))\n F=1/(x+1)\n return F\n\nassert np.allclose(fermidist(0.5, 1.0, 10.0), 0.51249739648421033)\nassert np.allclose(fermidist(np.linspace(0.0,1.0,10), 1.0, 10.0),\n np.array([ 0.52497919, 0.5222076 , 0.51943465, 0.5166605 , 0.51388532,\n 0.51110928, 0.50833256, 0.50555533, 0.50277775, 0.5 ]))",
"Write a function plot_fermidist(mu, kT) that plots the Fermi distribution $F(\\epsilon)$ as a function of $\\epsilon$ as a line plot for the parameters mu and kT.\n\nUse enegies over the range $[0,10.0]$ and a suitable number of points.\nChoose an appropriate x and y limit for your visualization.\nLabel your x and y axis and the overall visualization.\nCustomize your plot in 3 other ways to make it effective and beautiful.",
"plt.plot?\n\ndef plot_fermidist(mu, kT):\n energy=np.linspace(0.0,10.0,100)\n plot_f=fermidist(energy,mu,kT)\n plt.figure(figsize=(5,5))\n plt.plot(energy,plot_f, 'ro')\n plt.ylabel('F($\\epsilon$)')\n plt.xlabel('$\\epsilon$')\n plt.title('F($\\epsilon$) vs $\\epsilon$')\n plt.tight_layout()\n plt.xlim(0,2*mu)\n \n\nplot_fermidist(4.0, 1.0)\n\nassert True # leave this for grading the plot_fermidist function",
"Use interact with plot_fermidist to explore the distribution:\n\nFor mu use a floating point slider over the range $[0.0,5.0]$.\nfor kT use a floating point slider over the range $[0.1,10.0]$.",
"interact(plot_fermidist, mu=(0.0,5.0), kT=(0.1,10.0))",
"Provide complete sentence answers to the following questions in the cell below:\n\nWhat happens when the temperature $kT$ is low?\nWhat happens when the temperature $kT$ is high?\nWhat is the effect of changing the chemical potential $\\mu$?\nThe number of particles in the system are related to the area under this curve. How does the chemical potential affect the number of particles.\n\nUse LaTeX to typeset any mathematical symbols in your answer.\n-When the value $kT$ becomes extremely low, the probability of finding the particle in question past a quantam state value of $\\mu$ becomes nearly 0\n-When the value $kT$ becomes extremely high, the probability of finding the particle in higher quantam states is much more likely\n-As you increase the chemical potential $\\mu$, the quantam states possible for the particle will increase for a fixed $kT$ value\n-As would be rational to assume, chemcial potential increase is related to an increase in the number of particles in the system, which is shwon by the graph because as &\\mu$ increases so does the area under the graph"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
rubensfernando/mba-analytics-big-data
|
Python/2016-07-29/aula4-parte4-pesquisar-tweets.ipynb
|
mit
|
[
"Pesquisar por Tweets\nNa API Rest também podemos utilizar o método search para procurar por tweets que combinam com o termo definido.\nO método contém algumas opções como:\napi.search(q, count, max_id, lang)\n\n\nq - é o parâmetro que terá o termo a ser pesquisado.\ncount - é a quantidade de tweets que serão retornados. O limite é 100 e o padrão é 15. \nmax_id - retorna apenas os tweets com o ID menor ou igual ao que foi especificado.\nlang - restringe a busca por tweets de um determinado idioma.",
"import tweepy\n\nconsumer_key = ''\nconsumer_secret = ''\naccess_token = ''\naccess_token_secret = ''\n\nautorizar = tweepy.OAuthHandler(consumer_key, consumer_secret)\nautorizar.set_access_token(access_token, access_token_secret)\n\napi = tweepy.API(autorizar)\nprint(api)",
"Pesquisando",
"tweets = api.search(q='Python', lang='pt') # Teste o parâmetro count=150\n\nfor i, tweet in enumerate(tweets, start=1):\n print(\"%d ---- %s\" % (i, tweet.text))",
"Recuperar 1000 tweets",
"tweets_salvos = []\nultimo_id = -1\nqtde_tweets = 1000\n\nwhile len(tweets_salvos) < qtde_tweets:\n contador = qtde_tweets - len(tweets_salvos)\n try:\n novos_tweets = api.search(q='Python', count=contador, max_id=str(ultimo_id - 1)) # Teste com o parâmetro lang='pt'\n if not novos_tweets:\n print(\"Nenhum tweet para recuperar\")\n break\n tweets_salvos.extend(novos_tweets)\n ultimo_id = novos_tweets[-1].id\n except tweepy.TweepError as e:\n print(\"Erro:\", (e))\n break\n\nfor i, tweet in enumerate(tweets_salvos, start=1):\n print(\"{} ---- {}\".format(i, tweet.text))",
"Os códigos do parâmetro lang deve seguir a ISO 639-1 - https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes\nMaiores informações: https://dev.twitter.com/rest/public/timelines"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
ES-DOC/esdoc-jupyterhub
|
notebooks/cccma/cmip6/models/sandbox-1/aerosol.ipynb
|
gpl-3.0
|
[
"ES-DOC CMIP6 Model Properties - Aerosol\nMIP Era: CMIP6\nInstitute: CCCMA\nSource ID: SANDBOX-1\nTopic: Aerosol\nSub-Topics: Transport, Emissions, Concentrations, Optical Radiative Properties, Model. \nProperties: 69 (37 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:53:46\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook",
"# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'cccma', 'sandbox-1', 'aerosol')",
"Document Authors\nSet document authors",
"# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)",
"Document Contributors\nSpecify document contributors",
"# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)",
"Document Publication\nSpecify document publication status",
"# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)",
"Document Table of Contents\n1. Key Properties\n2. Key Properties --> Software Properties\n3. Key Properties --> Timestep Framework\n4. Key Properties --> Meteorological Forcings\n5. Key Properties --> Resolution\n6. Key Properties --> Tuning Applied\n7. Transport\n8. Emissions\n9. Concentrations\n10. Optical Radiative Properties\n11. Optical Radiative Properties --> Absorption\n12. Optical Radiative Properties --> Mixtures\n13. Optical Radiative Properties --> Impact Of H2o\n14. Optical Radiative Properties --> Radiative Scheme\n15. Optical Radiative Properties --> Cloud Interactions\n16. Model \n1. Key Properties\nKey properties of the aerosol model\n1.1. Model Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of aerosol model.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.2. Model Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nName of aerosol model code",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.3. Scheme Scope\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nAtmospheric domains covered by the aerosol model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.scheme_scope') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"troposhere\" \n# \"stratosphere\" \n# \"mesosphere\" \n# \"mesosphere\" \n# \"whole atmosphere\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"1.4. Basic Approximations\nIs Required: TRUE Type: STRING Cardinality: 1.1\nBasic approximations made in the aerosol model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.basic_approximations') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.5. Prognostic Variables Form\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPrognostic variables in the aerosol model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"3D mass/volume ratio for aerosols\" \n# \"3D number concenttration for aerosols\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"1.6. Number Of Tracers\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nNumber of tracers in the aerosol model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.number_of_tracers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"1.7. Family Approach\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nAre aerosol calculations generalized into families of species?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.family_approach') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"2. Key Properties --> Software Properties\nSoftware properties of aerosol code\n2.1. Repository\nIs Required: FALSE Type: STRING Cardinality: 0.1\nLocation of code for this component.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"2.2. Code Version\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCode version identifier.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"2.3. Code Languages\nIs Required: FALSE Type: STRING Cardinality: 0.N\nCode language(s).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"3. Key Properties --> Timestep Framework\nPhysical properties of seawater in ocean\n3.1. Method\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nMathematical method deployed to solve the time evolution of the prognostic variables",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses atmospheric chemistry time stepping\" \n# \"Specific timestepping (operator splitting)\" \n# \"Specific timestepping (integrated)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"3.2. Split Operator Advection Timestep\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nTimestep for aerosol advection (in seconds)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"3.3. Split Operator Physical Timestep\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nTimestep for aerosol physics (in seconds).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"3.4. Integrated Timestep\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nTimestep for the aerosol model (in seconds)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"3.5. Integrated Scheme Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nSpecify the type of timestep scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Explicit\" \n# \"Implicit\" \n# \"Semi-implicit\" \n# \"Semi-analytic\" \n# \"Impact solver\" \n# \"Back Euler\" \n# \"Newton Raphson\" \n# \"Rosenbrock\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"4. Key Properties --> Meteorological Forcings\n**\n4.1. Variables 3D\nIs Required: FALSE Type: STRING Cardinality: 0.1\nThree dimensionsal forcing variables, e.g. U, V, W, T, Q, P, conventive mass flux",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4.2. Variables 2D\nIs Required: FALSE Type: STRING Cardinality: 0.1\nTwo dimensionsal forcing variables, e.g. land-sea mask definition",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4.3. Frequency\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nFrequency with which meteological forcings are applied (in seconds).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"5. Key Properties --> Resolution\nResolution in the aersosol model grid\n5.1. Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"5.2. Canonical Horizontal Resolution\nIs Required: FALSE Type: STRING Cardinality: 0.1\nExpression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"5.3. Number Of Horizontal Gridpoints\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nTotal number of horizontal (XY) points (or degrees of freedom) on computational grid.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"5.4. Number Of Vertical Levels\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nNumber of vertical levels resolved on computational grid.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"5.5. Is Adaptive Grid\nIs Required: FALSE Type: BOOLEAN Cardinality: 0.1\nDefault is False. Set true if grid resolution changes during execution.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"6. Key Properties --> Tuning Applied\nTuning methodology for aerosol model\n6.1. Description\nIs Required: TRUE Type: STRING Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics retained. &Document the relative weight given to climate performance metrics versus process oriented metrics, &and on the possible conflicts with parameterization level tuning. In particular describe any struggle &with a parameter value that required pushing it to its limits to solve a particular model deficiency.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.2. Global Mean Metrics Used\nIs Required: FALSE Type: STRING Cardinality: 0.N\nList set of metrics of the global mean state used in tuning model/component",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.3. Regional Metrics Used\nIs Required: FALSE Type: STRING Cardinality: 0.N\nList of regional metrics of mean state used in tuning model/component",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.4. Trend Metrics Used\nIs Required: FALSE Type: STRING Cardinality: 0.N\nList observed trend metrics used in tuning model/component",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7. Transport\nAerosol transport\n7.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of transport in atmosperic aerosol model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.2. Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nMethod for aerosol transport modeling",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses Atmospheric chemistry transport scheme\" \n# \"Specific transport scheme (eulerian)\" \n# \"Specific transport scheme (semi-lagrangian)\" \n# \"Specific transport scheme (eulerian and semi-lagrangian)\" \n# \"Specific transport scheme (lagrangian)\" \n# TODO - please enter value(s)\n",
"7.3. Mass Conservation Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nMethod used to ensure mass conservation.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses Atmospheric chemistry transport scheme\" \n# \"Mass adjustment\" \n# \"Concentrations positivity\" \n# \"Gradients monotonicity\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"7.4. Convention\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nTransport by convention",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.convention') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses Atmospheric chemistry transport scheme\" \n# \"Convective fluxes connected to tracers\" \n# \"Vertical velocities connected to tracers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"8. Emissions\nAtmospheric aerosol emissions\n8.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of emissions in atmosperic aerosol model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.2. Method\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nMethod used to define aerosol species (several methods allowed because the different species may not use the same method).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Prescribed (climatology)\" \n# \"Prescribed CMIP6\" \n# \"Prescribed above surface\" \n# \"Interactive\" \n# \"Interactive above surface\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"8.3. Sources\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nSources of the aerosol species are taken into account in the emissions scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.sources') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Vegetation\" \n# \"Volcanos\" \n# \"Bare ground\" \n# \"Sea surface\" \n# \"Lightning\" \n# \"Fires\" \n# \"Aircraft\" \n# \"Anthropogenic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"8.4. Prescribed Climatology\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nSpecify the climatology type for aerosol emissions",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.prescribed_climatology') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Interannual\" \n# \"Annual\" \n# \"Monthly\" \n# \"Daily\" \n# TODO - please enter value(s)\n",
"8.5. Prescribed Climatology Emitted Species\nIs Required: FALSE Type: STRING Cardinality: 0.1\nList of aerosol species emitted and prescribed via a climatology",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.6. Prescribed Spatially Uniform Emitted Species\nIs Required: FALSE Type: STRING Cardinality: 0.1\nList of aerosol species emitted and prescribed as spatially uniform",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.7. Interactive Emitted Species\nIs Required: FALSE Type: STRING Cardinality: 0.1\nList of aerosol species emitted and specified via an interactive method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.8. Other Emitted Species\nIs Required: FALSE Type: STRING Cardinality: 0.1\nList of aerosol species emitted and specified via an "other method"",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.other_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.9. Other Method Characteristics\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCharacteristics of the "other method" used for aerosol emissions",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.other_method_characteristics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"9. Concentrations\nAtmospheric aerosol concentrations\n9.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of concentrations in atmosperic aerosol model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"9.2. Prescribed Lower Boundary\nIs Required: FALSE Type: STRING Cardinality: 0.1\nList of species prescribed at the lower boundary.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"9.3. Prescribed Upper Boundary\nIs Required: FALSE Type: STRING Cardinality: 0.1\nList of species prescribed at the upper boundary.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"9.4. Prescribed Fields Mmr\nIs Required: FALSE Type: STRING Cardinality: 0.1\nList of species prescribed as mass mixing ratios.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"9.5. Prescribed Fields Mmr\nIs Required: FALSE Type: STRING Cardinality: 0.1\nList of species prescribed as AOD plus CCNs.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"10. Optical Radiative Properties\nAerosol optical and radiative properties\n10.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of optical and radiative properties",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"11. Optical Radiative Properties --> Absorption\nAbsortion properties in aerosol scheme\n11.1. Black Carbon\nIs Required: FALSE Type: FLOAT Cardinality: 0.1\nAbsorption mass coefficient of black carbon at 550nm (if non-absorbing enter 0)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"11.2. Dust\nIs Required: FALSE Type: FLOAT Cardinality: 0.1\nAbsorption mass coefficient of dust at 550nm (if non-absorbing enter 0)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"11.3. Organics\nIs Required: FALSE Type: FLOAT Cardinality: 0.1\nAbsorption mass coefficient of organics at 550nm (if non-absorbing enter 0)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"12. Optical Radiative Properties --> Mixtures\n**\n12.1. External\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs there external mixing with respect to chemical composition?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"12.2. Internal\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs there internal mixing with respect to chemical composition?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"12.3. Mixing Rule\nIs Required: FALSE Type: STRING Cardinality: 0.1\nIf there is internal mixing with respect to chemical composition then indicate the mixinrg rule",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"13. Optical Radiative Properties --> Impact Of H2o\n**\n13.1. Size\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nDoes H2O impact size?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"13.2. Internal Mixture\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nDoes H2O impact internal mixture?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"14. Optical Radiative Properties --> Radiative Scheme\nRadiative scheme for aerosol\n14.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of radiative scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"14.2. Shortwave Bands\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nNumber of shortwave bands",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"14.3. Longwave Bands\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nNumber of longwave bands",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"15. Optical Radiative Properties --> Cloud Interactions\nAerosol-cloud interactions\n15.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of aerosol-cloud interactions",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"15.2. Twomey\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs the Twomey effect included?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"15.3. Twomey Minimum Ccn\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nIf the Twomey effect is included, then what is the minimum CCN number?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"15.4. Drizzle\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nDoes the scheme affect drizzle?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"15.5. Cloud Lifetime\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nDoes the scheme affect cloud lifetime?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"15.6. Longwave Bands\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nNumber of longwave bands",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"16. Model\nAerosol model\n16.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of atmosperic aerosol model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"16.2. Processes\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nProcesses included in the Aerosol model.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Dry deposition\" \n# \"Sedimentation\" \n# \"Wet deposition (impaction scavenging)\" \n# \"Wet deposition (nucleation scavenging)\" \n# \"Coagulation\" \n# \"Oxidation (gas phase)\" \n# \"Oxidation (in cloud)\" \n# \"Condensation\" \n# \"Ageing\" \n# \"Advection (horizontal)\" \n# \"Advection (vertical)\" \n# \"Heterogeneous chemistry\" \n# \"Nucleation\" \n# TODO - please enter value(s)\n",
"16.3. Coupling\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nOther model components coupled to the Aerosol model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.coupling') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Radiation\" \n# \"Land surface\" \n# \"Heterogeneous chemistry\" \n# \"Clouds\" \n# \"Ocean\" \n# \"Cryosphere\" \n# \"Gas phase chemistry\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"16.4. Gas Phase Precursors\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nList of gas phase aerosol precursors.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.gas_phase_precursors') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"DMS\" \n# \"SO2\" \n# \"Ammonia\" \n# \"Iodine\" \n# \"Terpene\" \n# \"Isoprene\" \n# \"VOC\" \n# \"NOx\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"16.5. Scheme Type\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nType(s) of aerosol scheme used by the aerosols model (potentially multiple: some species may be covered by one type of aerosol scheme and other species covered by another type).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.scheme_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Bulk\" \n# \"Modal\" \n# \"Bin\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"16.6. Bulk Scheme Species\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nList of species covered by the bulk scheme.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.bulk_scheme_species') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Sulphate\" \n# \"Nitrate\" \n# \"Sea salt\" \n# \"Dust\" \n# \"Ice\" \n# \"Organic\" \n# \"Black carbon / soot\" \n# \"SOA (secondary organic aerosols)\" \n# \"POM (particulate organic matter)\" \n# \"Polar stratospheric ice\" \n# \"NAT (Nitric acid trihydrate)\" \n# \"NAD (Nitric acid dihydrate)\" \n# \"STS (supercooled ternary solution aerosol particule)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"©2017 ES-DOC"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
malnoxon/board-game-data-science
|
stage5/stage5_report.ipynb
|
gpl-3.0
|
[
"Stage 5, Report\nhttps://github.com/anhaidgroup/py_entitymatching/blob/master/notebooks/vldb_demo/Demo_notebook_v6.ipynb",
"import py_entitymatching as em\nimport os\nimport pandas as pd\n\n# specify filepaths for tables A and B. \npath_A = 'tableA.csv'\npath_B = 'tableB.csv'\n# read table A; table A has 'ID' as the key attribute\nA = em.read_csv_metadata(path_A, key='id')\n# read table B; table B has 'ID' as the key attribute\nB = em.read_csv_metadata(path_B, key='id')",
"Filling in Missing Values",
"# Impute missing values\n\n# Manually set metadata properties, as current py_entitymatching.impute_table()\n# requires 'fk_ltable', 'fk_rtable', 'ltable', 'rtable' properties\nem.set_property(A, 'fk_ltable', 'id')\nem.set_property(A, 'fk_rtable', 'id')\nem.set_property(A, 'ltable', A)\nem.set_property(A, 'rtable', A)\n\nA_all_attrs = list(A.columns.values)\nA_impute_attrs = ['year','min_num_players','max_num_players','min_gameplay_time','max_gameplay_time','min_age']\nA_exclude_attrs = list(set(A_all_attrs) - set(A_impute_attrs))\nA1 = em.impute_table(A, exclude_attrs=A_exclude_attrs, missing_val='NaN', strategy='most_frequent', axis=0, val_all_nans=0, verbose=True)\n\n# Compare number of missing values to check the results\nprint(sum(A['min_num_players'].isnull()))\nprint(sum(A1['min_num_players'].isnull()))\n\n# Do the same thing for B\nem.set_property(B, 'fk_ltable', 'id')\nem.set_property(B, 'fk_rtable', 'id')\nem.set_property(B, 'ltable', B)\nem.set_property(B, 'rtable', B)\n\nB_all_attrs = list(B.columns.values)\n# TODO: add 'min_age'\nB_impute_attrs = ['year','min_num_players','max_num_players','min_gameplay_time','max_gameplay_time']\nB_exclude_attrs = list(set(B_all_attrs) - set(B_impute_attrs))\nB1 = em.impute_table(B, exclude_attrs=B_exclude_attrs, missing_val='NaN', strategy='most_frequent', axis=0, val_all_nans=0, verbose=True)\n\n# Compare number of missing values to check the results\nprint(sum(B['min_num_players'].isnull()))\nprint(sum(B1['min_num_players'].isnull()))\n\n\n# Load the pre-labeled data\nS = em.read_csv_metadata('sample_labeled.csv', \n key='_id',\n ltable=A1, rtable=B1, \n fk_ltable='ltable_id', fk_rtable='rtable_id')\n\npath_total_cand_set = 'candidate_set_C1.csv'\ntotal_cand_set = em.read_csv_metadata(path_total_cand_set, \n key='_id',\n ltable=A1, rtable=B1, \n fk_ltable='ltable_id', fk_rtable='rtable_id')\n\n# Split S into I an J\nIJ = em.split_train_test(S, train_proportion=0.75, random_state=35)\nI = IJ['train']\nJ = 
IJ['test']\n\ncorres = em.get_attr_corres(A1, B1)",
"Generating Features\nHere, we generate all the features we decided upon after our final iteration of cross validation and debugging. We only use the relevant subset of all these features in the reported iterations below.",
"# Generate a set of features\n#import pdb; pdb.set_trace();\nimport py_entitymatching.feature.attributeutils as au\nimport py_entitymatching.feature.simfunctions as sim\nimport py_entitymatching.feature.tokenizers as tok\n\nltable = A1\nrtable = B1\n\n# Get similarity functions for generating the features for matching\nsim_funcs = sim.get_sim_funs_for_matching()\n# Get tokenizer functions for generating the features for matching\ntok_funcs = tok.get_tokenizers_for_matching()\n\n# Get the attribute types of the input tables\nattr_types_ltable = au.get_attr_types(ltable)\nattr_types_rtable = au.get_attr_types(rtable)\n\n# Get the attribute correspondence between the input tables\nattr_corres = au.get_attr_corres(ltable, rtable)\nprint(attr_types_ltable['name'])\nprint(attr_types_rtable['name'])\nattr_types_ltable['name'] = 'str_bt_5w_10w'\nattr_types_rtable['name'] = 'str_bt_5w_10w'\n\n\n\n# Get the features\nF = em.get_features(ltable, rtable, attr_types_ltable,\n attr_types_rtable, attr_corres,\n tok_funcs, sim_funcs)\n\n#F = em.get_features_for_matching(A1, B1)\nprint(F['feature_name'])\n\n# Convert the I into a set of feature vectors using F\n# Here, we add name edit distance as a feature\ninclude_features_2 = [\n 'min_num_players_min_num_players_lev_dist',\n 'max_num_players_max_num_players_lev_dist',\n 'min_gameplay_time_min_gameplay_time_lev_dist',\n 'max_gameplay_time_max_gameplay_time_lev_dist',\n 'name_name_lev_dist'\n]\nF_2 = F.loc[F['feature_name'].isin(include_features_2)]",
"Generate training set",
"# Apply train, test set evaluation\nI_table = em.extract_feature_vecs(I, feature_table=F_2, attrs_after='label', show_progress=False)\nJ_table = em.extract_feature_vecs(J, feature_table=F_2, attrs_after='label', show_progress=False)\n\ntotal_cand_set_features = em.extract_feature_vecs(total_cand_set, feature_table=F_2, show_progress=False)\n\nm = em.LogRegMatcher(name='LogReg', random_state=0)\n\nm.fit(table=I_table, exclude_attrs=['_id', 'ltable_id', 'rtable_id','label'], target_attr='label')\n\ntotal_cand_set_features['prediction'] = m.predict(\n table=total_cand_set_features, \n exclude_attrs=['_id', 'ltable_id', 'rtable_id'],\n)",
"Joining Tables",
"# Join tables on matched tuples\nmatch_tuples = total_cand_set_features[total_cand_set_features['prediction']==1]\nmatch_tuples = match_tuples[['ltable_id','rtable_id']]\nA1['ltable_id'] = A1['id']\nB1['rtable_id'] = B1['id']\njoined_tables = pd.merge(match_tuples, A1, how='left', on='ltable_id')\njoined_tables = pd.merge(joined_tables, B1, how='left', on='rtable_id')\n\n\n\nfor n in A1.columns: \n if not n in ['_id', 'ltable_id', 'rtable_id']:\n joined_tables[n] = joined_tables.apply((lambda row: row[n+'_y'] if pd.isnull(row[n+'_x']) else row[n+'_x']), axis=1)\n joined_tables = joined_tables.drop(n+'_x', axis=1).drop(n+'_y',axis=1)\n\njoined_tables.to_csv('joined_table.csv')\n\njoined_tables",
"Adventure Time!",
"import pandas as pd\nimport matplotlib as plt\nfrom scipy.stats.stats import pearsonr\n%matplotlib inline\n\njoined_tables = pd.read_csv('new_joined_table.csv')\njoined_tables.iloc[1:4].to_csv('4_tuples.csv')\n\nfrom StringIO import StringIO\nimport prettytable \n\npt = prettytable.from_csv(open('new_joined_table.csv'))\nprint pt\n\njoined_tables.columns\nprint 'Size: ' + str(len(joined_tables))\nfor c in joined_tables.columns:\n print c + ' : '+ str(sum(joined_tables[c].isnull()))\nprint (len(joined_tables.iloc[12]))\n\n# Rating vs year\njoined_tables.groupby('year').agg({'year': 'mean','rating': 'mean'}).plot.scatter(x='year', y='rating')\n\njoined_tables.plot.scatter(x='year', y='rating')\npearsonr(\n joined_tables['year'][joined_tables['rating'].notnull()],\n joined_tables['rating'][joined_tables['rating'].notnull()]\n)\n\n\n\n# Complexity weight vs year\n\njoined_tables.plot.scatter(x='year', y='complexity_weight')\npearsonr(joined_tables['year'][joined_tables['complexity_weight'].notnull()],joined_tables['complexity_weight'][joined_tables['complexity_weight'].notnull()])\n\n# Price mean vs year\n\njoined_tables.groupby('year').agg({'year': 'mean','mean_price': 'mean'}).plot.scatter(x='year', y='mean_price')\njoined_tables.plot.scatter(x='year', y='mean_price')\npearsonr(joined_tables['year'][joined_tables['mean_price'].notnull()],joined_tables['mean_price'][joined_tables['mean_price'].notnull()])\n\n# Price mean vs rating\n\njoined_tables.plot.scatter(x='rating', y='mean_price')\nnonull = joined_tables['rating'].notnull() & joined_tables['mean_price'].notnull()\npearsonr(joined_tables['rating'][nonull],joined_tables['mean_price'][nonull])\n\n# Num players vs complexity weight\n\njoined_tables.plot.scatter(x='complexity_weight', y='min_num_players')\nnonull = joined_tables['complexity_weight'].notnull() & joined_tables['min_num_players'].notnull()\npearsonr(joined_tables['complexity_weight'][nonull],joined_tables['min_num_players'][nonull])\n\n#complexity 
weight vs gameplay time\n\njoined_tables.plot.scatter(x='max_gameplay_time', y='complexity_weight')\nnonull = joined_tables['complexity_weight'].notnull() & joined_tables['min_gameplay_time'].notnull()\npearsonr(joined_tables['complexity_weight'][nonull],joined_tables['min_gameplay_time'][nonull])"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
AtmaMani/pyChakras
|
udemy_ml_bootcamp/Python-for-Data-Analysis/Pandas/Pandas Exercises/SF Salaries Exercise- Solutions.ipynb
|
mit
|
[
"<a href='http://www.pieriandata.com'> <img src='../../Pierian_Data_Logo.png' /></a>\n\nSF Salaries Exercise - Solutions\nWelcome to a quick exercise for you to practice your pandas skills! We will be using the SF Salaries Dataset from Kaggle! Just follow along and complete the tasks outlined in bold below. The tasks will get harder and harder as you go along.\n Import pandas as pd.",
"import pandas as pd",
"Read Salaries.csv as a dataframe called sal.",
"sal = pd.read_csv('Salaries.csv')",
"Check the head of the DataFrame.",
"sal.head()",
"Use the .info() method to find out how many entries there are.",
"sal.info() # 148654 Entries",
"What is the average BasePay ?",
"sal['BasePay'].mean()",
"What is the highest amount of OvertimePay in the dataset ?",
"sal['OvertimePay'].max()",
"What is the job title of JOSEPH DRISCOLL ? Note: Use all caps, otherwise you may get an answer that doesn't match up (there is also a lowercase Joseph Driscoll).",
"sal[sal['EmployeeName']=='JOSEPH DRISCOLL']['JobTitle']",
"How much does JOSEPH DRISCOLL make (including benefits)?",
"sal[sal['EmployeeName']=='JOSEPH DRISCOLL']['TotalPayBenefits']",
"What is the name of highest paid person (including benefits)?",
"sal[sal['TotalPayBenefits']== sal['TotalPayBenefits'].max()] #['EmployeeName']\n# or\n# sal.loc[sal['TotalPayBenefits'].idxmax()]",
"What is the name of lowest paid person (including benefits)? Do you notice something strange about how much he or she is paid?",
"sal[sal['TotalPayBenefits']== sal['TotalPayBenefits'].min()] #['EmployeeName']\n# or\n# sal.loc[sal['TotalPayBenefits'].idxmax()]['EmployeeName']\n\n## ITS NEGATIVE!! VERY STRANGE",
"What was the average (mean) BasePay of all employees per year? (2011-2014) ?",
"sal.groupby('Year').mean()['BasePay']",
"How many unique job titles are there?",
"sal['JobTitle'].nunique()",
"What are the top 5 most common jobs?",
"sal['JobTitle'].value_counts().head(5)",
"How many Job Titles were represented by only one person in 2013? (e.g. Job Titles with only one occurence in 2013?)",
"sum(sal[sal['Year']==2013]['JobTitle'].value_counts() == 1) # pretty tricky way to do this...",
"How many people have the word Chief in their job title? (This is pretty tricky)",
"def chief_string(title):\n if 'chief' in title.lower():\n return True\n else:\n return False\n\nsum(sal['JobTitle'].apply(lambda x: chief_string(x)))",
"Bonus: Is there a correlation between length of the Job Title string and Salary?",
"sal['title_len'] = sal['JobTitle'].apply(len)\n\nsal[['title_len','TotalPayBenefits']].corr() # No correlation.",
"Great Job!"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
NuGrid/NuPyCEE
|
DOC/Capabilities/AddingDataToStellab.ipynb
|
bsd-3-clause
|
[
"Adding Stellar Data to STELLAB\nContributors: Christian Ritter\n In construction",
"%matplotlib nbagg\nimport matplotlib.pyplot as plt\nfrom NuPyCEE import stellab as st",
"The goal is to add your data to STELLAB to produce plots such as the plot below:",
"s1=st.stellab()\nxaxis='[Fe/H]'\nyaxis='[O/Fe]'\ns1.plot_spectro(fig=1,xaxis=xaxis,galaxy='carina')\nplt.xlim(-4.5,1),plt.ylim(-1.5,1.5)",
"Adding your own data.",
"from IPython.display import YouTubeVideo\nYouTubeVideo(\"R3_EZlXTFBo\")\n\ns1_new=st.stellab()\n\n# available data\n# s1_new.list_ref_papers()\n\ns1_new.plot_spectro(fig=2,yaxis=yaxis,\n obs=['stellab_data/carina_data/Fabrizio_et_al_2015_stellab'],show_err=True)\nplt.xlim(-4,0),plt.ylim(-2,2)",
"Uploading data\ncoming soon...",
"#from IPython.display import YouTubeVideo\n#YouTubeVideo(\"Pi9NpxAvYSs\")"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
whitead/numerical_stats
|
unit_10/hw_2019/homework_10_key.ipynb
|
gpl-3.0
|
[
"Homework 10 Key\nCHE 116: Numerical Methods and Statistics\n4/3/2019",
"import scipy.stats as ss\nimport numpy as np\n\n",
"1. Conceptual Questions\n\n\nDescribe the general process for parametric hypothesis tests.\n\n\nWhy would you choose a non-parametric hypothesis test over a parametric one?\n\n\nWhy would you choose a parametric hypothesis test over a non-parametric one?\n\n\nIf you do not reject the null hypothesis, does that mean you've proved it?\n\n\n1.1\nYou compute the end-points of an interval with values as extreme and more extreme as your sample data. You integrate the area of this interval to obtain your p-value. If the p-value is less than your significance threshold, you reject the null hypothesis.\n1.2\nTo avoid assuming normal or other distribution\n1.3\nA parametric test can show significance with small amounts of data.\n1.4\nNo\n2. Short Answer Questions\n\nIf your p-value is 0.4 and $\\alpha = 0.1$, should you reject the null hypothesis?\nWhat is your p-value if your $T$-value is -2 in the two-tailed/two-sided $t$-test with a DOF of 4?\nFor a one-sample $zM$ test, what is the minimum number of standard deviations away from the population mean a sample should be to reject the null hypothesis with $\\alpha = 0.05$?\nFor an N-sample $zM$ test, what is the minimum number of standard deviations away from the population mean a sample should be to reject the null hypothesis with $\\alpha = 0.05$ in terms of $N$?\nIn a Poisson hypothesis test, what is the p-value if $\\mu = 4.3$ and the sample is 8?\nWhat is the standard error for $\\bar{x} = 4$, $\\sigma_x = 0.4$ and $N = 11$?\n\n2.1\nNo\n2.2",
"import scipy.stats as ss\nss.t.cdf(-2, 4) * 2",
"2.3",
"-ss.norm.ppf(0.025)",
"2.4\n$$\n1.96 = \\frac{\\sqrt{N}\\bar{x}}{\\sigma}\n$$\nYou should be \n$\n\\frac{1.96}{\\sqrt{N}}\n$ standard deviations away\n2.5",
"1 - ss.poisson.cdf(7, mu=4.3)",
"2.6",
"import math\n0.4 / math.sqrt(11)",
"3. Choose the hypothesis test\nState which hypothesis test best fits the example below and state the null hypothesis. You can justify your answer if you feel like mulitiple tests fit. \n\n\nYou know that coffee should be brewed at 186 $^\\circ{}$F. You measure coffee from Starbuck 10 times over a week and want to know if they're brewing at the correct temperature.\n\n\nYou believe that the real estate market in SF is the same as NYC. You gather 100 home prices from both markets to compare them. \n\n\nAustralia banned most guns in 2002. You compare homicide rates before and after this date.\n\n\nA number of states have recently legalized recreational marijuana. You gather teen drug use data for the year prior and two years after the legislation took effect.\n\n\nYou think your mail is being stolen. You know that you typically get five pieces of mail on Wednesdays, but this Wednesday you got no mail. \n\n\n3.1\nt-test\nNull: The coffee is brewed at the correct temperature.\n3.2\nWilcoxon Sum of Ranks\nThe real estate prices in SF and NYC are from the same distribution.\n3.3\nWilcoxon Sum of Ranks\nThe homicide rates before the dat are from the same distribution\n3.4\nWilcoxon Signed Ranks\nThe teen drug use data for the year prior and the year after two years after the legislation are from the same distribution\n3.5\nPoisson\nYour mail is not being stolen\n4. Hypothesis Tests\nDo the following: \n\n[1 Point] State the test type\n[1 Point] State the null hypothesis\n[2 Points] State the p-value\n[1 Point] State if you accept/reject the null hypothesis\n[1 Point] Answer the question\n\n\n\nYou have heard an urban legend that you are taller in the morning. Using the height measurements in centimeters below, answer the question\n\n|Morning | Evening|\n|:---|----:|\n| 181 | 180 |\n| 182 | 179 |\n| 181 | 184 |\n| 182 | 179 |\n| 182 | 180 |\n| 183 | 183 |\n| 185 | 180 |\n\n\nOn a typical day in Rochester, there are 11 major car accidents. 
On the Monday after daylight savings time in the Spring, there are 18 major car accidents. Is this significant?\n\n\nYour cellphone bill is typically \\$20. The last four have been \\$21, \\$30. \\$25, \\$23. Has it significantly changed?\n\n\n4.1\n\nWilcoxon Signed Rank Test\nThe two heights are from the same distribution\n0.17\nCannot reject\nNo evidence for a difference in heights",
"p = ss.wilcoxon([181, 182, 181, 182, 182, 183, 185], [180, 179, 184, 179, 180, 183, 180])\nprint(p[1])",
"4.2\n\nPoisson\nThe number of accidents is from the population distribution\n0.032\nReject\nYes, there is a significant difference",
"1 - ss.poisson.cdf(17, mu=11)",
"4.3\n\nt-test\nThe new bills are from the population distribution of previous bills\n0.09\nDo not reject\nNo, the new bill is not significantly different",
"import numpy as np\ndata = [21, 30, 25, 23]\nse = np.std(data, ddof=1) / np.sqrt(len(data))\nT = (np.mean(data) - 20) / se\nss.t.cdf(-abs(T), df=len(data) - 1) * 2",
"5. Exponential Test (5 Bonus Points)\nYour dog typically greets you within 10 seconds of coming home. Is it significant that your dog took 16 seconds?",
"1 - ss.expon.cdf(16, scale=10)",
"No"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
adolfoguimaraes/machinelearning
|
Tensorflow/Tutorial02_MLP.ipynb
|
mit
|
[
"Tutorial 2 - MLP\nO objetivo final deste tutorial é mostrar como podemos implementar uma MLP (Multilayer Perceptron) no Tensorflow. No entanto, para ajudar a entender um pouco como funciona o Tensorflow vamos implementar primeiro uma rede mais simples (a Perceptron, que possui uma camada apenas) e, em seguida, iremos implementar a MLP. \nA implementção é baseada no Cap. 3 do livro do Redes Neurais Artificiais Para Engenharia e Ciências Aplicadas do professor Ivan Nunes e no tutorial Elementary Neural Networks with TensorFlow.\nRede Perceptron\nA rede perceptron é a forma mais simples de configuração de uma rede neural artificial. A arquitetura da rede se aproxima daquela que foi apresentada no problema de regressão linear do Tutorial 1. \nA imagem a seguir mostra a arquitetura da rede perceptron. \n<img src=\"https://www.embarcados.com.br/wp-content/uploads/2016/09/Perceptron-01.png\" width=\"50%\" />\nA rede é construída a partir de $n$ sinais de entrada e uma única saída, já que ela possui somente um neurônio. Mais detalhes de como a rede perceptron funciona, pode ser encontrado neste video:\nA rede perceptron é utilizada em problemas que são ditos linearmente separáveis. Entende-se por esse tipo de problema aqueles que são compostos por dados que podem ser separados por uma função linear. Para isso, vamos criar um conjunto de dados que possuem tal característica. Como o propósito é só mostrar o funcionamento da rede, vamos criar um conjunto de dados sem nenhum próposito específico.\nOs dados de entrada são constituídos de várias instâncias contendo duas variáveis cada ($x_1$ e $x_2$) e cada instância é classificada em 0 ou 1. Sendo assim, a tarefa da rede é aprender um modelo que seja capaz de separar estas duas classes. O código a seguir cria os dados e os exibem em um gráfico.",
"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef soma(x, y):\n return x + y\n\n\n#Criando os dados de entrada (x = features e y = classes)\nx_train = np.array([[2., 2.],[1., 3.],[2., 3.],[5., 3.],[7., 3.],[2., 4.],[3., 4.],[6., 4.],\n [1., 5.],[2., .5],[5., 5.],[4., 6.],[6., 6.],[5., 7.]],dtype=\"float32\")\ny_train = np.array([[0., 0., 0., 1., 1., 0., 0., 1., 0., 0., 1., 1., 1., 1.]], dtype=\"float32\")\n\n#Mostrando o Gráfico\nA = x_train[:, 0]\nB = x_train[:, 1]\n\ncolormap = np.array(['r', 'k'])\n\n# Plot the original data\nplt.scatter(A, B, c=colormap[[0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1]], s=40)\n\nplt.ylim([0,8]) # Limit the y axis size\nplt.show()\n\nsoma(3, 2)",
"O próximo passo é criar a seguinte rede no Tensorflow.\n<img src=\"https://www.embarcados.com.br/wp-content/uploads/2016/09/Perceptron-01.png\" width=\"50%\" />\nObserve que a rede é composta por um conjunto de sinais de entrada ($x_{train} = [x_1, x_2, ..., x_n]$). Cada sinal é poderado por um peso w, dado por $weights = [w_1, w_2, ..., w_3]$ e somado por um limiar de ativação ($\\theta$). Sendo assim, o neurônio é representado pela seguinte operação: \n$u = \\sum_{i=1}^{n}{w_i*x_i} + bias$\nO valor inicial do $bias$ é dado por $-\\theta$. Neste exemplo, $\\theta = 1$.\nO valor de $u$ é entrada para uma função de ativação ($g$) gerando o sinal de saída $y=g(u)$.\nNesse exemplo, a função de ativação é dada por: \n$g(u) = 1$, se $u >= 0$\n$g(u) = 0$, se $u < 0$\nO código a seguir implementa esse modelo. Mais detalhes são dados nos comentários do código.",
"# imports necessários\n\nimport tensorflow as tf\n\n# Função de ativação\ndef output(u):\n is_greater = tf.greater(u, 0)\n as_float = tf.to_float(is_greater)\n return as_float\n\n'''\nCriação do array que representa o limiar. O limiar é inicializado com -1. Neste caso, o limiar representa um vetor \n14x1, ou seja, é atribuído um limiar para cada valor de entrada (no caso, 14).\n'''\nlimiar_array = np.ones([14,1], dtype=\"float32\")*(-1)\nlimiar = tf.Variable(limiar_array, name=\"limiar\")\n\n'''\nCriação da variável com pesos. Como estamos trabalhando com dois valores de entrada por instância, os pesos são \ninstanciados por um vetor 2x1\n'''\nweights = tf.Variable(tf.random_normal([2,1]), name=\"pesos\")\n\n# Placeholders para feed dos dados de entrada e saída\nX = tf.placeholder(tf.float32, x_train.shape)\nY = tf.placeholder(tf.float32, y_train.shape)\n\n# Modelo criado \nu = tf.matmul(x_train,weights) + limiar\n\n# Aplicação da função de ativação\noutput_value = output(u)",
"Modelo criado. A próxima etapa é definir como nosso modelo será treinado. \nEste problema é uma tarefa de classificação. Cada instância vai ser classificada como 0 ou 1 de acordo com a classe que pertence. Sendo assim, o primeiro passo é comparar a saída com a classificação da base de treinamento. Para isso foi calculado o erro da seguinte forma:\n$mse = \\sum_{i = 1}^{N}{(y_i - output_i)^2}$\nO objetivo do treinamento é reduzir esse erro. Isso é dado pelo código a seguir:",
"error = tf.subtract(y_train.T, output_value)\nmse = tf.reduce_sum(tf.square(error))",
"Um outro passo do treinamento é a atualização dos valores dos pesos e do limiar. Esses parâmetros são atualizados segundo fórmula descrita no livro do Ivan Nunes. \n$w_{i}^{atual} = w_{i}^{anterior} + \\eta (d^{(k)} - y).x_{i}^{(k)}$\n$\\theta_{i}^{atual} = \\theta_{i}^{anterior} + \\eta (d^{(k)} - y).(-1)$\nonde:\n$d^{(k)}$ é o valor desejado e $y$, o valor de saída produzido pela perceptron. Essa diferença é representada pelo que chamamos de erro no código anterior. $\\eta$ é uma constante que define a taxa de aprendizagem da rede (no código, vamos referenciar $\\eta$ por learning_rate).",
"learning_rate = 0.001\n\ndelta_w = tf.matmul(x_train, learning_rate*error, transpose_a=True)\ndelta_limiar = tf.matmul(limiar, learning_rate*error, transpose_a=True)\n\ntrain_w = tf.assign(weights, tf.add(weights, delta_w))\ntrain_limiar = tf.assign(limiar, tf.add(limiar, delta_limiar))",
"Uma vez que criamos o modelo, vamos executar as operações para treiná-lo.",
"init_op = tf.global_variables_initializer()\n\nsess = tf.Session()\n\nsess.run(init_op)\n\nfor step in range(5000):\n _, _, a, b, c = sess.run([train_w, train_limiar, mse, weights, limiar], feed_dict={X: x_train, Y: y_train})\n \nprint(\"Weights\")\nprint(b)\nprint(\"Limiar\", c[0][0])",
"O código a seguir apenas cria a função determinada pelos pesos e limiar achados pela rede e plota essa reta no gráfico dos dados mostrado anteriormente.",
"#Graphic display\nimport matplotlib.pyplot as plt\n\nA = x_train[:, 0]\nB = x_train[:, 1]\n\ncolormap = np.array(['r', 'k'])\n\n# Plot the original data\nplt.scatter(A, B, c=colormap[[0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1]], s=40)\n\nymin, ymax = plt.ylim()\n\n# Calc the hyperplane (decision boundary)\nymin, ymax = plt.ylim()\nw = b\na = -w[0] / w[1]\nxx = np.linspace(ymin, ymax)\nyy = a * xx - (c[0,0]) / w[1]\n \n# Plot the hyperplane\nplt.plot(xx,yy, 'k-')\nplt.ylim([0,8]) # Limit the y axis size\nplt.show()",
"Multilayer Perceptron\nUma rede perceptron multicamadas (Multilayer Perceptron - MLP) é caracterizada pela presença de pelo menos uma camada intermediária (escondida ou hidden layer) de neurônios, situada entre a camada de entrada e a respectiva camada neural de saída. Sendo assim, as MLP possuem pelo menos duas camadas de neurônios, os quais estarão distribuídos entre as camadas intermediárias e a camada de saída. \nA figura a seguir ilustra este modelo.\n<img src=\"https://elogeel.files.wordpress.com/2010/05/050510_1627_multilayerp1.png\" />\nDetalhes deste modelo podem ser encontrados no capítulo 6 do Deep Learning Book. Uma outra boa referência é o livro Redes Neurais Artificiais Para Engenharia e Ciências Aplicadas do professor Ivan Nunes. O tema é abordado no capítulo 5.\nPara mostrar este modelo vamos utilizar o exemplo disponível neste link com a base do MNIST para treinar o modelo criado.\nAntes de começar a entrar em detalhes da rede, vamos baixar a base do MNIST que será utilizada. O MNIST é um dataset de dígitos escritos à mão. A tarefa consiste em, dada uma imagem que representa um dígito escrito à mão, classificá-la de acordo com o dígito que foi escrito. Detalhes da base podem ser encontrados neste link. Por ser uma base bastante utilizada, a API do tensorflow já possui a base em um pacote do framework.",
"# Carregando a base. Se a base não existir a pasta \"dataset/MNIST\" será criada e a base salva nesta pasta.\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"dataset/MNIST\", one_hot=True)",
"Cada imagem do dataset possui o tamanho de 28x28 e representa um dígito escrito à mão. A imagem a seguir ilustra uma instância da base:\n<img src=\"https://www.tensorflow.org/images/MNIST-Matrix.png\" width=\"70%\" />\nAs imagens vão ser transformadas em um vetor de 784 posições ($28*28$). A entrada da rede são vários vetores deste tipo. Cada vetor vai representar uma imagem. A saída da rede é definida por um vetor de 10 posições, onde cada posição representa uma possível classe do dígito (a base do MNIST trabalha com dígitos de 0 a 9).\nSe considerarmos que a base de treinamento possui 55000 imagens, as imagens a seguir representam a entrada e saída da rede, respectivamente:\n<img src=\"https://www.tensorflow.org/images/mnist-train-xs.png\" width=\"50%\" />\n<img src=\"https://www.tensorflow.org/images/mnist-train-ys.png\" width=\"50%\"/>\nA diferença desta representação para o modelo que será implementado aqui é que o nosso modelo será alimentado por batch. Uma breve explicação do que é batch no tensorflow pode ser encontrado neste link. Vamos utilizar um batch de 100. \nExplicações dadas, vamos para o modelo que será implementado.\nJessica Yung em seu tutorial Explaining TensorFlow code for a Multilayer Perceptron faz uma imagem bem representativa do modelo que será implementado: \n<img src=\"http://i0.wp.com/www.jessicayung.com/wp-content/uploads/2016/12/multilayer-perceptron-drawing.png\" />\nUma questão importante no entendimento (e, consequentemente, na implementação) de qualquer modelo de rede neural é entender as dimensões dos dados ao passar por cada camada. A imagem anterior deixa isso bem claro. Por isso, vamos analisar camada por camada para que possamos entender como essas dimensões são modificadas. Na imagem, h1 e h2 são a quantidade de neurônios nas camadas intermediárias. A quantidade de neurônios de uma camada é que indica a dimensão da saída daquela camada. 
Outra informação importante é o tamanho do batch (já explicado anteriormente).\nCom o batch igual a 100, a rede está recebendo como entrada uma matriz de 100x784, onde 784 é quantidade de pixel de cada imagem. Sendo assim, cada linha dessa matriz representa uma imagem da base de treinamento. Isso é passado para a primeira camada, onde será aplicada a seguinte operação $xW_1 + b_1$ onde, $W_1$ são os pesos de entrada e $b_1$, o bias. A imagem a seguir detalha esta operação juntamente com suas dimensões: \n<img src=\"http://adolfo.data2learning.com/ludiico/images/mlp_dimensions1.png\" width=\"70%\" />\nA saída da primeira camada é uma matriz 100x256, ou seja, 100 que representa a quantidade de instâncias que foram passadas na entrada e 256, a quantidade de neurônios. Ou seja, cada neurônio processou cada imagem e deu como resultado uma representação própria da entrada poderada pela operação definida. Ao resultado será aplicada uma função de ativação do tipo RELU (acesse o tutorial da Jessica Yung para ver detalhes do funcionamento deste tipo de função).\nA entrada da segunda rede é uma matriz 100x256 (saída da camada anterior). As operações e dimensões da segunda camada são detalhadas na imagem a seguir:\n<img src=\"http://adolfo.data2learning.com/ludiico/images/mlp_dimensions2.png\" width=\"70%\" />\nAssim, como na primeira camada, a saída é uma matriz 100x256 que será aplicada uma função de atividação do tipo RELU. A camada de saída recebe os dados da segunda e gera como saída uma vetor que represente as 10 classes. Nesse caso, a saída será de 100x10, por conta do batch. Em outras palavras, estamos gerando um vetor que pesa cada possível classe para cada uma das 100 instâncias passadas como entrada. A imagem ilustra as operações e dimensões da camada de saída.\n<img src=\"http://adolfo.data2learning.com/ludiico/images/mlp_dimensions3.png\" width=\"70%\" />\nÀ saída da rede é aplicada a função Softmax que transforma os valores dos vetores em probabilidades. 
A posição que possuir o maior valor de probabilidade representa a classe à qual o dígito pertence. \nUma rápida explicação de como funciona a softmax pode ser encontrada neste vídeo.\nExplicado o modelo, vamos para o código.",
"# Imports necessários\n\nimport tensorflow as tf\n\n#Definição de parâmetros\nlearning_rate = 0.001\ntraining_epochs = 15\nbatch_size = 100\ndisplay_step = 1\n\n\n#Parâmetros da rede\nn_hidden_1 = 256 # Quantidade de features da primeira camada escondida \nn_hidden_2 = 256 # Quantidade de features da segunda camada escondida\nn_input = 784 # Dados de entrada no MNIST (28 * 28 = 784 (quantidade de pixels da imagem))\nn_classes = 10 # Número total de classes no MNIST (dígitos de 0-9)\n\n# Instanciação dos Input do Grafo no Tensorflow \n\nx = tf.placeholder(tf.float32, [None, n_input]) # Irá armazenar os dados de entrada\ny = tf.placeholder(tf.float32, [None, n_classes]) #Irá armazenar os dados de saída",
"O modelo é implementado dentro da função multilayer_perceptron. Na função criamos cada camada de acordo com os dados passados. É muito importante que as dimensões das variáveis passadas tenham sido definidas corretamente.",
"def multilayer_perceptron(x, weights, biases):\n \n # Primeira camada como função de ativação RELU\n \n layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n layer_1 = tf.nn.relu(layer_1)\n \n # Segunda camada com funç!ao de ativação RELU\n \n layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n layer_2 = tf.nn.relu(layer_2)\n \n #Camada de Saída\n out_layer = tf.matmul(layer_2, weights['out']) + biases['out']\n \n return out_layer",
"Os pesos e bias utilizados serão armazenados em dois dicionários: weights e biases.",
"weights = {\n 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),\n 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))\n}\nbiases = {\n 'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n 'b2': tf.Variable(tf.random_normal([n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([n_classes]))\n}",
"Construindo o modelo:",
"pred = multilayer_perceptron(x, weights, biases)",
"Uma vez que o modelo foi criado, podemos treiná-lo. O primeiro passo é definir como vai ser calculado o custo da solução e, em seguida, o método que será utilizado para otimizar o modelo. Três métodos são importantes nesta etapa:\n\ntf.nn.softmax_cross_entropy_with_logits\ntf.reduce_mean\ntf.train.AdamOptimizer\n\nDetalhes destes métodos podem ser encontrados nos links de cada método. Basicamente, a rede será executada e à saída será aplicada a função softmax para transformar a saída em um vetor de probabilidades. A posição do vetor com maior valor de probabilidade corresponde à classe na qual a entrada é classificada. Esse resultado é comparado com o resultado esperado em y (aprendizado supervisionado) e o custo é calculado. O treinamento será executado com o objetivo de minimizar este custo, ou seja, reduzir a taxa de erro.",
"cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)",
"O código a seguir executa a etapa de treinamento. Detalhes são dados ao decorrer do código.",
"# Inicializa as variáveis\ninit = tf.global_variables_initializer()\n\n# Executa o grafo que representa o modelo construído\nwith tf.Session() as sess:\n sess.run(init)\n\n '''\n O ciclo de treinamento é chamado de épocas. Em cada época uma quantidade de dados de entrada (batch) é passada\n como entrada para a rede. Ao final de cada época, os parâmetros são atualizados de acordo com o treinamento e novos\n dados são dados como entrada.\n '''\n for epoch in range(training_epochs):\n \n avg_cost = 0. #Armazena a média do custo calculado\n \n total_batch = int(mnist.train.num_examples/batch_size) # Define o total de épocas: total da base / # batch\n \n # Loop por cada batch\n for i in range(total_batch):\n batch_x, batch_y = mnist.train.next_batch(batch_size)\n \n # Executa as operações de otimização dos parâmetros (backprop) and custo (retorna o valor de erro)\n _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})\n \n # Calcula a média do erro\n avg_cost += c / total_batch\n \n # Display logs per epoch step\n if epoch % display_step == 0:\n print(\"Epoch:\", '%04d' % (epoch+1), \"cost=\", \"{:.9f}\".format(avg_cost))\n \n print(\"Fim do treinamento\")\n \n # Testa o modelo\n correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n\n # Calcula a acurácia\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n print(\"Acurácia:\", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))",
"Base do notMNIST\nA tarefa resolvida anteriormente é relativamente simples, já que a base é bem simples e já foi pré-processada com um propósito bem específico. Pensando nisso é que foi criada uma outra base (notMNIST) para o mesmo propósito: classificar dígitos, neste caso letras de A-J. Apesar de parecer com o clássico dataset do MNIST, ele envolve uma tarefa mais complicada e os dados estão \"menos limpos\" do que os dados do MNIST. Para mais detalhes, acesse o link: http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html.\nO primeiro passo é baixar o dataset. Detalhes de como baixá-lo estão disponíveis em: \nhttps://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/udacity/1_notmnist.ipynb \nNeste tutorial, assumimos que o dataset já foi baixado na pasta dataset/.",
"# Imports necessários\nfrom __future__ import print_function\nimport numpy as np\nimport tensorflow as tf\nfrom six.moves import cPickle as pickle\nfrom six.moves import range\n\n#Carrega a base de dados \npickle_file = 'dataset/notMNIST.pickle'\n\nwith open(pickle_file, 'rb') as f:\n save = pickle.load(f)\n train_dataset = save['train_dataset']\n train_labels = save['train_labels']\n valid_dataset = save['valid_dataset']\n valid_labels = save['valid_labels']\n test_dataset = save['test_dataset']\n test_labels = save['test_labels']\n del save # hint to help gc free up memory\n print('Training set', train_dataset.shape, train_labels.shape)\n print('Validation set', valid_dataset.shape, valid_labels.shape)\n print('Test set', test_dataset.shape, test_labels.shape)\n\n# Formata os dados para as dimensões apropriadas (784)\n\nimage_size = 28\nnum_labels = 10\n\ndef reformat(dataset, labels):\n dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)\n # Map 0 to [1.0, 0.0, 0.0 ...], 1 to [0.0, 1.0, 0.0 ...]\n labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)\n return dataset, labels\n\ntrain_dataset, train_labels = reformat(train_dataset, train_labels)\nvalid_dataset, valid_labels = reformat(valid_dataset, valid_labels)\ntest_dataset, test_labels = reformat(test_dataset, test_labels)\nprint('Training set', train_dataset.shape, train_labels.shape)\nprint('Validation set', valid_dataset.shape, valid_labels.shape)\nprint('Test set', test_dataset.shape, test_labels.shape)",
"Vamos utilizar o mesmo método definido anteriormente: multilayer_perceptron que recebe como parâmetro os dados de entrada e as variáveis para armazenar os bias e weights.",
"weights = {\n 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),\n 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))\n}\nbiases = {\n 'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n 'b2': tf.Variable(tf.random_normal([n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([n_classes]))\n}\n\ntrain_dataset = train_dataset[:10000, :]\ntrain_labels = train_labels[:10000]\n\nnew_pred = multilayer_perceptron(train_dataset, weights, biases)\n\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=new_pred, labels=train_labels))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)",
"O passo seguinte é treinar o modelo. Observe que, diferentemente do exemplo anterior, neste exemplo vamos trabalhar com a base de treinamento para treinar os dados, a base de validação para testar o modelo ao longo das iterações e, ao final, testá-lo na base de teste.",
"# Inicializa as variáveis\n\ntraining_epochs = 30\n\ninit = tf.global_variables_initializer()\n\n# Executa o grafo que representa o modelo construído\nwith tf.Session() as sess:\n sess.run(init)\n\n '''\n O ciclo de treinamento é chamado de épocas. Em cada época uma quantidade de dados de entrada (batch) é passada\n como entrada para a rede. Ao final de cada época, os parâmetros são atualizados de acordo com o treinamento e novos\n dados são dados como entrada.\n '''\n for epoch in range(training_epochs):\n \n avg_cost = 0. #Armazena a média do custo calculado\n \n total_batch = int(train_dataset.shape[0]/batch_size) # Define o total de épocas: total da base / # batch\n \n # Loop por cada batch\n for i in range(total_batch):\n \n offset = (i * batch_size) % (train_labels.shape[0] - batch_size)\n \n batch_x = train_dataset[offset:(offset+batch_size), :]\n batch_y = train_labels[offset:(offset+batch_size), :]\n \n # Executa as operações de otimização dos parâmetros (backprop) and custo (retorna o valor de erro)\n _, c, prediction = sess.run([optimizer, cost, new_pred], feed_dict={x: batch_x, y: batch_y})\n \n # Calcula a média do erro\n avg_cost += c / total_batch\n \n # Display logs per epoch step\n if epoch % display_step == 0:\n print(\"Epoch:\", '%04d' % (epoch+1), \"cost=\", \"{:.9f}\".format(avg_cost))\n \n # Acurácia de Treinamento\n correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(train_labels, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n print(\"\\tAcurácia Treinamento:\", accuracy.eval({x: train_dataset, y: train_labels}))\n \n # Acurácia de Validação\n \n valid_prediction = multilayer_perceptron(valid_dataset, weights, biases)\n correct_valid_prediction = tf.equal(tf.argmax(valid_prediction, 1), tf.argmax(valid_labels, 1))\n accuracy_valid = tf.reduce_mean(tf.cast(correct_valid_prediction, \"float\"))\n print(\"\\tAcurácia Validação:\", accuracy_valid.eval({x: valid_dataset, y: valid_labels}))\n \n \n 
print(\"Fim do treinamento\")\n \n # Testa o modelo\n test_prediction = multilayer_perceptron(test_dataset, weights, biases)\n correct_test_prediction = tf.equal(tf.argmax(test_prediction, 1), tf.argmax(test_labels, 1))\n accuracy_test = tf.reduce_mean(tf.cast(correct_test_prediction, \"float\"))\n print(\"Acurácia:\", accuracy_test.eval({x: test_dataset, y: test_labels}))"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
bcorlett/Notebooks_Modelling
|
Corlett_Assign3.ipynb
|
mit
|
[
"12.850 - Assignment 3 | Bryce Corlett\nExploring Convergence Properties\nThis assignment was motivated by examining the different convergence properties of Jacobi, Gauss-Seidel, and SOR iteration schemes as applied to a toy 1-D elliptic problem:\n\\begin{align}\n \\frac{\\partial}{\\partial x} \\kappa \\frac{\\partial}{\\partial x} u = \\nu u\n\\end{align}\nThe problem is similar to those observed in the previous two assignments, but with the additional forced advection term ($\\nu u$), which appears in the $C_{k,k}$ term in the discretized equation:\n\\begin{align}\n u_{k+1}\\underbrace{\\left[\\frac{K_{k}}{\\Delta z^{c}{k+1} \\Delta z^{f}{k}}\\right]}{C{k,k+1}} - u_{k}\\underbrace{\\left[\\frac{K_{k}}{\\Delta z^{c}{k+1} \\Delta z^{f}{k}} + \\frac{K_{k-1}}{\\Delta z^{c}{k} \\Delta z^{f}{k}} - \\nu \\right]}{C{k,k}} + u_{k-1}\\underbrace{\\left[\\frac{K_{k-1}}{\\Delta z^{c}{k} \\Delta z^{f}{k}}\\right]}{C{k,k-1}} = 0\n\\end{align}\nWhen solving, the equation can be written as $[A]x = b$, where $[A]$ is the matrix of discrete values ($C_{k,k-1}$,$C_{k,k}$, and $C_{k,k+1}$), $x$ is a vector of the values you are solving for, and $b$ is a vector of the boundary conditions. The boundary conditions will affect the values within $[A]$, as the top and bottom cells are either forced with known values in the cells beneath (Dirichlet), or one known boundary value and a forced boundary flux (Dirichlet for the known boundary, and Neumann for the known flux boundary). 
In the first case, the known values move the respective portion of the $C_{k,k}$ term to the boundary conditions vector; in the second case, the flux is forced within the boundary conditions vector.\nOnce the discrete matrix $[A]$ is formed, the iteration processes break down the procedure in different manners according to whether they are wholly dependent on past iterations (Jacobi), use current-iteration estimates where available (Gauss-Seidel), or use a combination of current-iteration and past-iteration estimates (relaxed by some factor) to reach the solution more quickly (Succesive Over-Relaxation, or SOR). \nTo further grasp the differences, and what the following code is doing, we write the original equation as:\n\\begin{align}\n Ax = b\n\\end{align}\nwhere the matrix $A$ can be broken into three discrete matrices: the lower triangle ($\\triangleright$), the diagonal ($\\diagdown$), and the upper triangle ($\\triangleleft$), where $[A] = [\\triangleright] + [\\diagdown] + [\\triangleleft]$.\nUsing this notation, we can write the iterative methods as $B x_{n+1} = b - C x_{n}$, where $B$ and $C$ are formed from the original matrix $A$. Thus:\n\\begin{align}\n Jacobi: & B = [\\diagdown] & C = [\\triangleright] + [\\triangleleft] \\\n Gauss-Seidel: & B = [\\triangleright] + [\\diagdown] & C = [\\triangleleft] \\\n SOR: & B = \\omega[\\triangleright] + [\\diagdown] & C = (1-\\omega)[\\diagdown] + \\omega[\\triangleleft] \\\n\\end{align}\nThe convergence of a system can be quantified as its spectral radius ($\\rho$), where $\\rho = max(~|\\lambda_{i}|~)$, and $\\lambda_{i}$ are the eigenvalues of $\\left[ B^{-1}C \\right]$; thus, how quickly a system converges is intimately related to the iterative method and the type of boundary conditions. \nNow, we get to the code.",
"#Import toolboxes\nfrom scipy import sparse #Allows me to create sparse matrices (i.e. not store all of the zeros in the 'A' matrix)\nfrom scipy.sparse import linalg as linal\nfrom numpy import * #To make matrices and do matrix manipulation\nimport matplotlib.pyplot as plt \n\n%matplotlib inline\n",
"Here I create the code that will perform a specified iteration scheme, with the option of specifying a value for $\\omega$.",
"def space_iterate(method,A,b,resid,**optional):\n '''Uses sparse matrices'''\n \n if ('w' in optional):\n #print 'w found, it is ', optional['w']\n w = optional['w']\n else:\n #print 'no w found, assumed to be 1.17 if needed.'\n w = 1.17\n #w = 1.02\n \n n = max(A.get_shape())\n t = 0;\n Q = b[:,0]\n Rold = 100 #initialize value\n Rnew = 1\n while (absolute(Rnew - Rold)/float((absolute([Rold]))))*100.0 > resid:\n t = t+1\n Rold = Rnew\n Q = append(Q,Q[:,0].dot(0.),axis=1)\n \n if method == 'jacobi': #Jacobi iteration scheme\n \n B = sparse.tril(A,0) - sparse.tril(A,-1) #only the diagonal\n C = sparse.triu(A,1) + sparse.tril(A,-1) #only off-diagonal\n \n Q[:,t] = linal.inv(B).dot(b - C.dot(Q[:,t-1]))\n \n elif method == 'gaussseidel': #Gauss-Seidel iteration scheme\n \n B = sparse.tril(A,0)\n C = sparse.triu(A,1)\n \n Q[:,t] = linal.inv(B).dot(b - C.dot(Q[:,t-1]))\n \n elif method == 'sor': #SOR method\n #Q[i,t] = w*Q[i,t] + (1-w)*Q[i,t-1]\n\n B = sparse.tril(A,-1)*(float(w)) + (sparse.tril(A,0)-sparse.tril(A,-1))\n C = ( (sparse.tril(A,0)-sparse.tril(A,-1)).dot(float(w-1))) + (sparse.triu(A,1).dot(float(w)) )\n \n #B = - sparse.tril(A,-1)*(float(w)) + (sparse.tril(A,0)-sparse.tril(A,-1))\n #C = ( (sparse.tril(A,0)-sparse.tril(A,-1)).dot(float(1-w))) + (sparse.triu(A,1).dot(float(w)) )\n \n Q[:,t] = linal.inv(B).dot((float(w)*b) - C.dot(Q[:,t-1]))\n \n else: \n print('Improper Option - program closing.')\n return\n \n Rnew = mean(Q[:,t])\n #print (absolute(Rnew - Rold)/float((absolute(Rold))))*100.0\n \n B = B.tocsc() #convert sparse matrices to csc format\n C = C.tocsc()\n \n print('Iterations = '+str(t)+'; Spectral Radius = '+str(absolute(linalg.eigvals(linal.inv(B).dot(C).todense())).max()))\n #(absolute(linal.eigs(linal.inv(B.tocsc()).dot(C.tocsc()),k=ndim(b)-1,return_eigenvectors=False)).max())\n \n I = t\n S = absolute(linalg.eigvals(linal.inv(B).dot(C).todense())).max()\n \n #print('The spectral radius of the problem is 
'+str(absolute(linal.eigs(linal.inv(B).dot(C),k=ndim(b)-1,return_eigenvectors=False)).max()) )\n return[Q,I,S]",
"The following code will initialize the $[A]$ and $[b]$ matrices according to the inputs, and depending on which code I call, as they use Neumann + Dirichlet and Dirichlet-only boundary conditions, respectively.",
"def neumann_stable(n,v,u0,Z_f,Z_c,K):\n #Create Neumann + Dirichlet boundary conditions, yielding matrices A + b\n A=zeros((3,n)) # For solving for 'n+1' solution\n for item in range(1,n+1): #Start from bed and work to surface\n #j-1\n if item>1: \n A[0,item-2]=+(K[item-1]/((Z_f[item]-Z_f[item-1])*(Z_c[item]-Z_c[item-1])) )\n #j\n A[1,item-1]=-( (K[item-1]/((Z_f[item]-Z_f[item-1])*(Z_c[item]-Z_c[item-1]))) + (K[item]/((Z_f[item]-Z_f[item-1])*(Z_c[item+1]-Z_c[item]))) + v)\n if item == n: #Sets free-slip boundary condition at the surface\n A[1,item-1]=-( (K[item-1]/((Z_f[item]-Z_f[item-1])*(Z_c[item]-Z_c[item-1]))) + v)\n #j+1\n if item != n:\n A[2,item]=+(K[item]/((Z_f[item]-Z_f[item-1])*(Z_c[item+1]-Z_c[item])) )\n\n A = sparse.spdiags(A,array([-1,0,1]),n,n)\n \n # Construct Boundary Condition Matrix\n b=zeros(size(A,1))\n b[0]=b[0] + (u0* (K[item-1]/((Z_f[item]-Z_f[item-1])*(Z_c[item]-Z_c[item-1]))) ) #Because u0 is zero, this line does nothing.\n\n # Define + Apply guess + boundary conditions\n b=matrix(b).T\n return[A,b]\n \ndef dirichlet(n,v,u0,u1,Z_f,Z_c,K):\n #Create Dirichlet boundary conditions, yielding matrices A + b\n A=zeros((3,n)) # For solving for 'n+1' solution\n for item in range(1,n+1): #Start from bed and work to surface\n #j-1\n if item>1: \n A[0,item-2]=+(K[item-1]/((Z_f[item]-Z_f[item-1])*(Z_c[item]-Z_c[item-1])) )\n #j\n A[1,item-1]=-( (K[item-1]/((Z_f[item]-Z_f[item-1])*(Z_c[item]-Z_c[item-1]))) + (K[item]/((Z_f[item]-Z_f[item-1])*(Z_c[item+1]-Z_c[item]))) + v )\n #j+1\n if item != n:\n A[2,item]=+(K[item]/((Z_f[item]-Z_f[item-1])*(Z_c[item+1]-Z_c[item])) )\n\n A = sparse.spdiags(A,array([-1,0,1]),n,n)\n \n # Construct Boundary Condition Matrix\n b=zeros(size(A,1))\n b[0]=b[0] + (u0* (K[1-1]/((Z_f[1]-Z_f[1-1])*(Z_c[1]-Z_c[1-1]))) ) \n b[n-1]=b[n-1] + (u1* (K[n-1]/((Z_f[n]-Z_f[n-1])*(Z_c[n]-Z_c[n-1]))) ) \n\n # Define + Apply guess + boundary conditions\n b=matrix(b).T\n return [A,b]\n\n#Initialize a comparison\nn=20 #n must be greater than 
6 for my SOR code to work - the issue lies with calculating eigenvalues.\nK=1\nv=0.3\nu0=0.\nu1=1.\n\n[A,b]=dirichlet(n,v,u0,1,matrix(arange(0,n+2)).T,matrix(arange(0,n+2)).T,K*ones([n+1,1]));\n[Q,I,S]=space_iterate('sor',A,b,1,w=1.25);",
"1. Sensitivity of the System to Boundary Conditions\nNote that the spectral radius changes as the boundary conditions change, but not when the values change. The spectral radius is not influenced by boundary condition values per se, but it is influenced by the type of boundary conditions present within the system. This is because the values are not introduced to the matrices $[B]$ or $[C]$, but the boundary conditions will influence the values on the diagonals of the matrix $[A]$, which will either end up in matrices $[B]$ or $[C]$ depending on the iteration scheme. \nSee below for comparison values:",
"#Initialize a comparison\nn=20 #n must be greater than 6 for my SOR code to work - the issue lies with calculating eigenvalues.\nK=0.3\nv=0.5\nu0=0.\nu1=1.\n\n#[A,b]=dirichlet(n,v,u0,u1,matrix(arange(0,n+2)).T,matrix(arange(0,n+2)).T,K*ones([n+1,1]))\n#[A,b]=neumann_stable(n,v,u1,matrix(arange(0,n+2)).T,matrix(arange(0,n+2)).T,K*ones([n+1,1]))",
"Variations in boundary condition values:",
"print 'Variations under changing boundary values, using Dirichlet boundary conditions:'\nprint '\\n Conditions with u_surface = 1: '\n[A,b]=dirichlet(n,v,u0,1,matrix(arange(0,n+2)).T,matrix(arange(0,n+2)).T,K*ones([n+1,1]));\n[Q,I,S]=space_iterate('sor',A,b,1);\n\nprint '\\n\\n Conditions with u_surface = 2: '\n[A,b]=dirichlet(n,v,u0,2,matrix(arange(0,n+2)).T,matrix(arange(0,n+2)).T,K*ones([n+1,1]));\n[Q,I,S]=space_iterate('sor',A,b,1);\n\nprint '\\n\\n Conditions with u_surface = 3: '\n[A,b]=dirichlet(n,v,u0,3,matrix(arange(0,n+2)).T,matrix(arange(0,n+2)).T,K*ones([n+1,1]));\n[Q,I,S]=space_iterate('sor',A,b,1);",
"Variations in the boundary condition itself:",
"print 'Variations under changing boundary conditions | Dirichlet v Neumann + Dirichlet:'\nprint '\\n\\n Conditions with Neumann + Dirichlet: '\n[A,b]=neumann_stable(n,v,1,matrix(arange(0,n+2)).T,matrix(arange(0,n+2)).T,K*ones([n+1,1]));\n[Q,I,S]=space_iterate('sor',A,b,1);\n\nprint '\\n\\n Conditions with Dirichlet: '\n[A,b]=dirichlet(n,v,1,0,matrix(arange(0,n+2)).T,matrix(arange(0,n+2)).T,K*ones([n+1,1]));\n[Q,I,S]=space_iterate('sor',A,b,1);",
"2. Sensitivity of the solution to $\\nu$\nThe convergence of the system is sensitive to the value of $\\nu$, as $\\nu$ will change the values on the diagonal of the matrix $[A]$, changing the determinate of $[B]$ in the process of calculating its inverse, and thus the spectral radius ($\\rho$).",
"[A,b]=dirichlet(n,0.5,1,0,matrix(arange(0,n+2)).T,matrix(arange(0,n+2)).T,K*ones([n+1,1]))\n[Q,I,S]=space_iterate('jacobi',A,b,1)\n[Q,I,S]=space_iterate('gaussseidel',A,b,1)\n[Q,I,S]=space_iterate('sor',A,b,1)\n \n\nnu=[1.0e3,1.0e2,1.0e1,9.0,8.0,7.0,6.0,5.0,4.0,3.0,2.0,1.0e0,.9,.8,.7,.6,.5,.4,.3,.2,1.0e-1,1.0e-2,1.0e-3]\n\n#fig, axs = plt.subplots(1,2)\n#,ax=axs[1]\nplt.figure(figsize=(20,5))\nplt.subplot(121)\nfor R in range(0,size(nu)): #Varies the value of nu\n [A,b]=dirichlet(n,nu[R],1,0,matrix(arange(0,n+2)).T,matrix(arange(0,n+2)).T,K*ones([n+1,1]))\n \n [Q,Is,Ss]=space_iterate('sor',A,b,1)\n [Q,Ig,Sg]=space_iterate('gaussseidel',A,b,1)\n [Q,Ij,Sj]=space_iterate('jacobi',A,b,1)\n \n plt.semilogx(nu[R],Ss,'ok')\n plt.semilogx(nu[R],Sg,'sk')\n plt.semilogx(nu[R],Sj,'+k')\n \n \n if int == 1.0:\n plt.legend(['SOR','Gauss-Seidel','Jacobi'])\n #plt.title('Surface Velocity')\n plt.ylabel(r'$\\rho$', fontsize=20)\n plt.xlabel(r'$\\nu$', fontsize=20)\n \nplt.subplot(122)\nfor R in range(0,size(nu)): #Varies the value of nu\n [A,b]=dirichlet(n,nu[R],1,0,matrix(arange(0,n+2)).T,matrix(arange(0,n+2)).T,K*ones([n+1,1]))\n \n [Q,Is,Ss]=space_iterate('sor',A,b,1)\n [Q,Ig,Sg]=space_iterate('gaussseidel',A,b,1)\n [Q,Ij,Sj]=space_iterate('jacobi',A,b,1)\n \n plt.semilogx(nu[R],Is,'ok')\n plt.semilogx(nu[R],Ig,'sk')\n plt.semilogx(nu[R],Ij,'+k')\n \n if R == 1.0:\n plt.legend(['SOR','Gauss-Seidel','Jacobi'])\n #plt.title('Surface Velocity')\n plt.ylabel(r'$Iterations$', fontsize=20)\n plt.xlabel(r'$\\nu$', fontsize=20)\n",
"Reviewing the effect of $\\nu$\nAfter plotting these values, it became apparent that further investigation of the SOR factor was needed. After testing the matrices with varying values for this factor, it appears that the factor can be chosen specifically for given conditions to maximize the code's efficiency. The downside of this is that the SOR method will become inaccurate if the SOR factor is not tailored to matrix $[A]$, i.e. adjusted relative to the magnitude of the terms $C_{k,k}$, as seen in the flat-lining of the SOR spectral radius for given values of $\\nu$, or effectively values of $\\frac{\\nu}{C_{k,k}}$.\nQuestioning the effectiveness of the SOR code\nAs the SOR code appears to yield highly-consistent values for the number of iterations required, I am unsure as to whether the issue is with my SOR code, or if the issue is with the SOR procedure. I am not sure if there is a way to pre-calibrate the SOR factor to work with the given conditions, but I hope to look into this in the next week or so.\n3. Convergence of the system on N\nThe system takes longer time to converge for matrices with larger N's, as the computations involve matrices of size N x N; however, it appears that the system requires the same number of iterations in spite of the spectral radius increasing (which should indicate that the procedure takes more iterations to reach the specified tolerance). This may be an artifact of my SOR code; further investigation is needed, which will take place this weekend and next week to iron out any bugs that I might be able to find in the SOR portion of my code. Fortunately, the Gauss-Seidel and Jacobi schemes appear to be working fine.",
"K=1.\nv=1.\nu0=0.\nu1=1.\n\nN=(arange(0,20)+1)*10\n\n#fig, axs = plt.subplots(1,2)\n#,ax=axs[1]\nplt.figure(figsize=(20,5))\nplt.subplot(121)\n\nfor R in range(0,size(N)): #Varies the value of n\n [A,b]=dirichlet(N[R],v,1,0,matrix(arange(0,N[R]+2)).T,matrix(arange(0,N[R]+2)).T,K*ones([N[R]+1,1]))\n \n [Q,Is,Ss]=space_iterate('sor',A,b,1)\n [Q,Ig,Sg]=space_iterate('gaussseidel',A,b,1)\n [Q,Ij,Sj]=space_iterate('jacobi',A,b,1)\n \n plt.plot(N[R],Ss,'ok')\n plt.plot(N[R],Sg,'sk')\n plt.plot(N[R],Sj,'+k')\n \n if R == 1.0:\n plt.legend(['SOR','Gauss-Seidel','Jacobi'])\n #plt.title('Surface Velocity')\n plt.ylabel(r'$\\rho$', fontsize=20)\n plt.xlabel(r'$n$', fontsize=20)\n \nplt.subplot(122)\nfor R in range(0,size(N)): #Varies the value of n\n [A,b]=dirichlet(N[R],v,1,0,matrix(arange(0,N[R]+2)).T,matrix(arange(0,N[R]+2)).T,K*ones([N[R]+1,1]))\n \n [Q,Is,Ss]=space_iterate('sor',A,b,1)\n [Q,Ig,Sg]=space_iterate('gaussseidel',A,b,1)\n [Q,Ij,Sj]=space_iterate('jacobi',A,b,1)\n \n plt.plot(N[R],Is,'ok')\n plt.plot(N[R],Ig,'sk')\n plt.plot(N[R],Ij,'+k')\n \n if R == 1.0:\n plt.legend(['SOR','Gauss-Seidel','Jacobi'])\n #plt.title('Surface Velocity')\n plt.ylabel(r'$Iterations$', fontsize=20)\n plt.xlabel(r'$n$', fontsize=20)",
"4. Estimate Iterations\nThe number of iterations required to reach a given error threshold ($\\delta$) can be estimated by referring back to the base of our definition of the spectral radius ($\\rho$). From $\\rho = max(~|\\lambda_{i}|~)$, where $\\lambda_{i}$ are the eigenvalues of $\\left[ B^{-1}C \\right]$, from \n\\begin{align}\n B x_{n+1} = b - C x_{n}\n\\end{align}\nTaking $e_{n} = x_{n+1}-x_{n}$, we can simplify the above equation to \n\\begin{align}\n e_{n} = \\left[ B^{-1} C \\right] e_{n-1}\n\\end{align}\nKeeping in mind that $e_{n-1} = \\left[ B^{-1} C \\right] e_{n-2}$, we can further simplify to\n\\begin{align}\n e_{n} = \\left[ B^{-1} C \\right]^{n} e_{0}\n\\end{align}\nwhere the final difference is a function of the initial difference times the basis of $\\rho$ to some power. As we are requiring that $e_{n}$, or the difference between iterations, be smaller than some threshold ($\\delta$), we can restate the problem as\n\\begin{align}\n e_{n} = \\left[ B^{-1} C \\right]^{n} e_{0} < \\delta\n\\end{align}\nwhere $n$ is the number of iterations required to reach our error threshold, and all values other than $n$ are matrices. Solving for $n$, we reach the following relation:\n\\begin{align}\n e_{0}^{-1} \\delta > \\left[ B^{-1}C \\right]^{n} \\\n \\frac{\\log( e_{0}^{-1} \\delta )}{\\log( B^{-1}C )} < n \\\n \\\n \\log( e_{0}^{-1} \\delta ) - \\log( B^{-1}C ) < n \n\\end{align}\nIn effect, we can estimate the number of iterations required to reach the threshold by calculating the difference between our initial conditions and our first estimate ($e_{0}$), knowing $\\delta$, and calculating $\\left[ B^{-1}C \\right]$ from our given system and boundary conditions. The number of iterations required to reach a given tolerance can be calculated, but depends on the specific conditions of the problem. 
In general, however, the smaller $\\left[ B^{-1}C \\right]$ becomes (and thus $\\rho$), the faster the scheme will converge to the specified tolerance.\nFor a specified problem, the number of iterations required to reach a final result will also change depending on the method of iteration, as each method utilizes a different form of matrices $B$ and $C$, as shown previously."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
GoogleCloudPlatform/vertex-ai-samples
|
notebooks/community/managed_notebooks/sentiment_analysis/Sentiment_Analysis.ipynb
|
apache-2.0
|
[
"Sentiment Analysis\nTable of contents\n\nOverview\nDataset\nObjective\nCosts\nLoad the data\nPrepare the training data\nCreate a dataset in Vertex AI\nTrain the model using Vertex AI\nDeploy the model to the endpoint\nPrediction\nReview visualization\nClean up\n\nOverview\n<a name=\"section-1\"></a>\nThis notebook demonstrates how to perform sentiment analysis on a Stanford movie reviews dataset using AutoML Natural Language and how to deploy the sentiment analysis model on Vertex AI to get predictions. \nNote: This notebook file was developed to run in a Vertex AI Workbench managed notebooks instance using the Python (Local) kernel. Some components of this notebook may not work in other notebook environments.\nDataset\n<a name=\"section-2\"></a>\nThe dataset used in this notebook is a part of the Stanford Sentiment Treebank dataset, which consists of movie review phrases and their corresponding sentiment scores.\nObjectives\n<a name=\"section-3\"></a>\nThe objectives of this notebook include:\n\nLoading the required data. 
\nPreprocessing the data.\nSelecting the required data for the model.\nLoading the dataset into Vertex AI managed datasets.\nTraining a sentiment model using AutoML Natural Language.\nEvaluating the model.\nDeploying the model on Vertex AI.\nGetting predictions.\nClean up.\n\nCosts\n<a name=\"section-4\"></a>\nThis tutorial uses the following billable components of Google Cloud:\n\nVertex AI\nCloud Storage\n\nLearn about Vertex AI\npricing and Cloud Storage\npricing, and use the Pricing\nCalculator\nto generate a cost estimate based on your projected usage.\nBefore you begin\nKernel selection\nSelect <b>Python</b> kernel while running this notebook on Vertex AI's managed instances and ensure that the following libraries are installed in the environment where this notebook is being run.\n\nwordcloud\nPandas \n\nAlong with the above libraries, the following google-cloud libraries are also used in this notebook.\n\ngoogle.cloud.aiplatform\ngoogle.cloud.storage\n\nInstall required packages",
"! pip install wordcloud",
"If you are using Vertex AI Workbench, your environment already meets all the requirements to run this notebook. You can skip this step.",
"! pip install google-cloud-aiplatform\n! pip install fsspec\n! pip install gcsfs",
"Set your project ID\nIf you don't know your project ID, you may be able to get your project ID using gcloud.",
"import os\n\nPROJECT_ID = \"\"\n\n# Get your Google Cloud project ID from gcloud\nif not os.getenv(\"IS_TESTING\"):\n shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null\n PROJECT_ID = shell_output[0]\n print(\"Project ID: \", PROJECT_ID)",
"Otherwise, set your project ID here.",
"if PROJECT_ID == \"\" or PROJECT_ID is None:\n PROJECT_ID = \"[your-project-id]\" # @param {type:\"string\"}\n %env GOOGLE_CLOUD_PROJECT PROJECT_ID\n print(\"Project ID: \", PROJECT_ID)",
"Timestamp\nIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.",
"from datetime import datetime\n\nTIMESTAMP = datetime.now().strftime(\"%Y%m%d%H%M%S\")",
"Authenticate your Google Cloud account\nIf you are using Vertex AI Workbench managed notebooks or user-managed notebooks, your environment is already authenticated. Skip this step.\nIf you are using Colab, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.\nOtherwise, follow these steps:\nIn the Cloud Console, go to the Create service account key page.\nClick Create service account.\nIn the Service account name field, enter a name, and click Create.\nIn the Grant this service account access to project section, click the Role drop-down list. Type \"Vertex AI\" into the filter box, and select Vertex AI Administrator. Type \"Storage Object Admin\" into the filter box, and select Storage Object Admin.\nClick Create. A JSON file that contains your key downloads to your local environment.\nEnter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.",
"import os\nimport sys\n\n# If you are running this notebook in Colab, run this cell and follow the\n# instructions to authenticate your GCP account. This provides access to your\n# Cloud Storage bucket and lets you submit training jobs and prediction\n# requests.\n\n# The Google Cloud Notebook product has specific requirements\nIS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists(\"/opt/deeplearning/metadata/env_version\")\n\n# If on Google Cloud Notebooks, then don't execute this code\nif not IS_GOOGLE_CLOUD_NOTEBOOK:\n if \"google.colab\" in sys.modules:\n from google.colab import auth as google_auth\n\n google_auth.authenticate_user()\n\n # If you are running this notebook locally, replace the string below with the\n # path to your service account key and run this cell to authenticate your GCP\n # account.\n elif not os.getenv(\"IS_TESTING\"):\n %env GOOGLE_APPLICATION_CREDENTIALS ''",
"Import required libraries and define constants",
"import os\nimport random\nfrom typing import Dict, List, Optional, Sequence, Tuple, Union\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom google.cloud import aiplatform, storage\nfrom wordcloud import STOPWORDS, WordCloud\n\nLOCATION = \"us-central1\"\nBUCKET_NAME = \"{your_bucket_name}\"",
"Load the data\n<a name=\"section-5\"></a>",
"phrases = pd.read_csv(\n \"gs://vertex_ai_managed_services_demo/sentiment_analysis/stanfordSentimentTreebank/dictionary.txt\",\n sep=\"|\",\n)\nphrases.columns = [\"text\", \"phrase ids\"]\nscores = pd.read_csv(\n \"gs://vertex_ai_managed_services_demo/sentiment_analysis/stanfordSentimentTreebank/sentiment_labels.txt\",\n sep=\"|\",\n)\ndf = phrases.merge(scores, how=\"left\", on=\"phrase ids\")\nprint(df.head(5))\n\nprint(max(df[\"sentiment values\"]), min(df[\"sentiment values\"]))",
"The data itself doesn't contain any feature names and thus needs its columns to be renamed. dictionary.txt contains all phrases and their IDs, separated by a vertical line |. sentiment_labels.txt contains all phrase ids and the corresponding sentiment scores, separated by a vertical line. Four classes are created by mapping the positivity probability using the following cut-offs:\n[0, 0.25], (0.25, 0.5], (0.5, 0.75],(0.75, 1.0]\nCreate labels",
"VERYNEGATIVE = 0\nNEGATIVE = 1\nPOSITIVE = 2\nVERYPOSITIVE = 3\n\nbins = [0, 0.25, 0.5, 0.75, 1]\nlabels = [VERYNEGATIVE, NEGATIVE, POSITIVE, VERYPOSITIVE]\ndf[\"label\"] = pd.cut(df[\"sentiment values\"], bins=bins, labels=labels)\nprint(df.head())",
"Prepare the training data\n<a name=\"section-6\"></a>\nTo train a sentiment analysis model, you provide representative samples of the type of content you want AutoML Natural Language to analyze, each labeled with a value indicating how positive the sentiment is within the content.\nThe sentiment score is an integer ranging from 0 (relatively negative) to a maximum value of your choice (positive). For example, if you want to identify whether the sentiment is negative, positive, or neutral, you would label the training data with sentiment scores of 0 (negative), 1 (neutral), and 2 (positive). If you want to capture more granularity with five levels of sentiment, you still label documents with the most negative sentiment as 0 and use 4 for the most positive sentiment. The Maximum sentiment score (sentiment_max) for the dataset would be 4.\nSelect a subset of the orginal data to train on that consists of extreme positive and negative samples. Here the maximum sentiment would be 1. In the <i>ML use</i> column you could provide if it is a TRAIN/VALIDATION/TEST sample or let the Vertex AI randomly assign. \nEach line in a CSV file refers to a single document. The following example shows the general format of a valid CSV file. The ml_use column is optional.\n[ml_use],gcs_file_uri|\"inline_text\",sentiment,sentimentMax\nFor more information visit the official documentation\nSelect a subset of the data",
"subset_data = df[df[\"label\"].isin([VERYNEGATIVE, VERYPOSITIVE])].reset_index(drop=True)\nsubset_data.head()\n\nsubset_data[\"label\"] = subset_data[\"label\"].apply(lambda x: 1 if x == 3 else 0)\nsubset_data[\"ml_use\"] = \"\"\nsubset_data[\"sentimentMax\"] = 1\nsubset_data = subset_data[[\"ml_use\", \"text\", \"label\", \"sentimentMax\"]]\nprint(subset_data.head())",
"Create an import csv",
"FILE_NAME = \"sentiment_data.csv\"\nsubset_data.to_csv(FILE_NAME, index=False)\n# Upload the saved model file to Cloud Storage\nBLOB_PATH = \"sentiment_analysis/\"\nBLOB_NAME = os.path.join(BLOB_PATH, FILE_NAME)\nbucket = storage.Client().bucket(BUCKET_NAME)\nblob = bucket.blob(BLOB_NAME)\nblob.upload_from_filename(FILE_NAME)",
"Create a dataset in Vertex AI\n<a name=\"section-7\"></a>\nThe following code uses the Vertex AI SDK for Python to both create a dataset and import data.",
"def import_data_text_sentiment_analysis(\n project: str,\n location: str,\n display_name: str,\n src_uris: Union[str, List[str]],\n sync: bool = True,\n):\n aiplatform.init(project=project, location=location)\n\n ds = aiplatform.TextDataset.create(\n display_name=display_name,\n gcs_source=src_uris,\n import_schema_uri=aiplatform.schema.dataset.ioformat.text.sentiment,\n sync=sync,\n )\n\n ds.wait()\n\n print(ds.display_name)\n print(ds.resource_name)\n return ds\n\ndisplay_name = \"sentimentanalysis\"\nsrc_uris = [f\"gs://{BUCKET_NAME}/sentiment_analysis/sentiment_data.csv\"]\ndataset = import_data_text_sentiment_analysis(\n PROJECT_ID, LOCATION, display_name, src_uris\n)",
"Train the model using Vertex AI\n<a name=\"section-8\"></a>\nThe following code uses the Vertex AI SDK for Python to train the model on the above created dataset. You can get the dataset id from the Dataset section of Vertex AI in the console or from the resource name in the dataset object created above. You can specify how the training data is split between the training, validation, and test sets by setting the fraction_split variables.",
"def create_training_pipeline_text_sentiment_analysis(\n project: str,\n location: str,\n display_name: str,\n dataset_id: str,\n model_display_name: Optional[str] = None,\n sentiment_max: int = 10,\n training_fraction_split: float = 0.8,\n validation_fraction_split: float = 0.1,\n test_fraction_split: float = 0.1,\n sync: bool = True,\n):\n aiplatform.init(project=project, location=location)\n\n job = aiplatform.AutoMLTextTrainingJob(\n display_name=display_name,\n prediction_type=\"sentiment\",\n sentiment_max=sentiment_max,\n )\n\n text_dataset = aiplatform.TextDataset(dataset_id)\n\n model = job.run(\n dataset=text_dataset,\n model_display_name=model_display_name,\n training_fraction_split=training_fraction_split,\n validation_fraction_split=validation_fraction_split,\n test_fraction_split=test_fraction_split,\n sync=sync,\n )\n\n model.wait()\n\n print(model.display_name)\n print(model.resource_name)\n print(model.uri)\n return model\n\ndisplay_name = \"sentimentanalysis\"\ndataset_id = dataset.resource_name.split(\"/\")[-1]\nprint(dataset_id)\nmodel = create_training_pipeline_text_sentiment_analysis(\n PROJECT_ID, LOCATION, display_name, dataset_id, sentiment_max=1\n)",
"Deploy the model to the endpoint\n<a name=\"section-9\"></a>\nCreate the endpoint",
"def create_endpoint(\n project: str,\n display_name: str,\n location: str,\n):\n aiplatform.init(project=project, location=location)\n\n endpoint = aiplatform.Endpoint.create(\n display_name=display_name,\n project=project,\n location=location,\n )\n\n print(endpoint.display_name)\n print(endpoint.resource_name)\n return endpoint\n\ndisplay_name = \"sentiment-analysis\"\nendpoint = create_endpoint(PROJECT_ID, display_name, LOCATION)",
"Deploy the model\nThe following code uses the Vertex AI SDK for Python to deploy the model to a endpoint. You can get the model id from the models section of Vertex AI in the console",
"def deploy_model_with_automatic_resources(\n project,\n location,\n model_name: str,\n endpoint: Optional[aiplatform.Endpoint] = None,\n deployed_model_display_name: Optional[str] = None,\n traffic_percentage: Optional[int] = 0,\n traffic_split: Optional[Dict[str, int]] = None,\n min_replica_count: int = 1,\n max_replica_count: int = 1,\n metadata: Optional[Sequence[Tuple[str, str]]] = (),\n sync: bool = True,\n):\n \"\"\"\n model_name: A fully-qualified model resource name or model ID.\n Example: \"projects/123/locations/us-central1/models/456\" or\n \"456\" when project and location are initialized or passed.\n \"\"\"\n\n aiplatform.init(project=project, location=location)\n\n model = aiplatform.Model(model_name=model_name)\n model.deploy(\n endpoint=endpoint,\n )\n model.wait()\n print(model.display_name)\n print(model.resource_name)\n return model\n\nmodel_id = \"\"\nmodel = deploy_model_with_automatic_resources(PROJECT_ID, LOCATION, model_id, endpoint)",
"Prediction\n<a name=\"section-10\"></a>\nAfter deploying the model to an endpoint use the Vertex AI API to request an online prediction. Filter the data that you haven't used for the training and pick longer reviews to test the model.",
"def predict_text_sentiment_analysis_sample(endpoint, content):\n print(content)\n response = endpoint.predict(instances=[{\"content\": content}], parameters={})\n\n for prediction_ in response.predictions:\n print(prediction_)\n\ntest_data_pos = df[df[\"label\"].isin([POSITIVE])].reset_index(drop=True)\ntest_data_neg = df[df[\"label\"].isin([NEGATIVE])].reset_index(drop=True)\n\ntest_data_neg = test_data_neg.text.values\ntest_data_neg = [i for i in test_data_neg if len(i) > 200]\nrandom.shuffle(test_data_neg)\n\ntest_data_pos = test_data_pos.text.values\ntest_data_pos = [i for i in test_data_pos if len(i) > 200]\nrandom.shuffle(test_data_pos)",
"Here is the prediction results on the positive samples. The model did a good job on predicting positive sentiment for positive reviews. The first and last review predictions are false negatives.",
"for review in test_data_pos[0:10]:\n predict_text_sentiment_analysis_sample(endpoint, review)",
"Here is the prediction results on the negative reviews. Out of 10 reviews below 7 negative reviews are correctly predicted with negative sentiment",
"for review in test_data_neg[0:10]:\n predict_text_sentiment_analysis_sample(endpoint, review)",
"Review visualization\n<a name=\"section-11\"></a>\nVisualize the positive and negative reviews in the data.",
"data_pos = df[df[\"label\"].isin([VERYPOSITIVE])].reset_index(drop=True)\ndata_neg = df[df[\"label\"].isin([VERYNEGATIVE])].reset_index(drop=True)\n\ndata_neg = data_neg.text.values\n\ndata_pos = data_pos.text.values",
"Create the word cloud by removing the common words to highlight the words representing positive and negative samples.",
"# Python program to generate WordCloud\ndef plot_word_cloud(data, common_words):\n comment_words = \"\"\n stopwords = set(STOPWORDS)\n for val in data:\n tokens = val.split()\n for i in range(len(tokens)):\n tokens[i] = tokens[i].lower()\n for each in common_words:\n if each in tokens[i]:\n tokens[i] = \"\"\n break\n\n comment_words += \" \".join(tokens) + \" \"\n\n wordcloud = WordCloud(\n width=800,\n height=800,\n background_color=\"white\",\n stopwords=stopwords,\n min_font_size=10,\n ).generate(comment_words)\n\n plt.figure(figsize=(8, 8), facecolor=None)\n plt.imshow(wordcloud)\n plt.axis(\"off\")\n plt.tight_layout(pad=0)\n\n plt.show()",
"Plot a word cloud of negative reviews.",
"plot_word_cloud(\n data_neg,\n [\n \"movie\",\n \"film\",\n \"story\",\n \"audience\",\n \"director\",\n \"watch\",\n \"seem\",\n \"world\",\n \"one\",\n \"make\",\n \"way\",\n \"character\",\n \"much\",\n \"time\",\n \"even\",\n \"take\",\n \"s\",\n \"n't\",\n \"will\",\n \"may\",\n \"re\",\n \"plot\",\n \"good\",\n \"comedy\",\n \"made\",\n ],\n)",
"Plot a word cloud of positive reviews.",
"plot_word_cloud(\n data_pos,\n [\n \"movie\",\n \"film\",\n \"story\",\n \"audience\",\n \"director\",\n \"watch\",\n \"seem\",\n \"world\",\n \"one\",\n \"make\",\n \"way\",\n \"character\",\n \"much\",\n \"time\",\n \"even\",\n \"take\",\n \"s\",\n \"n't\",\n \"will\",\n \"may\",\n \"re\",\n \"plot\",\n \"made\",\n ],\n)",
"Clean up\n<a name=\"section-12\"></a>\nUndeploy the model from the endpoint.",
"DEPLOYED_MODEL_ID = \"\"\nendpoint.undeploy(deployed_model_id=DEPLOYED_MODEL_ID)",
"Delete the endpoint.",
"endpoint.delete()",
"Delete the dataset.",
"dataset.delete()",
"Delete the model.",
"model.delete()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
xpharry/Udacity-DLFoudation
|
tutorials/sentiment_network/Sentiment Classification - Project 1 Solution.ipynb
|
mit
|
[
"Sentiment Classification & How To \"Frame Problems\" for a Neural Network\nby Andrew Trask\n\nTwitter: @iamtrask\nBlog: http://iamtrask.github.io\n\nWhat You Should Already Know\n\nneural networks, forward and back-propagation\nstochastic gradient descent\nmean squared error\nand train/test splits\n\nWhere to Get Help if You Need it\n\nRe-watch previous Udacity Lectures\nLeverage the recommended Course Reading Material - Grokking Deep Learning (40% Off: traskud17)\nShoot me a tweet @iamtrask\n\nTutorial Outline:\n\n\nIntro: The Importance of \"Framing a Problem\"\n\n\nCurate a Dataset\n\nDeveloping a \"Predictive Theory\"\n\nPROJECT 1: Quick Theory Validation\n\n\nTransforming Text to Numbers\n\n\nPROJECT 2: Creating the Input/Output Data\n\n\nPutting it all together in a Neural Network\n\n\nPROJECT 3: Building our Neural Network\n\n\nUnderstanding Neural Noise\n\n\nPROJECT 4: Making Learning Faster by Reducing Noise\n\n\nAnalyzing Inefficiencies in our Network\n\n\nPROJECT 5: Making our Network Train and Run Faster\n\n\nFurther Noise Reduction\n\n\nPROJECT 6: Reducing Noise by Strategically Reducing the Vocabulary\n\n\nAnalysis: What's going on in the weights?\n\n\nLesson: Curate a Dataset",
"def pretty_print_review_and_label(i):\n print(labels[i] + \"\\t:\\t\" + reviews[i][:80] + \"...\")\n\ng = open('reviews.txt','r') # What we know!\nreviews = list(map(lambda x:x[:-1],g.readlines()))\ng.close()\n\ng = open('labels.txt','r') # What we WANT to know!\nlabels = list(map(lambda x:x[:-1].upper(),g.readlines()))\ng.close()\n\nlen(reviews)\n\nreviews[0]\n\nlabels[0]",
"Lesson: Develop a Predictive Theory",
"print(\"labels.txt \\t : \\t reviews.txt\\n\")\npretty_print_review_and_label(2137)\npretty_print_review_and_label(12816)\npretty_print_review_and_label(6267)\npretty_print_review_and_label(21934)\npretty_print_review_and_label(5297)\npretty_print_review_and_label(4998)",
"Project 1: Quick Theory Validation",
"from collections import Counter\nimport numpy as np\n\npositive_counts = Counter()\nnegative_counts = Counter()\ntotal_counts = Counter()\n\nfor i in range(len(reviews)):\n if(labels[i] == 'POSITIVE'):\n for word in reviews[i].split(\" \"):\n positive_counts[word] += 1\n total_counts[word] += 1\n else:\n for word in reviews[i].split(\" \"):\n negative_counts[word] += 1\n total_counts[word] += 1\n\npositive_counts.most_common()\n\npos_neg_ratios = Counter()\n\nfor term,cnt in list(total_counts.most_common()):\n if(cnt > 100):\n pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1)\n pos_neg_ratios[term] = pos_neg_ratio\n\nfor word,ratio in pos_neg_ratios.most_common():\n if(ratio > 1):\n pos_neg_ratios[word] = np.log(ratio)\n else:\n pos_neg_ratios[word] = -np.log((1 / (ratio+0.01)))\n\n# words most frequently seen in a review with a \"POSITIVE\" label\npos_neg_ratios.most_common()\n\nreversed(pos_neg_ratios.most_common())\n\n# words most frequently seen in a review with a \"NEGATIVE\" label\nlist(reversed(pos_neg_ratios.most_common()))[0:30]"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
daniestevez/jupyter_notebooks
|
VoyagerGBT.ipynb
|
gpl-3.0
|
[
"Polarization in Voyager signal from Green Bank Telescope\nHere we extract the carrier signal from Voyager 1 from a recording done at Green Bank Telescope during the Breakthrough Listen experiment. We compute the Stokes parameters for the signal.",
"%matplotlib inline\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport collections\n\n# Larger figure size\nfig_size = [12, 9]\nplt.rcParams['figure.figsize'] = fig_size\n\nplt.rcParams['agg.path.chunksize'] = 10000 # avoids error on plots with many lines https://github.com/matplotlib/matplotlib/issues/5907",
"The raw recordings from Green Bank Telescope can be downladed from the Breakthrough Listen Open Data Archive. Here we are using this recording, which was made on 2015-12-30 00:00:00. You can find an analysis of this recording in the breakthrough Github repository.\nThe raw recordings are in GUPPI format. Below we have a simple class to read this format. For simplicity, the metadata of the file has to be specified, instead of being read from the file headers. A more complete implementation of a GUPPI file reader can be found here.",
"class GuppiFile():\n \"\"\"A class to read data from a file in GUPPI format\"\"\"\n # we only support 8bit files\n def __init__(self, filename, header_len, blocsize, npol, nchan, centre_freq, chan_bw):\n self.f = open(filename, 'rb')\n self.header_len = header_len\n self.blocsize = blocsize\n self.npol = npol\n self.nchan = nchan\n self.ntime = blocsize // (2 * npol * nchan)\n self.centre_freq = centre_freq\n self.chan_bw = chan_bw\n self.chan_len = 2 * npol * self.ntime\n \n def get_channel(self, freq):\n \"\"\"Return the channel number to which the frequency freq (in Hz) belongs\"\"\"\n return self.nchan - 1 - int((freq - self.centre_freq + self.chan_bw * self.nchan // 2) / self.chan_bw)\n \n def read_block_from_channel(self, block, channel):\n \"\"\"Read and return a given block from a channel\"\"\"\n self.f.seek((block + 1) * self.header_len + (block * self.nchan + channel) * self.chan_len)\n b = np.fromfile(self.f, dtype = np.int8, count = self.chan_len)\n b = b[::2] - 1j * b[1::2] # Convert to complex and invert frequency\n return b.reshape((self.npol, self.ntime), order = 'F')\n \n def centre_frequency(self, channel):\n \"\"\"Return the centre frequency for a given channel\"\"\"\n return self.centre_freq + (self.nchan // 2 - channel) * self.chan_bw",
"To run the code below, make sure that you have downloaded the GUPPI file and placed it in the same folder as this notebook. This opens the GUPPI file with the correct parameters.",
"guppi = GuppiFile('blc3_guppi_57386_VOYAGER1_0004.0000.raw',\n header_len = 0x1900, blocsize = 132251648,\n npol = 2, nchan = 64,\n centre_freq = 8493.75e6,\n chan_bw = 2.9296875e6)\n\nvoyager_freq = 8420.2164537e6\nvoyager_chan = guppi.get_channel(voyager_freq)",
"Now we read the first 50 blocks of the file, compute an FFT around the frequency of the Voyager 1 signal and show the power spectrums for both polarizations. Note that the Voyager 1 signal is left circularly polarized, so all the signal is in the first plot. The carrier and data sidebands are visible.",
"signal = np.concatenate([guppi.read_block_from_channel(x, voyager_chan) for x in range(50)], axis = 1)\nnsamples = signal.shape[1]\n\n# Amplitude balance between channels (to be used later)\namplitude_balance = np.sqrt(np.sum(np.abs(signal)**2, axis = 1))[::-1]\n\nhz_per_bin = guppi.chan_bw / nsamples\ncentre_bin = nsamples // 2 + round((voyager_freq - guppi.centre_frequency(voyager_chan))/hz_per_bin)\nwidth = 60e3\nbin_range = round(width/2/hz_per_bin)\n\nwindow = np.hamming(nsamples)\nfft = np.fft.fftshift(np.fft.fft(signal * window), axes = 1)[..., centre_bin - bin_range : centre_bin + bin_range]\n\n# delete unneeded variables to free some RAM\ndel signal\ndel window\n\npower_spectrum = np.abs(fft)**2\nfrequencies = np.fft.fftshift(np.fft.fftfreq(nsamples, 1/guppi.chan_bw))[centre_bin - bin_range : centre_bin + bin_range] + guppi.centre_frequency(voyager_chan)\nplt.title('LCP power spectrum')\nplt.xlabel('Frequency (Hz)')\nplt.ylabel('Power (dB)')\nplt.plot(frequencies, 10 * np.log10(power_spectrum[0,...]));\n\nplt.title('RCP power spectrum')\nplt.xlabel('Frequency (Hz)')\nplt.ylabel('Power (dB)')\nplt.plot(frequencies, 10 * np.log10(power_spectrum[1,...]));",
"Note that the two polarization channels have different gain. The noise floor in the left channel is around 110dB, while the noise floor in the right channel is around 100dB. To compensate for this, we multiply each channel by the average power of the other channel. This ensures that both channels have the same power.",
"corrected_fft = np.array(np.diag(amplitude_balance) * np.mat(fft))\ndel fft",
"Now we select a frequency range of 10Hz about the Voyager carrier. We will compute the Stokes parameters using this frequency range as a signal in the frequency domain.",
"width = 10\nselection = np.logical_and(voyager_freq - width/2 <= frequencies, frequencies <= voyager_freq + width/2)\nplt.title('Voyager 1 carrier (left is LCP, green is RCP)')\nplt.xlabel('Frequency (Hz)')\nplt.ylabel('Power (dB)')\nplt.plot(frequencies[selection], 20 * np.log10(np.abs(corrected_fft)[0,...][selection]))\nplt.plot(frequencies[selection], 20 * np.log10(np.abs(corrected_fft)[1,...][selection]));",
"The class below computes the Stokes parameters of a signal in (LCP, RCP) polarization. The signal can be in the time domain or in the frequency domain.",
"StokesParameters = collections.namedtuple('StokesParameters', ['I', 'Q', 'U', 'V'])\n\ndef compute_stokes_LR(x):\n \"\"\"Compute stokes parameters of signal in time or frequency domain in dual (LCP,RCP) polarization\"\"\"\n def combine_powers(x, coeffs):\n return np.dot(np.sum(np.abs(x)**2, axis = 1), coeffs)\n def transform(A, x):\n return np.array(A * np.mat(x))\n \n V_to_Q = np.matrix([[1/np.sqrt(2), 1/np.sqrt(2)], [1/(1j*np.sqrt(2)), -1/(1j*np.sqrt(2))]])\n Q_to_U = np.matrix([[1/np.sqrt(2), 1/np.sqrt(2)], [1/np.sqrt(2), -1/np.sqrt(2)]])\n \n I = combine_powers(x, [1,1])\n V = combine_powers(x, [1,-1])\n Q = combine_powers(transform(V_to_Q, x), [1,-1])\n U = combine_powers(transform(Q_to_U*V_to_Q, x), [1,-1])\n \n return StokesParameters(I,Q, U, V)",
"We use the class above to compute the Stokes parameters of the Voyager 1 carrier. We expect to obtain parameters matching those of a LCP signal (V positive and near I, Q and U small).",
"stokes = compute_stokes_LR(corrected_fft[...,selection])\nstokes",
"Now we can compute Ip, the polarized intensity, and the polarization degree. We see that the signal is highly polarized.",
"Ip = np.sqrt(stokes.V**2 + stokes.Q**2 + stokes.U**2)\npolarization_degree = Ip/stokes.I\npolarization_degree",
"Finally, we compute L, the complex intensity of linear polarization and show the degree of linear polarization (which is small, as expected) and the angle of the linear polarization.",
"L = stokes.Q + 1j*stokes.U\n(np.abs(L)/Ip, np.angle(L)/2*180/np.pi)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
christopherkullenberg/openscienceliterature
|
openscienceliterature.ipynb
|
mit
|
[
"<!-- Some HTML for ncie picture -->\n<p>\n <a href=\"https://commons.wikimedia.org/wiki/File:Open_Science_-_Prinzipien.png#/media/File:Open_Science_-_Prinzipien.png\"><img src=\"https://upload.wikimedia.org/wikipedia/commons/9/9c/Open_Science_-_Prinzipien.png\" alt=\"Open Science - Prinzipien.png\" width=\"400\"></a>\n <br>\n</p>\n\n<center>Image by Andreas E. Neuhold, CC BY 3.0</center>\nBibliometric probing of the concept 'open science' - a notebook\nby Christopher Kullenberg<sup>1</sup> (2017)\nE-mail: christopher#kullenberg§gu#se \nAbstract. To date there is only a small number of scientific articles that have been written on the topic of \"open science\", at least when considering the recent trend in science policy and public discourse. The Web of Science only contains 544 records with the term \"open science\" in the title, keywords or abstract, and Scopus ammounts to 769 records. Based on article keywords, open science is primarily connected with the concepts of \"open access\", \"open data\", \"reproducible research\", and \"data sharing\". It occurs most frequently in biomedical-, interdisciplinary- and natural science journals, however, some of the most cited articles have been published in science policy journals.\nContents\n\nIntroduction\nMethod\nSoftware\nMain results, a summary\nThe semantic connections: keywords\nMost cited articles\nMost common journals\nCode, data and figures. \nWeb of Science, recursion 1. \nScopus, recursion 1. \nScopus, recursion 2.\n\n1. Introduction\nThe purpose of this notebook is to produce an overview of scientific literature related to the phenomenon of open science. This is a preliminary research note, not a definite result. Feel free to use and modify this notebook in anyway you want, as long as you cite it as follows:\n<sup>1</sup> Kullenberg, Christopher (2017) \"Bibliometric probing of the concept 'open science' - a notebook\", https://github.com/christopherkullenberg/openscienceliterature, accessed YY-MM-DD. 
\nThis notebook uses bibliometric data from the Web of Science and Scopus databases. Since these databases are proprietary and require a subscription, the source data cannot be redistributed here. However, the search strings and dates of retrieval are noted each time a dataset is used, so if you have access to these databases, it should be quite possible to replicate every step. \n2. Method\nSince open science is very loosely defined and can have multiple meanings, it is difficult to craft a precise search string in the bibliometric databases. Thus, in this notebook, I probe recursively into the Web of Science and Scopus databases with recursive searches (See Kullenberg & Kasperowski 2016) to generate search terms that can be relevant for getting a better picture of the open science phenomenon. \n|Database|Records|\n| --- | --- |\n| Web of Science | 544 |\n| Scopus 1st search| 769 |\n| Scopus 2nd search| 14.146 |\nThe recursive search works as follows. First each database is queried in the abstract, title and keywords with the search term \"open science\". Then the keywords of this first database is analysed in terms of frequency and co-occurrence. This gives a first overview of what other keywords are relevant. \nIn the next step the keywords from the first iteration are inspected and sorted. I filter out those keywords that are too general in scope, for example \"open source\" and \"reproducibility\", in order to craft a more precise combination of search terms. This is a qualitative process, so for every keyword, I make choices on what is relevant and what is not. As this notebook is explorative in nature, this should be regarded as an interpretative practice, where one might return at a later stage and then make other decisions due to increased knowledge about the field. \nA. Software\nThis Python 3 notebook uses the Pandas dataframe as a method for parsing the bibliometric records into a convenient data structure. 
For plotting, the Seaborn high-level library for interfacing with Matplotlib is used. Wielding such software for bibliometric analyses might seem a bit strange, but there are many advantages when it comes to reproducibility, since every step can be traced and the software is free. To install all software swiftly, I \nrecommend warmly the Anaconda distribution. \n3. Main results, a summary\nAs the figure below shows, the notion of \"open science\" is practically non-existent before the turn of the millennium (in quantitative terms). During the past two or tree years, however, the concept seems to have gained a little traction. Note - the slight decrease in publications for 2016 is only a \"lag effect\" of the update process of the databases.",
"maketimeseries() # Load this function from bottom of notebook to print.",
"A. The semantic connections: keywords\nBased on a first iterative search on \"open science\", the term frequently occurs in connection with:\n\nopen access\nopen data\nreproducible research\ndata sharing\nreproducibility\nbig data\ncollaboration\nmetadata\nmeta-analysis\nresearch data\n\nHere it is reasonable to conclude that \"open science\" is largely defined from an epistemic point of view, as these terms refer to the scientific practice of sharing research findings and data openly (open access, open data), ensuring the reproducibility of research and achieving a better collaboration between scientists. Also, the notions of \"meta-data\" and \"meta-analysis\" indicate a concern for standardizing data for the purpose of aggregation (big data). \nB. Most cited articles\nFinding the most cited articles can be a good way of finding obligatory points of passage in the literature. However, one must be careful not to draw too far-fetched conclusions from such numbers. After all, the practice of citation cannot be reduced to a single motivational factor. \nWeb of Science, 10 most cited articles.\n| Author | Year | Title | Journal | Times Cited | DOI/URL |\n| --- | --- | --- | --- | --- | --- |\n| Nooner, KB; Colcombe, SJ; Tobe, RH; Mennes, M;... | 2012.0 | The NKI-Rockland sample: a model for accelerat... | FRONTIERS IN NEUROSCIENCE | 82.0 | https://dx.doi.org/10.3389/fnins.2012.00152\n| Fabrizio, KR; Di Minin, A | 2008.0 | Commercializing the laboratory: Faculty patent... | RESEARCH POLICY | 81.0 | https://dx.doi.org/10.1016/j.respol.2008.01.010\n| Castellanos, FX; Di Martino, A; Craddock, RC; ... | 2013.0 | Clinical applications of the functional connec... | NEUROIMAGE | 79.0 | https://dx.doi.org/10.1016/j.neuroimage.2013.04.083\n| Markman, GD; Siegel, DS; Wright, M | 2008.0 | Research and Technology Commercialization | JOURNAL OF MANAGEMENT STUDIES | 75.0 | https://dx.doi.org/10.1111/j.1467-6486.2008.00803.x\n| Newman, G; Wiggins, A; Crall, A; Graham, E; Ne... 
| 2012.0 | The future of citizen science: emerging techno... | FRONTIERS IN ECOLOGY AND THE ENVIRONMENT | 74.0 | https://dx.doi.org/10.1890/110294\n| Mello, MM; Francer, JK; Wilenzick, M; Teden, P... | 2013.0 | Preparing for Responsible Sharing of Clinical ... | NEW ENGLAND JOURNAL OF MEDICINE | 57.0 | https://dx.doi.org/10.1056/NEJMhle1309073\n| Breschi, S; Catalini, C | 2010.0 | Tracing the links between science and technolo... | RESEARCH POLICY | 48.0 | https://dx.doi.org/10.1016/j.respol.2009.11.004\n| Procter, R; Williams, R; Stewart, J; Poschen, ... | 2010.0 | Adoption and use of Web 2.0 in scholarly commu... | PHILOSOPHICAL TRANSACTIONS OF THE ROYAL SOCIET... | 44.0 | https://dx.doi.org/10.1098/rsta.2010.0155\n| Mueller, ST; Piper, BJ | 2014.0 | The Psychology Experiment Building Language (P... | JOURNAL OF NEUROSCIENCE METHODS | 43.0 | https://dx.doi.org/10.1016/j.jneumeth.2013.10.024\n| Mennes, M; Biswal, BB; Castellanos, FX; Milham... | 2013.0 | Making data sharing work: The FCP/INDI experience | NEUROIMAGE | 42.0 | https://dx.doi.org/10.1016/j.neuroimage.2012.10.064\nScopus, 10 most cited articles.\n| Author | Year | Title | Journal | Times Cited | DOI/URL |\n| --- | --- | --- | --- | --- | --- |\n| Partha D., David P.A. | 1994 | Toward a new economics of science | Research Policy | 926.0 | https://dx.doi.org/10.1016/0048-7333(94)01002-1 |\n| Mix A.C., Bard E., Schneider R. | 2001 | Environmental processes of the ice age: Land, ... | Quaternary Science Reviews | 479.0 | https://dx.doi.org/10.1016/S0277-3791(00)00145-1 |\n| Balconi M., Breschi S., Lissoni F. | 2004 | Networks of inventors and the role of academia... | Research Policy | 216.0 | https://dx.doi.org/10.1016/S0048-7333(03)00108-2 |\n| Veugelers R., Cassiman B. | 2005 | R&D cooperation between firms and universities... | International Journal of Industrial Organization | 189.0 | https://dx.doi.org/10.1016/j.ijindorg.2005.01.008 |\n| Dosi G., Llerena P., Labini M.S. 
| 2006 | The relationships between science, technologie... | Research Policy | 173.0 | https://dx.doi.org/10.1016/j.respol.2006.09.012 |\n| Pordes R., Petravick D., Kramer B., Olson D., ... | 2007 | The open science grid | Journal of Physics: Conference Series | 112.0 | https://dx.doi.org/10.1088/1742-6596/78/1/012057 |\n| Agrawal A. | 2006 | Engaging the inventor: Exploring licensing str... | Strategic Management Journal | 111.0 | https://dx.doi.org/10.1002/smj.508 |\n| Markman G.D., Siegel D.S., Wright M. | 2008 | Research and technology commercialization | Journal of Management Studies | 110.0 | https://dx.doi.org/10.1111/j.1467-6486.2008.00803.x |\n| Nooner K.B., Colcombe S.J., Tobe R.H., Mennes ... | 2012 | The NKI-Rockland sample: A model for accelerat... | Frontiers in Neuroscience | 105.0 | https://dx.doi.org/10.3389/fnins.2012.00152 |\n| Fabrizio K.R., Di Minin A. | 2008 | Commercializing the laboratory: Faculty patent... | Research Policy | 92.0 | https://dx.doi.org/10.1016/j.respol.2008.01.010\nC. Most common journals\nDepending on whether you query Scopus or Web of Science, you get different results concerning the distribution of records that are conference proceedings or journal articles. Here I have selected to include only journal articles, not conference papers, reviews or opinion pieces. The most common journals are: \n\nElife\nResearch policy\nPlos ONE\nJournal of Technology Transfer\nPeerj\nScience\n\nConclusion: The dominant fields for \"open science\" appears to be biomedical-, multidisciplinary-, and natural sciences. \n\n4. Code and Data",
"# General libraries\n%matplotlib inline\nimport pandas as pd\nimport warnings\nwarnings.simplefilter(action = \"ignore\", category = FutureWarning) # Supress some meaningless warnings. \n#from tabulate import tabulate\nfrom collections import Counter\nimport seaborn as sns\nimport numpy as np\nfrom itertools import combinations\nimport matplotlib.pyplot as plt\nfrom matplotlib.pylab import rcParams\nrcParams['figure.figsize'] = 12, 7 # Make figures a little bigger\n\n# Load up this cell to enable the keywordcooccurrence function. \ndef cooccurrence(column):\n '''\n Input: a dataframe column containing keywords that are separated by semicolons\n Example: df.DE (Web of Science) or df[Author keywords] (Scopus)\n Output: A list of co-occurring keywords that can be ranked with the Counter function\n '''\n cooccurrencelist = []\n for keyword in column:\n k = str(keyword)\n keywordsperarticle = k.split('; ')\n keywordsperarticle = [word.lower() for word in keywordsperarticle] # Lowers all keywords in each list.\n cooccurrence = list(combinations(keywordsperarticle, 2))\n for c in cooccurrence:\n cooccurrencelist.append(c)\n return(cooccurrencelist)\n\n# This function returns a list of journals that can be easily counted\ndef frequentjournals(column):\n '''\n Input: a dataframe column containing journal names.\n Example: df.SO (Web of Science) or df[Source title] (Scopus)\n Output: A list of journal names that can be ranked with the Counter function\n '''\n journallist = []\n for journal in column:\n #print(len(journal))\n journallist.append(journal.lower()) # Lower names. Looks bad, but computes well. \n return(journallist)\n",
"A. Web of Science - Recursion 1.\nSearch details\nDate: \n20170121, General Search. \nSearch string: \nTS=\"open science\"\nResult:\n544 Records. \nImportant note on parsing the WoS tsv files\nDelete the column BJ since it is erronneous. The separator is tab, and there is no delimiter.",
"#fel data , error_bad_lines=False\ndf = pd.read_csv('.data/WoS549recs20170121.tsv', sep=\"\\t\", encoding='utf-8') # Input: web of science tsv file, utf-8 encoding. \n\ndf.head(3)\n\n# Print this for explanation of WoS columns\nwoskeyfile = open('woskeys.txt')\nwoskeys = woskeyfile.read()\n#print(woskeys)\n\ndfTC = df.sort('TC', ascending=False) # Order dataframe by times cited\n\ndfTC[['AU', 'PY', 'TI', 'SO', 'TC', 'DI']].head(10) # Ten most cited articles. \n\npublicationyears = sns.factorplot('PY', data=df, kind='count', size=10, aspect=2)",
"Keyword analysis",
"# Read all keywords into a list. \nallkeywords = []\nfor keyword in df.DE:\n k = str(keyword)\n keywordsperarticle = k.split('; ')\n for word in keywordsperarticle:\n allkeywords.append(word.lower()) # make all lower case for better string matching. \n \nprint(\"Total number of keywords: \" + str(len(allkeywords)))\n\n# Find the most common keywords\ncommonkeywords = Counter(allkeywords).most_common(10) # Increase if you want more results\nfor word in commonkeywords:\n if word[0] != \"nan\": # Clean out empty fields. \n print(word[0] + \"\\t\" + str(word[1]))\n\nCounter(cooccurrence(df.DE)).most_common(10)\n\nkeywordsDF = pd.DataFrame(commonkeywords, columns=[\"keyword\", \"freq\"])\n# Plot figure while excluding \"nan\" values in Dataframe\nkeywordsWoS = sns.factorplot(x='keyword', y='freq', kind=\"bar\", data=keywordsDF[keywordsDF.keyword.str.contains(\"nan\") == False], size=8, aspect=2)\nkeywordsWoS.set_xticklabels(rotation=45)",
"Journal analysis",
"# To get only journal articles, select df.SO[df['PT'] == 'J']\nfor journal in Counter(frequentjournals(df.SO[df['PT'] == 'J'] )).most_common(10):\n print(journal)",
"Scopus - Recursive search: Iteration 1\nSearch date:\n20170120 (TITLE-ABS-KEY\"\nSearch string:\n\"open science\"\nRecords:\n769",
"df2 = pd.read_csv('.data/scopusRecursionOne769recs20170120.csv', encoding=\"utf-8\")\n\n# Print this for Scopus column names\n#for header in list(df2.columns.values):\n# print(header)\n#df2['Document Type']\n\ndf2TC = df2.sort('Cited by', ascending=False) # Order dataframe by times cited\ndf2TC.tail(3)\n\n# NOTE: there is a cryptic character in front of the Authors column: \n# Sometimes 'Authors' works, sometimes 'Authors', depending on system locale settings. \ndf2TC[['Authors', 'Year', 'Title', 'Source title', 'Cited by', 'DOI']].head(10) # Ten most cited articles. \n\n# Create a time series of the publications. Some data cleaning is needed: \ndf2TCdropna = df2TC.Year.dropna() # Drop empty values in years\ndf2TCyears = pd.DataFrame(df2TCdropna.astype(int)) # Convert existing years to integers, make new dataframe\npublicationyearsScopus = sns.factorplot('Year', data=df2TCyears, kind='count', size=8, aspect=2)\n\n# Read all keywords into a list. \nallscopuskeywords = []\nfor keyword in df2['Author Keywords']:\n k = str(keyword)\n keywordsperarticle = k.split('; ')\n for word in keywordsperarticle:\n allscopuskeywords.append(word.lower()) # make all lower case for better string matching. \n \nprint(\"Total number of keywords: \" + str(len(allkeywords)))\n\n# Find the most common keywords\ncommonscopuskeywords = Counter(allscopuskeywords).most_common(20) # Increase if you want more results\nfor word in commonscopuskeywords:\n if word[0] != \"nan\": # Clean out empty fields. 
\n print(word[0] + \"\\t\" + str(word[1]))\n\n# Get co-occurrences\nCounter(cooccurrence(df2['Author Keywords'])).most_common(10)\n\nkeywordsScopusDF = pd.DataFrame(commonscopuskeywords, columns=[\"keyword\", \"freq\"])\n# Plot figure while excluding \"nan\" values in Dataframe\nkeywordsScP = sns.factorplot(x='keyword', y='freq', kind=\"bar\", data=keywordsScopusDF[keywordsScopusDF.keyword.str.contains(\"nan\") == False], size=6, aspect=2)\nkeywordsScP.set_xticklabels(rotation=45)\nkeywordsScP.fig.text(0.65, 0.7, \"Scopus - Recursion 1:\\nSearchstring: \\\n'open science'\\nMost frequent keywords\\nN=769\", ha ='left', fontsize = 15)",
"Journal analysis",
"# For journal articles only: df2['Source title'][df2['Document Type'] == 'Article']\nfor journal in Counter(frequentjournals(df2['Source title'][df2['Document Type'] == 'Article'])).most_common(10):\n print(journal)",
"C. Scopus Recursive search: Iteration 2\nScopus Search date:\n20170120 (TITLE-ABS-KEY)\nNote: Scopus will not include Author Keywords when exporting this large ammount of data. \nSearch string:\nIncluded:\n\"open science\" OR \"data sharing\" OR \"open data\" OR \"open science grid\"\nExcluded\n* \"open access\" - Too broad, gets around 40k hits. \n* \"open source\" - Too broad.\n* \"reproducibility\" - Too broad.\n* \"big data\" - Too broad.\n* \"collaboration\" - Too broad.\n* ... \nRecords:\n14.146",
"df3 = pd.read_csv('.data/scopusRecursionTwo14146recs20170120.csv')\n\ndf3.tail(3) # Verify all data is there. \n\n# Create a time series of the publications. Some data cleaning is needed: \ndf3dropna = df3.Year.dropna() # Drop empty values in years\ndf3years = pd.DataFrame(df3dropna.astype(int)) # Convert existing years to integers, make new dataframe\n\npublicationyearsScopus = sns.factorplot('Year', data=df3years, kind='count', size=8, aspect=2)\npublicationyearsScopus.set_xticklabels(rotation=45)",
"Journal analysis",
"for journal in Counter(frequentjournals(df3['Source title'][df3['Document Type'] == \"Article\"])).most_common(10):\n print(journal)\n\nWoSyears = []\n\nfor year in df.PY.dropna():\n if year > 1990.0:\n WoSyears.append(int(year))\n\n#print(sorted(WoSyears))\n \n\n \n \nScopusyears = []\n\nfor year in df2['Year'].dropna():\n if year > 1990.0:\n Scopusyears.append(year)\n\n\ndfWoSyears = pd.DataFrame.from_dict(Counter(WoSyears), orient='index', dtype=None)\ndfsorted = pd.DataFrame.sort_index(dfWoSyears)\ndfsorted.head()\n\ndfScopusyears = pd.DataFrame.from_dict(Counter(Scopusyears), orient='index', dtype=None)\ndfSsorted = pd.DataFrame.sort_index(dfScopusyears)\ndfSsorted.head()\n\ndef maketimeseries():\n plt.title('\"Open science\" - Published articles and proceedings, 1990-2016', fontsize=16)\n plt.xlabel('Year \\n', fontsize=16)\n plt.ylabel('Records', fontsize=16)\n plt.ylim([0, 150])\n plt.xlim(1990,2016)\n\n # Line styles: http://matplotlib.org/1.3.1/examples/pylab_examples/line_styles.html\n plt.plot(dfsorted, linestyle='--', marker='D', label=\"Web of Science\")\n plt.plot(dfSsorted, linestyle='-', marker='o', label=\"Scopus\")\n # legend guide: http://matplotlib.org/1.3.1/users/legend_guide.html\n plt.legend(loc=2, borderaxespad=0., fontsize=16)\n plt.savefig(\".data/fig1.png\")\n\n\nmaketimeseries()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
Chipe1/aima-python
|
notebooks/chapter24/Image Edge Detection.ipynb
|
mit
|
[
"Edge Detection\nEdge detection is one of the earliest and popular image processing tasks. Edges are straight lines or curves in the image plane across which there is a “significant” change in image brightness. The goal of edge detection is to abstract away from the messy, multi-megabyte image and towards a more compact, abstract representation.\nThere are multiple ways to detect an edge in an image but the most may be grouped into two categories, gradient, and Laplacian. Here we will introduce some algorithms among them and their intuitions. First, let's import the necessary packages.",
"import os, sys\nsys.path = [os.path.abspath(\"../../\")] + sys.path\nfrom perception4e import *\nfrom notebook4e import *",
"Gradient Edge Detection\nBecause edges correspond to locations in images where the brightness undergoes a sharp change, a naive idea would be to differentiate the image and look for places where the magnitude of the derivative is large. For many simple cases with regular geometry topologies, this simple method could work. \nHere we introduce a 2D function $f(x,y)$ to represent the pixel values on a 2D image plane. Thus this method follows the math intuition below:\n$$\\frac{\\partial f(x,y)}{\\partial x} = \\lim_{\\epsilon \\rightarrow 0} \\frac{f(x+\\epsilon,y)-\\partial f(x,y)}{\\epsilon}$$\nAbove is exactly the definition of the edges in an image. In real cases, $\\epsilon$ cannot be 0. We can only investigate the pixels in the neighborhood of the current one to get the derivation of a pixel. Thus the previous formula becomes\n$$\\frac{\\partial f(x,y)}{\\partial x} = \\lim_{\\epsilon \\rightarrow 0} \\frac{f(x+1,y)-\\partial f(x,y)}{1}$$\nTo implement the above formula, we can simply apply a filter $[1,-1]$ to extract the differentiated image. For the case of derivation in the y-direction, we can transpose the above filter and apply it to the original image. The relation of partial deviation of the direction of edges are summarized in the following picture:\n<img src=\"images/gradients.png\" width=\"700\"/>\nImplementation\nWe implemented an edge detector using a gradient method as gradient_edge_detector in perceptron.py. There are two filters defined as $[[1, -1]], [[1], [-1]]$ to extract edges in x and y directions respectively. The filters are applied to an image using convolve2d method in scipy.single package. The image passed into the function needs to be in the form of numpy.ndarray or an iterable object that can be transformed into a ndarray.\nTo view the detailed implementation, please execute the following block",
"psource(gradient_edge_detector)",
"Example\nNow let's try the detector for real case pictures. First, we will show the original picture before edge detection:\n<img src=\"images/stapler.png\" width=\"500\"/>\nWe will use matplotlib to read the image as a numpy ndarray:",
"import matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.image as mpimg\n\nim =mpimg.imread('images/stapler.png')\nprint(\"image height:\", len(im))\nprint(\"image width:\", len(im[0]))",
"The code shows we get an image with a size of $787*590$. gaussian_derivative_edge_detector can extract images in both x and y direction and then put them together in a ndarray:",
"edges = gradient_edge_detector(im)\nprint(\"image height:\", len(edges))\nprint(\"image width:\", len(edges[0]))",
"The edges are in the same shape of the original image. Now we will try print out the image, we implemented a show_edges function to do this:",
"show_edges(edges)",
"We can see that the edges are extracted well. We can use the result of this simple algorithm as a baseline and compare the results of other algorithms to it.\nDerivative of Gaussian\nWhen considering the situation when there is strong noise in an image, the ups and downs of the noise will induce strong peaks in the gradient profile. In order to be more noise-robust, an algorithm introduced a Gaussian filter before applying the gradient filer. In another way, convolving a gradient filter after a Gaussian filter equals to convolving a derivative of Gaussian filter directly to the image.\nHere is how this intuition is represented in math:\n$$(I\\bigotimes g)\\bigotimes h = I\\bigotimes (g\\bigotimes h) $$\nWhere $I$ is the image, $g$ is the gradient filter and $h$ is the Gaussian filter. A two dimensional derivative of Gaussian kernel is dipicted in the following figure:\n<img src=\"images/derivative_of_gaussian.png\" width=\"400\"/>\nImplementation\nIn our implementation, we initialize Gaussian filters by applying the 2D Gaussian function on a given size of the grid which is the same as the kernel size. Then the x and y direction image filters are calculated as the convolution of the Gaussian filter and the gradient filter:",
"x_filter = scipy.signal.convolve2d(gaussian_filter, np.asarray([[1, -1]]), 'same')\ny_filter = scipy.signal.convolve2d(gaussian_filter, np.asarray([[1], [-1]]), 'same')",
"Then both of the filters are applied to the input image to extract the x and y direction edges. For detailed implementation, please view by:",
"psource(gaussian_derivative_edge_detector)",
"Example\nNow let's try again on the stapler image and plot the extracted edges:",
"e = gaussian_derivative_edge_detector(im)\nshow_edges(e)",
"We can see that the extracted edges are more similar to the original one. The resulting edges are depending on the initial Gaussian kernel size and how it is initialized.\nLaplacian Edge Detector\nLaplacian is somewhat different from the methods we have discussed so far. Unlike the above kernels which are only using the first-order derivatives of the original image, the Laplacian edge detector uses the second-order derivatives of the image. Using the second derivatives also makes the detector very sensitive to noise. Thus the image is often Gaussian smoothed before applying the Laplacian filter.\nHere are how the Laplacian detector looks like:\n<img src=\"images/laplacian.png\" width=\"200\"/>\nImplementation\nThere are two commonly used small Laplacian kernels:\n<img src=\"images/laplacian_kernels.png\" width=\"300\"/>\nIn our implementation, we used the first one as the default kernel and convolve it with the original image using packages provided by scipy.\nExample\nNow let's use the Laplacian edge detector to extract edges of the staple example:",
"e = laplacian_edge_detector(im)\nshow_edges(e)",
"The edges are more subtle but meanwhile showing small zigzag structures that may be affected by noise. However, the overall performance of edge extracting is still promising."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
tensorflow/docs-l10n
|
site/ja/probability/examples/Gaussian_Copula.ipynb
|
apache-2.0
|
[
"Copyright 2018 The TensorFlow Probability Authors.\nLicensed under the Apache License, Version 2.0 (the \"License\");",
"#@title Licensed under the Apache License, Version 2.0 (the \"License\"); { display-mode: \"form\" }\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"コピュラ入門\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://www.tensorflow.org/probability/examples/Gaussian_Copula\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\">TensorFlow.org で表示</a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/probability/examples/Gaussian_Copula.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\">Google Colab で実行</a></td>\n <td><a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/ja/probability/examples/Gaussian_Copula.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\">GitHub でソースを表示</a></td>\n <td><a href=\"https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/probability/examples/Gaussian_Copula.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\">ノートブックをダウンロード</a></td>\n</table>",
"import numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow.compat.v2 as tf\ntf.enable_v2_behavior()\n\nimport tensorflow_probability as tfp\ntfd = tfp.distributions\ntfb = tfp.bijectors",
"[copula](https://en.wikipedia.org/wiki/Copula_(probability_theory%29)とは、ランダム変数間の依存関係を捉えるための古典的な手法です。より正式には、コピュラは、マージナリゼーションによって $U_i \\sim \\text{Uniform}(0, 1)$ が得られる多変量分布 $C(U_1, U_2, ...., U_n)$ です。\nコピュラを使用すると任意の周辺分布を持つ多変量分布を作成できるため、コピュラは非常に興味深い関数と言えます。以下にそのレシピを示します。\n\n確率積分変換を使って、任意の連続分布 R.V. $X$ を一様分布 $F_X(X)$ に変換します。$F_X$ は $X$ の CDF(累積分布関数)です。\nコピュラ(二変量とした場合)$C(U, V)$ がある場合、$U$ と $V$ には一様周辺分布があります。\nここで、関心のある R.V が $X, Y$ である場合に、$C'(X, Y) = C(F_X(X), F_Y(Y))$ という新しい分布を作成します。$X$ と $Y$ の周辺分布は目的どおりの分布です。\n\n周辺分布は単変量であるため、測定やモデル化が行いやすい場合があります。コピュラは周辺分布から開始することを可能にしますが、次元間の任意の相関関係を得ることも可能です。\nガウスコピュラ\nコピュラがどのように成立しているかを説明するために、多変量のガウス相関関係に従って依存関係を捉えるケースを考察しましょう。ガウスコピュラは $C(u_1, u_2, ...u_n) = \\Phi_\\Sigma(\\Phi^{-1}(u_1), \\Phi^{-1}(u_2), ... \\Phi^{-1}(u_n))$ によって得られるもので、$\\Phi_\\Sigma$ は多変量正規分布の CFD を表し、共分散 $\\Sigma$ と平均値 0 を持ち、$\\Phi^{-1}$ は標準正規分布の逆 CDF です。\n正規の逆 CFD を適用すると、一様次元をラップして正規に分布されるようになります。多変量正規分布の CDF を適用すると、分布が押しつぶされてわずかに均一になり、ガウス相関が得られます。\nしたがって、ガウスコピュラは、一様な周辺分布を持つ単位超立方体 $[0, 1]^n$ にわたる分布であることがわかります。\nこのように定義されると、ガウスコピュラは tfd.TransformedDistribution と適切な Bijector で実装することができます。つまり、tfb.NormalCDF バイジェクターによって実装された正規分布の逆 CDF を使用して、多変量正規分布を変換しているということです。\n以下では、共分散がコレスキー因子によってパラメータ化される(したがって、MultivariateNormalTriL の共分散)という 1 つの単純化した仮定で、ガウスコピュラを実装します。(ほかの tf.linalg.LinearOperators を使用して、行列のないさまざまな仮定をエンコードすることができます。)",
"class GaussianCopulaTriL(tfd.TransformedDistribution):\n \"\"\"Takes a location, and lower triangular matrix for the Cholesky factor.\"\"\"\n def __init__(self, loc, scale_tril):\n super(GaussianCopulaTriL, self).__init__(\n distribution=tfd.MultivariateNormalTriL(\n loc=loc,\n scale_tril=scale_tril),\n bijector=tfb.NormalCDF(),\n validate_args=False,\n name=\"GaussianCopulaTriLUniform\")\n\n\n# Plot an example of this.\nunit_interval = np.linspace(0.01, 0.99, num=200, dtype=np.float32)\nx_grid, y_grid = np.meshgrid(unit_interval, unit_interval)\ncoordinates = np.concatenate(\n [x_grid[..., np.newaxis],\n y_grid[..., np.newaxis]], axis=-1)\n\npdf = GaussianCopulaTriL(\n loc=[0., 0.],\n scale_tril=[[1., 0.8], [0., 0.6]],\n).prob(coordinates)\n\n# Plot its density.\n\nplt.contour(x_grid, y_grid, pdf, 100, cmap=plt.cm.jet);",
"しかし、このようなモデルの力は、確率積分変換を使用して任意の R.V. にコピュラを使用するところにあります。こうすることで、任意の周辺分布を指定し、コピュラを使って接合することができます。\n次のモデルから開始します。\n$$\\begin{align} X &\\sim \\text{Kumaraswamy}(a, b) \\ Y &\\sim \\text{Gumbel}(\\mu, \\beta) \\end{align}$$\nそして、コピュラを使って、二変量 R.V. $Z$ を取得します。これには、周辺分布の Kumaraswamy と Gumbel があります。\nまず、これらの 2 つの R.V. で生成された分布をプロットしましょう。これは、コピュラを適用したときの比較の基準としてのみ使用します。",
"a = 2.0\nb = 2.0\ngloc = 0.\ngscale = 1.\n\nx = tfd.Kumaraswamy(a, b)\ny = tfd.Gumbel(loc=gloc, scale=gscale)\n\n# Plot the distributions, assuming independence\nx_axis_interval = np.linspace(0.01, 0.99, num=200, dtype=np.float32)\ny_axis_interval = np.linspace(-2., 3., num=200, dtype=np.float32)\nx_grid, y_grid = np.meshgrid(x_axis_interval, y_axis_interval)\n\npdf = x.prob(x_grid) * y.prob(y_grid)\n\n# Plot its density\n\nplt.contour(x_grid, y_grid, pdf, 100, cmap=plt.cm.jet);",
"異なる周辺分布を使用した同時分布\n次に、ガウスコピュラを使用して分布を接合し、それをプロットします。ここでも、TransformedDistribution を適切な Bijector に適用して、目的の周辺分布を取得します。\n具体的には、さまざまなベクトルで異なるバイジェクターを適用(全単射変換のままです)する Blockwise バイジェクターを使用します。\nこれで、必要としているコピュラを定義できるようになりました。ターゲットとなる周辺分布のリスト(バイジェクターとしてエンコード)があれば、コピュラを使用し、指定された周辺分布を持つ新しい分布を簡単に構築できます。",
"class WarpedGaussianCopula(tfd.TransformedDistribution):\n \"\"\"Application of a Gaussian Copula on a list of target marginals.\n\n This implements an application of a Gaussian Copula. Given [x_0, ... x_n]\n which are distributed marginally (with CDF) [F_0, ... F_n],\n `GaussianCopula` represents an application of the Copula, such that the\n resulting multivariate distribution has the above specified marginals.\n\n The marginals are specified by `marginal_bijectors`: These are\n bijectors whose `inverse` encodes the CDF and `forward` the inverse CDF.\n\n block_sizes is a 1-D Tensor to determine splits for `marginal_bijectors`\n length should be same as length of `marginal_bijectors`.\n See tfb.Blockwise for details\n \"\"\"\n def __init__(self, loc, scale_tril, marginal_bijectors, block_sizes=None):\n super(WarpedGaussianCopula, self).__init__(\n distribution=GaussianCopulaTriL(loc=loc, scale_tril=scale_tril),\n bijector=tfb.Blockwise(bijectors=marginal_bijectors,\n block_sizes=block_sizes),\n validate_args=False,\n name=\"GaussianCopula\")",
"最後に、このガウスコピュラを実際に使用してみましょう。バリアンス 1 に対応する $\\begin{bmatrix}1 & 0\\rho & \\sqrt{(1-\\rho^2)}\\end{bmatrix}$ のコレスキー、そして多変量正規分布の相関 $\\rho$ を使用します。\nいくつかのケースを見てみましょう。",
"# Create our coordinates:\ncoordinates = np.concatenate(\n [x_grid[..., np.newaxis], y_grid[..., np.newaxis]], -1)\n\n\ndef create_gaussian_copula(correlation):\n # Use Gaussian Copula to add dependence.\n return WarpedGaussianCopula(\n loc=[0., 0.],\n scale_tril=[[1., 0.], [correlation, tf.sqrt(1. - correlation ** 2)]],\n # These encode the marginals we want. In this case we want X_0 has\n # Kumaraswamy marginal, and X_1 has Gumbel marginal.\n\n marginal_bijectors=[\n tfb.Invert(tfb.KumaraswamyCDF(a, b)),\n tfb.Invert(tfb.GumbelCDF(loc=0., scale=1.))])\n\n\n# Note that the zero case will correspond to independent marginals!\ncorrelations = [0., -0.8, 0.8]\ncopulas = []\nprobs = []\nfor correlation in correlations:\n copula = create_gaussian_copula(correlation)\n copulas.append(copula)\n probs.append(copula.prob(coordinates))\n\n\n# Plot it's density\n\nfor correlation, copula_prob in zip(correlations, probs):\n plt.figure()\n plt.contour(x_grid, y_grid, copula_prob, 100, cmap=plt.cm.jet)\n plt.title('Correlation {}'.format(correlation))",
"最後に、実際に求めていた周辺分布を実際に取得することを確認しましょう。",
"def kumaraswamy_pdf(x):\n return tfd.Kumaraswamy(a, b).prob(np.float32(x))\n\ndef gumbel_pdf(x):\n return tfd.Gumbel(gloc, gscale).prob(np.float32(x))\n\n\ncopula_samples = []\nfor copula in copulas:\n copula_samples.append(copula.sample(10000))\n\nplot_rows = len(correlations)\nplot_cols = 2 # for 2 densities [kumarswamy, gumbel]\nfig, axes = plt.subplots(plot_rows, plot_cols, sharex='col', figsize=(18,12))\n\n# Let's marginalize out on each, and plot the samples.\n\nfor i, (correlation, copula_sample) in enumerate(zip(correlations, copula_samples)):\n k = copula_sample[..., 0].numpy()\n g = copula_sample[..., 1].numpy()\n\n\n _, bins, _ = axes[i, 0].hist(k, bins=100, density=True)\n axes[i, 0].plot(bins, kumaraswamy_pdf(bins), 'r--')\n axes[i, 0].set_title('Kumaraswamy from Copula with correlation {}'.format(correlation))\n\n _, bins, _ = axes[i, 1].hist(g, bins=100, density=True)\n axes[i, 1].plot(bins, gumbel_pdf(bins), 'r--')\n axes[i, 1].set_title('Gumbel from Copula with correlation {}'.format(correlation))\n ",
"結論\n以上です!Bijector API を使用してガウスコピュラを構築できることをお見せしました。\n一般的には、Bijector API を使用してバイジェクターを記述し、分布でこれらを作成すると、柔軟にモデル化できる豊富な分布族を作成できます。"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
physion/ovation-python
|
examples/lab/workflow-activity-upload.ipynb
|
gpl-3.0
|
[
"import urllib\nimport ovation.lab.workflows as workflows\n\nfrom ovation.session import connect\n\nfrom tqdm import tqdm_notebook as tqdm\nfrom pprint import pprint\n\ns = connect(input('Email: '), api='https://services-staging.ovation.io', token='/api/v1/sessions')",
"Upload Activity results\nWorkflow activities are (optionally) uniquely labeled by the Workflow Definition. This label allows clients to upload metadata and files to activities by label. In this example, we create a new XX activity within an existing workflow.",
"# TODO source, destination\nmetadata = {'custom_attributes': {'my-attribute': 1}}\nresources = {'resource-label': '/Users/barry/Desktop/Ovation Demo Files/results.txt'}\n\nworkflow_id = input('Workflow ID: ')\n\nactivity_label = input('Activity label: ')",
"For illustration, we view the Workflow, showing links.activity_label.related, the URL where create_activity will POST the new Activity data",
"workflow = s.get(s.path('workflows', workflow_id))\npprint(workflow.workflow)",
"Now, let's create the activity.",
"activity = workflows.create_activity(s, workflow_id, activity_label, data=metadata, resources=resources, progress=tqdm)"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
eramirem/numerical-methods-pdes
|
07_ivp.ipynb
|
cc0-1.0
|
[
"<table>\n <tr align=left><td><img align=left src=\"https://i.creativecommons.org/l/by/4.0/88x31.png\">\n <td>Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved MIT license. (c) Kyle T. Mandli</td>\n</table>",
"%matplotlib inline\nimport numpy\nimport matplotlib.pyplot as plt",
"Numerical Methods for Initial Value Problems\nWe now turn towards time dependent PDEs. Before moving to the full PDEs we will explore numerical methods for systems of ODEs that are initial value problems of the general form\n$$\n \\frac{\\text{d} \\vec{u}}{\\text{d}t} = \\vec{f}(t, \\vec{u}) ~~~~ \\vec{u}(0) = \\vec{u}_0\n$$\nwhere\n - $\\vec{u}(t)$ is the state vector\n - $\\vec{f}(t, \\vec{u})$ is a vector-valued function that controls the growth of $\\vec{u}$ with time\n - $\\vec{u}(0)$ is the initial condition at time $t = 0$\nNote that the right hand side function $f$ could in actuality be the discretization in space of a PDE, i.e. a system of equations.\nExamples: Simple radioactive decay\n$\\vec{u} = [c]$\n$$\\frac{\\text{d} c}{\\text{d}t} = -\\lambda c ~~~~ c(0) = c_0$$\nwhich has solutions of the form $c(t) = c_0 e^{-\\lambda t}$",
"t = numpy.linspace(0.0, 1.6e3, 100)\nc_0 = 1.0\ndecay_constant = numpy.log(2.0) / 1600.0\n\nfig = plt.figure()\naxes = fig.add_subplot(1, 1, 1)\naxes.plot(t, 1.0 * numpy.exp(-decay_constant * t))\n\naxes.set_title(\"Radioactive Decay with $t_{1/2} = 1600$ years\")\naxes.set_xlabel('t (years)')\naxes.set_ylabel('$c$')\naxes.set_ylim((0.5,1.0))\nplt.show()",
"Examples: Complex radioactive decay (or chemical system).\nChain of decays from one species to another.\n$$\\begin{aligned}\n \\frac{\\text{d} c_1}{\\text{d}t} &= -\\lambda_1 c_1 \\\n \\frac{\\text{d} c_2}{\\text{d}t} &= \\lambda_1 c_1 - \\lambda_2 c_2 \\\n \\frac{\\text{d} c_2}{\\text{d}t} &= \\lambda_2 c_3 - \\lambda_3 c_3 \n\\end{aligned}$$\n$$\\frac{\\text{d} \\vec{u}}{\\text{d}t} = \\frac{\\text{d}}{\\text{d}t}\\begin{bmatrix} c_1 \\ c_2 \\ c_3 \\end{bmatrix} = \n\\begin{bmatrix} \n -\\lambda_1 & 0 & 0 \\\n \\lambda_1 & -\\lambda_2 & 0 \\\n 0 & \\lambda_2 & -\\lambda_3\n\\end{bmatrix} \\begin{bmatrix} c_1 \\ c_2 \\ c_3 \\end{bmatrix}$$\n$$\\frac{\\text{d} \\vec{u}}{\\text{d}t} = A \\vec{u}$$\nFor systems of equations like this the general solution to the ODE is the matrix exponential:\n$$\\vec{u}(t) = \\vec{u}_0 e^{A t}$$\nExamples: Van der Pol Oscillator\n$$y'' - \\mu (1 - y^2) y' + y = 0~~~~~\\text{with}~~~~ y(0) = y_0, ~~~y'(0) = v_0$$\n$$\\vec{u} = \\begin{bmatrix} y \\ y' \\end{bmatrix} = \\begin{bmatrix} u_1 \\ u_2 \\end{bmatrix}$$\n$$\\frac{\\text{d}}{\\text{d}t} \\begin{bmatrix} u_1 \\ u_2 \\end{bmatrix} = \\begin{bmatrix} u_2 \\ \\mu (1 - u_1^2) u_2 - u_1 \\end{bmatrix} = \\vec{f}(t, \\vec{u})$$",
"import scipy.integrate as integrate\n\ndef f(t, u, mu=5):\n return numpy.array([u[1], mu * (1.0 - u[0]**2) * u[1] - u[0]])\n\nt = numpy.linspace(0.0, 100, 1000)\nu = numpy.empty((2, t.shape[0]))\nu[:, 0] = [0.1, 0.0]\n\nintegrator = integrate.ode(f)\nintegrator.set_integrator(\"dopri5\")\nintegrator.set_initial_value(u[:, 0])\n\nfor (n, t_n) in enumerate(t[1:]):\n integrator.integrate(t_n)\n if not integrator.successful():\n break\n u[:, n + 1] = integrator.y\n \nfig = plt.figure()\naxes = fig.add_subplot(1, 1, 1)\n\naxes.plot(t, u[0,:])\naxes.set_title(\"Solution to Van der Pol Oscillator\")\naxes.set_xlabel(\"t\")\naxes.set_ylabel(\"y(t)\")\n\nfig = plt.figure()\naxes = fig.add_subplot(1, 1, 1)\n\naxes.plot(u[0,:], u[1, :])\naxes.set_title(\"Phase Diagram for Van der Pol Oscillator\")\naxes.set_xlabel(\"y(t)\")\naxes.set_ylabel(\"y'(t)\")\n\nplt.show()",
"Examples: Heat Equation\nLet's try to construct a system of ODEs that represents the heat equation\n$$\n u_t = u_{xx}.\n$$\nIf we discretize the right hand side with second order, centered differences with $m$ points we would have\n$$\n \\frac{\\text{d}}{\\text{d} t} U_i(t) = \\frac{U_{i+1}(t) - 2 U_i(t) + U_{i-1}(t)}{\\Delta x^2}\n$$\nwhere we now have $m$ unknown, time dependent functions to solve for. This approach to discretizing a PDE is sometimes called a method-of-lines approach.\nExistence and Uniqueness of Solutions\nOne important step before diving into the numerical methods for IVP ODE problems is to understand what the behavior of the solutions are, whether they exist, and if they might be unique.\nLinear Systems\nFor linear ODEs we have the generic system \n$$\n u'(t) = A(t) u + g(t)\n$$\nwhere $A$ is time-dependent matrix and $g$ a vector. Note that linear systems always have a unique solution\nIf $g(t) = 0$ for all $t$ we say the ODE is homogeneous and the matrix $A$ is time independent (then implying that it is also autonomous) then the solution to this ODE is \n$$\n u(t) = u(t_0) e^{A(t - t_0)}.\n$$\nIn the case where $g(t) \\neq 0$ for all $t$ the ODE is inhomogeneous we can use Duhamel's which tells us\n$$\n u(t) = u(t_0) e^{A(t-t_0)} + \\int^t_{t_0} e^{A(t - \\tau)} g(\\tau) d\\tau.\n$$\nWe can think of the operator $e^{A(t-\\tau)}$ as the solution operator for the homogeneous ODE which can map the solution at time $\\tau$ to the solution at time $t$ giving this form of the solution a Green's function type property.\nNon-linear Existance and Uniqueness\nLipschitz Continuity\nGeneralizing uniqueness to non-linear ODEs requires a special type of continuity called Lipschitz continuity. Consider the ODE\n$$\n u'(t) = f(u,t), ~~~~ u(t_0) = u_0,\n$$\nwe will require a certain amount of smoothness in the right hand side function $f(u,t)$. 
\nWe say that $f$ is Lipshitz continuous in $u$ over some domain\n$$\n \\Omega = {(u,t) : |u - u_0| \\leq a, t_0 \\leq t \\leq t_1 }\n$$\nif there exists a constant $L > 0$ such that\n$$\n |f(u,t) - f(u^\\ast, t)| \\leq L |u - u^\\ast| ~~~ \\forall ~~~ (u,t) ~\\text{and}~ (u^\\ast,t) \\in \\Omega.\n$$\nIf $f(u,t)$ is differentiable with respect to $u$ in $\\Omega$, i.e. the Jacobian $f_u = \\partial f / \\partial u$ exists, and is bounded then we can say\n$$\n L = \\max_{(u,t) \\in \\Omega} |f_u(u,t)|.\n$$\nWe can use this bound since\n$$\n f(u,t) = f(u^\\ast, t) + f_u(v,t)(u-u^\\ast)\n$$\nfor some $v$ chosen to be in-between $u$ and $u^\\ast$ which is effectively the Taylor series error bound and implies smoothness of $f$.\nWith Lipshitz continuity of $f$ we can guarantee a unique solution the IVP at least to time $T = \\min(t_1, t_0 + a/S)$ where \n$$\n S = \\max_{(u,t)\\in\\Omega} |f(u,t)|.\n$$\nThis value $S$ is the modulus of the maximum slope that the solution $u(t)$ can obtain in $\\Omega$ and guarantees that we remain in $\\Omega$.\nExample\nConsider $u'(t) = (u(t))^2, ~~~ u(0) = u_0 > 0$. If we define our domain of interest as above we can compute the Lipshitz constant as\n$$\n L = \\max_{(u,t) \\in \\Omega} | 2 u | = 2 (u_0 + a)\n$$\nwhere we have used the restriction from $\\Omega$ that $|u - u_0| \\leq a$. \nSimilarly we can compute $S$ to find\n$$\n S = \\max_{(u,t)\\in\\Omega} |f(u,t)| = (u_0 + a)^2\n$$\nso that we can guarantee a unique solution up until $T = a / (u_0 + a)^2$. Given that we can choose $a$ we can simply choose a value that maximized $T$, in this case $a = u_0$ does this and we conclude that we have a unique solution up until $T = 1 / 4 u_0$.\nSince we also know the exact solution to the ODE above,\n$$\n u(t) = \\frac{1}{1/u_0 - t},\n$$\nwe can see that $|u(t)| < \\infty$ as long as $t \\neq 1/u_0$. 
Note that once we reach the pole in the denominator there is no longer a solution possible for the IVP past this point.\nExample\nConsider the IVP\n$$\n u' = \\sqrt{u} ~~~~ u(0) = 0.\n$$\nWhere is this $f$ Lipshitz continuous?\nComputing the derivative we find\n$$\n f_u = \\frac{1}{2\\sqrt{u}}\n$$\nwhich goes to infinity as $u \\rightarrow 0$. We can therefore not guarantee a unique solution near the given initial condition. In fact we know this as the ODE has two solutions\n$$\n u(t) = 0 ~~~ \\text{and} ~~~ u(t) = \\frac{1}{4} t^2.\n$$\nSystems of Equations\nA similar notion for Lipschitz continuity exists in a particular norm $||\\cdot||$ if there is a constant $L$ such that\n$$\n ||f(u,t) - f(u^\\ast,t)|| \\leq L ||u - u^\\ast||\n$$\nfor all $(u,t)$ and $(u^\\ast,t)$ in the domain $\\Omega = {(u,t) : ||u-u_0|| \\leq a, t_0 \\leq t \\leq t_1 }$. Note that if the function $f$ is Lipschitz continuous in one norm it continuous in any norm.\nBasic Stepping Schemes\nLooking back at our work on numerical differentiation why not approximate the derivative as a finite difference:\n$$\n \\frac{u(t + \\Delta t) - u(t)}{\\Delta t} = f(t, u)\n$$\nWe still need to decide how to evaluate the $f(t, u)$ term however. \nLets look at this from a perspective of quadrature, take the intergral of both sides:\n$$\\begin{aligned}\n \\int^{t + \\Delta t}_t \\frac{\\text{d} u}{\\text{d}\\tilde{t}} d\\tilde{t} &= \\int^{t + \\Delta t}_t f(t, u) d\\tilde{t} \\ ~ \\\nu(t + \\Delta t) - u(t) &= \\Delta t ~f(t, u(t)) \\ ~ \\\n\\frac{u(t + \\Delta t) - u(t)}{\\Delta t} &= f(t, u(t))\n\\end{aligned}$$\nwhere we have used a left-sided quadrature rule for the integral on the right. 
\nIntroducing some notation to simpify things\n$$\n t_0 = 0 ~~~~~~~ t_1 = t_0 + \\Delta t ~~~~~~~ t_n = t_{n-1} + \\Delta t = n \\Delta t + t_0\n$$\n$$\n U^0 = u(t_0) ~~~~~~~ U^1 = u(t_1) ~~~~~~~ U^n = u(t_n)\n$$\nwe can rewrite our scheme as\n$$\n \\frac{U^{n+1} - U^n}{\\Delta t} = f(t_n, U^n)\n$$\nor\n$$\n U^{n+1} = U^n + \\Delta t f(t_n, U^n)\n$$\nwhich is known as the forward Euler method. In essence we are approximating the derivative with the value of the function at the point we are at $t_n$.",
"t = numpy.linspace(0.0, 1.6e3, 100)\nc_0 = 1.0\ndecay_constant = numpy.log(2.0) / 1600.0\n\nfig = plt.figure()\naxes = fig.add_subplot(1, 1, 1)\naxes.plot(t, c_0 * numpy.exp(-decay_constant * t), label=\"True Solution\")\n\n# Plot Euler step\ndt = 1e3\nu_np = c_0 + dt * (-decay_constant * c_0)\naxes.plot((0.0, dt), (c_0, u_np), 'k')\naxes.plot((dt, dt), (u_np, c_0 * numpy.exp(-decay_constant * dt)), 'k--')\naxes.plot((0.0, 0.0), (c_0, u_np), 'k--')\naxes.plot((0.0, dt), (u_np, u_np), 'k--')\naxes.text(400, u_np - 0.05, '$\\Delta t$', fontsize=16)\n\naxes.set_title(\"Radioactive Decay with $t_{1/2} = 1600$ years\")\naxes.set_xlabel('t (years)')\naxes.set_ylabel('$c$')\naxes.set_xlim(-1e2, 1.6e3)\naxes.set_ylim((0.5,1.0))\nplt.show()\n\nc_0 = 1.0\ndecay_constant = numpy.log(2.0) / 1600.0\nf = lambda t, u: -decay_constant * u\n\nt_exact = numpy.linspace(0.0, 1.6e3, 100)\nu_exact = c_0 * numpy.exp(-decay_constant * t_exact)\n\n# Implement Euler\nt_euler = numpy.linspace(0.0, 1.6e3, 10)\ndelta_t = t_euler[1] - t_euler[0]\nu_euler = numpy.empty(t_euler.shape)\nu_euler[0] = c_0\nfor (n, t_n) in enumerate(t_euler[:-1]):\n u_euler[n + 1] = u_euler[n] + delta_t * f(t_n, u_euler[n])\n\nfig = plt.figure()\naxes = fig.add_subplot(1, 1, 1)\naxes.plot(t_euler, u_euler, 'or', label=\"Euler\")\naxes.plot(t_exact, u_exact, 'k--', label=\"True Solution\")\n\naxes.set_title(\"Forward Euler\")\naxes.set_xlabel(\"t (years)\")\naxes.set_xlabel(\"$c(t)$\")\naxes.set_ylim((0.4,1.1))\naxes.legend()\nplt.show()",
"A similar method can be derived if we consider instead using the second order accurate central difference:\n$$\\frac{U^{n+1} - U^{n-1}}{2\\Delta t} = f(t_{n}, U^{n})$$\nthis method is known as the midpoint or leap-frog method. Note that the way we have written this method requires a previous function evaluation and technically is a \"multi-step\" method although we do not actually use the current evaluation.",
"t = numpy.linspace(0.0, 1.6e3, 100)\nc_0 = 1.0\ndecay_constant = numpy.log(2.0) / 1600.0\n\nfig = plt.figure()\naxes = fig.add_subplot(1, 1, 1)\naxes.plot(t, c_0 * numpy.exp(-decay_constant * t), label=\"True Solution\")\n\n# Plot Euler step\ndt = 1e3\nu_np = c_0 + dt * (-decay_constant * c_0 * numpy.exp(-decay_constant * dt / 2.0))\naxes.plot((0.0, dt), (c_0, u_np), 'k')\naxes.plot((dt, dt), (u_np, c_0 * numpy.exp(-decay_constant * dt)), 'k--')\naxes.plot((0.0, 0.0), (c_0, u_np), 'k--')\naxes.plot((0.0, dt), (u_np, u_np), 'k--')\naxes.text(400, u_np - 0.05, '$\\Delta t$', fontsize=16)\n\naxes.set_title(\"Radioactive Decay with $t_{1/2} = 1600$ years\")\naxes.set_xlabel('t (years)')\naxes.set_ylabel('$c$')\naxes.set_xlim(-1e2, 1.6e3)\naxes.set_ylim((0.5,1.0))\nplt.show()\n\nc_0 = 1.0\ndecay_constant = numpy.log(2.0) / 1600.0\nf = lambda t, u: -decay_constant * u\n\nt_exact = numpy.linspace(0.0, 1.6e3, 100)\nu_exact = c_0 * numpy.exp(-decay_constant * t_exact)\n\n# Implement leap-frog\nt_leapfrog = numpy.linspace(0.0, 1.6e3, 10)\ndelta_t = t_leapfrog[1] - t_leapfrog[0]\nu_leapfrog = numpy.empty(t_leapfrog.shape)\nu_leapfrog[0] = c_0\n# First evaluation use Euler to get us going\nu_leapfrog[1] = u_leapfrog[0] + delta_t * f(t_leapfrog[0], u_leapfrog[0])\nfor n in xrange(1, t_leapfrog.shape[0] - 1):\n u_leapfrog[n + 1] = u_leapfrog[n - 1] + 2.0 * delta_t * f(t[n], u_leapfrog[n])\n\nfig = plt.figure()\naxes = fig.add_subplot(1, 1, 1)\naxes.plot(t_leapfrog, u_leapfrog, 'or', label=\"Leap-Frog\")\naxes.plot(t_exact, u_exact, 'k--', label=\"True Solution\")\n\naxes.set_title(\"Leap-Frog\")\naxes.set_xlabel(\"t (years)\")\naxes.set_xlabel(\"$c(t)$\")\naxes.set_ylim((0.4,1.1))\naxes.legend()\nplt.show()",
"Similar to forward Euler is the backward Euler method which, as you may have guessed, evaluates the function $f$ at the updated time so that\n$$\n U^{n+1} = U^n + \\Delta t f(t_{n+1}, U^{n+1}).\n$$\nSchemes where the function $f$ is evaluated at the unknown time are called implicit methods.",
"t = numpy.linspace(0.0, 1.6e3, 100)\nc_0 = 1.0\ndecay_constant = numpy.log(2.0) / 1600.0\n\nfig = plt.figure()\naxes = fig.add_subplot(1, 1, 1)\naxes.plot(t, c_0 * numpy.exp(-decay_constant * t), label=\"True Solution\")\n\n# Plot Euler step\ndt = 1e3\nu_np = c_0 + dt * (-decay_constant * c_0 * numpy.exp(-decay_constant * dt))\naxes.plot((0.0, dt), (c_0, u_np), 'k')\naxes.plot((dt, dt), (u_np, c_0 * numpy.exp(-decay_constant * dt)), 'k--')\naxes.plot((0.0, 0.0), (c_0, c_0 * numpy.exp(-decay_constant * dt)), 'k--')\naxes.plot((0.0, dt), (c_0 * numpy.exp(-decay_constant * dt), c_0 * numpy.exp(-decay_constant * dt)), 'k--')\naxes.text(400, u_np - 0.05, '$\\Delta t$', fontsize=16)\n\naxes.set_title(\"Radioactive Decay with $t_{1/2} = 1600$ years\")\naxes.set_xlabel('t (years)')\naxes.set_ylabel('$c$')\naxes.set_xlim(-1e2, 1.6e3)\naxes.set_ylim((0.5,1.0))\nplt.show()\n\nc_0 = 1.0\ndecay_constant = numpy.log(2.0) / 1600.0\nf = lambda t, u: -decay_constant * u\n\nt_exact = numpy.linspace(0.0, 1.6e3, 100)\nu_exact = c_0 * numpy.exp(-decay_constant * t_exact)\n\n# Implement backwards Euler\nt_backwards = numpy.linspace(0.0, 1.6e3, 10)\ndelta_t = t_backwards[1] - t_backwards[0]\nu_backwards = numpy.empty(t_backwards.shape)\nu_backwards[0] = c_0\nfor n in xrange(0, t_backwards.shape[0] - 1):\n u_backwards[n + 1] = u_backwards[n] / (1.0 + decay_constant * delta_t)\n\nfig = plt.figure()\naxes = fig.add_subplot(1, 1, 1)\naxes.plot(t_backwards, u_backwards, 'or', label=\"Backwards Euler\")\naxes.plot(t_exact, u_exact, 'k--', label=\"True Solution\")\n\naxes.set_title(\"Backwards Euler\")\naxes.set_xlabel(\"t (years)\")\naxes.set_xlabel(\"$c(t)$\")\naxes.set_ylim((0.4,1.1))\naxes.legend()\nplt.show()",
"Another simple implicit method is based on integration using the trapezoidal method. The scheme is\n$$\n \\frac{U^{n+1} - U^{n}}{\\Delta t} = \\frac{1}{2} (f(U^n) + f(U^{n+1}))\n$$",
"c_0 = 1.0\ndecay_constant = numpy.log(2.0) / 1600.0\nt_exact = numpy.linspace(0.0, 1.6e3, 100)\nu_exact = c_0 * numpy.exp(-decay_constant * t_exact)\n\n# Implement trapezoidal method\nt = numpy.linspace(0.0, 1.6e3, 10)\ndelta_t = t[1] - t[0]\nu = numpy.empty(t.shape)\nu[0] = c_0\nintegration_constant = (1.0 - decay_constant * delta_t / 2.0) / (1.0 + decay_constant * delta_t / 2.0)\nfor n in xrange(t.shape[0] - 1):\n u[n + 1] = u[n] * integration_constant\n\nfig = plt.figure()\naxes = fig.add_subplot(1, 1, 1)\naxes.plot(t, u, 'or', label=\"Trapezoidal\")\naxes.plot(t_exact, u_exact, 'k--', label=\"True Solution\")\n\naxes.set_title(\"Trapezoidal\")\naxes.set_xlabel(\"t (years)\")\naxes.set_xlabel(\"$c(t)$\")\naxes.set_ylim((0.4,1.1))\naxes.legend()\nplt.show()",
"Error Analysis\nTruncation Errors\nWe can define truncation errors the same as we did before where we insert the true solution of the ODE into the difference equation and use Taylor series expansions.\n$$\n \\tau^n = \\frac{1}{\\Delta t} [U^{n+1} - u(t + \\Delta t)]\n$$\nSimilarly if we know\n$$\n \\lim_{\\Delta t \\rightarrow 0} \\tau^n = 0\n$$\nthen the discretized equation is considered consistent.\nOrder of accuracy is also defined the same way as before. If\n$$\n || \\tau || \\leq C \\Delta t^p\n$$\nuniformly on $t \\in [0, T]$ then the discretization is $p$th order accurate. Note that a method is consistent if $p > 0$.\nError Analysis of Forward Euler\nWe can analyze the error and convergence order of forward Euler by considering the Taylor series centered at $t_n$:\n$$\n u(t) = u(t_n) + (t - t_n) u'(t_n) + \\frac{u''(t_n)}{2} (t - t_n)^2 + \\mathcal{O}((t-t_n)^3)\n$$\nCompute the LTE for forward Euler's method.\nEvaluating this series at $t_{n+1}$ gives\n$$\\begin{aligned}\nu(t_{n+1}) &= u(t_n) + (t_{n+1} - t_n) u'(t_n) + \\frac{u''(t_n)}{2} (t_{n+1} - t_n)^2 + \\mathcal{O}((t_{n+1}-t_n)^3)\\\n&=u(t_n) + \\Delta t f(t_n, u(t_n)) + \\frac{u''(t_n)}{2} \\Delta t^2 + \\mathcal{O}(\\Delta t^3)\n\\end{aligned}$$\nFrom the definition of truncation error we can use our Taylor series expression and find the truncation error to be\n$$\\begin{aligned}\n \\tau^n &= \\frac{u(t_{n+1}) - u(t_n)}{\\Delta t} - f(t_n, u(t_n)) \\\n &= \\frac{1}{\\Delta t} \\left [u(t_n) + \\Delta t ~ f(t_n, u(t_n)) + \\frac{u''(t_n)}{2} \\Delta t^2 + \\mathcal{O}(\\Delta t^3) - u(t_n) - \\Delta t ~ f(t_n, u(t_n)) \\right ]\\\n &= \\frac{1}{\\Delta t} \\left [ \\frac{u''(t_n)}{2} \\Delta t^2 + \\mathcal{O}(\\Delta t^3) \\right ] \\\n &= \\frac{u''(t_n)}{2} \\Delta t + \\mathcal{O}(\\Delta t^2)\n\\end{aligned}$$\nThis implies that forwar Euler is first order accurate and therefore consistent.\nError Analysis of Leap-Frog Method\nTo easily analyze this method we will expand the Taylor series 
from before to another order and evaluate at both the needed positions:\n$$\n u(t) = u(t_n) + (t - t_n) u'(t_n) + (t - t_n)^2 \\frac{u''(t_n)}{2} + (t - t_n)^3 \\frac{u'''(t_n)}{6} + \\mathcal{O}((t-t_n)^4)\n$$\nleading to \n$$\\begin{aligned}\nu(t_{n+1}) &= u(t_n) + \\Delta t f_n + \\Delta t^2 \\frac{u''(t_n)}{2} + \\Delta t^3 \\frac{u'''(t_n)}{6} + \\mathcal{O}(\\Delta t^4)\\\nu(t_{n-1}) &= u(t_n) - \\Delta t f_n + \\Delta t^2 \\frac{u''(t_n)}{2} - \\Delta t^3 \\frac{u'''(t_n)}{6} + \\mathcal{O}(\\Delta t^4)\n\\end{aligned}$$\nSee if you can compute the LTE in this case.\nPlugging this into our definition of the truncation error along with the leap-frog method definition leads to\n$$\\begin{aligned}\n \\tau^n &= \\frac{u(t_{n+1}) - u(t_{n-1})}{2 \\Delta t} - f(t_n, u(t_n)) \\\n &=\\frac{1}{\\Delta t} \\left[\\frac{1}{2}\\left( u(t_n) + \\Delta t f_n + \\Delta t^2 \\frac{u''(t_n)}{2} + \\Delta t^3 \\frac{u'''(t_n)}{6} + \\mathcal{O}(\\Delta t^4)\\right) \\right . \\\n &~~~~~~~~~~\\left . - \\frac{1}{2} \\left ( u(t_n) - \\Delta t f_n + \\Delta t^2 \\frac{u''(t_n)}{2} - \\Delta t^3 \\frac{u'''(t_n)}{6} + \\mathcal{O}(\\Delta t^4)\\right ) - \\Delta t~ f(t_n, u(t_n)) \\right ] \\\n &= \\frac{1}{\\Delta t} \\left [\\Delta t^3 \\frac{u'''(t_n)}{6} + \\mathcal{O}(\\Delta t^5)\\right ] \\\n &= \\Delta t^2 \\frac{u'''(t_n)}{6} + \\mathcal{O}(\\Delta t^4)\n\\end{aligned}$$\nTherefore the method is second order accurate and is consistent.",
"# Compare accuracy between Euler and Leap-Frog\nf = lambda t, u: -u\nu_exact = lambda t: numpy.exp(-t)\nu_0 = 1.0\n\nt_f = 10.0\nnum_steps = [2**n for n in xrange(4,10)]\ndelta_t = numpy.empty(len(num_steps))\nerror_euler = numpy.empty(len(num_steps))\nerror_trap = numpy.empty(len(num_steps))\nerror_leapfrog = numpy.empty(len(num_steps))\n\nfor (i, N) in enumerate(num_steps):\n t = numpy.linspace(0, t_f, N)\n delta_t[i] = t[1] - t[0]\n \n # Compute Euler solution\n u_euler = numpy.empty(t.shape)\n u_euler[0] = u_0\n for n in xrange(t.shape[0] - 1):\n u_euler[n+1] = u_euler[n] + delta_t[i] * f(t[n], u_euler[n])\n \n # Compute trapezoidal\n u_trap = numpy.empty(t.shape)\n u_trap[0] = u_0\n integration_constant = (1.0 - delta_t[i] / 2.0) / (1.0 + delta_t[i] / 2.0)\n for n in xrange(t.shape[0] - 1):\n u_trap[n + 1] = u_trap[n] * integration_constant\n \n # Compute Leap-Frog\n u_leapfrog = numpy.empty(t.shape)\n u_leapfrog[0] = 1.0\n u_leapfrog[1] = u_euler[1]\n for n in xrange(1, t.shape[0] - 1):\n u_leapfrog[n+1] = u_leapfrog[n-1] + 2.0 * delta_t[i] * f(t[n], u_leapfrog[n])\n \n # Compute error for each\n error_euler[i] = numpy.linalg.norm(delta_t[i] * (u_euler - u_exact(t)), ord=1)\n error_trap[i] = numpy.linalg.norm(delta_t[i] * (u_trap - u_exact(t)), ord=1)\n error_leapfrog[i] = numpy.linalg.norm(delta_t[i] * (u_leapfrog - u_exact(t)), ord=1)\n \n# Plot error vs. 
delta_t\nfig = plt.figure()\naxes = fig.add_subplot(1, 1, 1)\n\norder_C = lambda delta_x, error, order: numpy.exp(numpy.log(error) - order * numpy.log(delta_x))\naxes.loglog(delta_t, error_euler, 'bo', label='Forward Euler')\naxes.loglog(delta_t, error_trap, 'go', label='Trapezoidal')\naxes.loglog(delta_t, error_leapfrog, 'ro', label=\"Leap-Frog\")\n\naxes.loglog(delta_t, order_C(delta_t[2], error_euler[2], 1.0) * delta_t**1.0, '--b')\naxes.loglog(delta_t, order_C(delta_t[2], error_trap[2], 2.0) * delta_t**2.0, '--r')\naxes.loglog(delta_t, order_C(delta_t[2], error_leapfrog[2], 2.0) * delta_t**2.0, '--r')\n\naxes.legend(loc=2)\naxes.set_title(\"Comparison of Errors\")\naxes.set_xlabel(\"$\\Delta t$\")\naxes.set_ylabel(\"$|U(t_f) - u(t_f)|$\")\n\nplt.show()",
"One-Step Errors\nThere is another definition of local truncation error sometimes used in ODE numerical methods called the one-step error which is slightly different than our local truncation error definition. Our definition uses the direct discretization of the derivatives to find the LTE where as this alternative bases the error on a form that looks like it is updating the previous value. As an example consider the midpoint method, the LTE we found before was based on\n$$\n \\frac{U_{n+1} - U_{n-1}}{2 \\Delta t} = f(U_n)\n$$\nleading us to a second order LTE. For the one-step error we consider instead \n$$\n U_{n+1} = U_{n-1} + 2 \\Delta t f(U_n)\n$$\nwhich leads to the one-step error $\\mathcal{O}(\\Delta t^3)$ instead!\n$$\\begin{aligned}\n \\mathcal{L}^n &= u(t_{n+1}) - u(t_{n-1}) - 2 \\Delta t f(u(t_n)) \\\n &= \\frac{1}{3} \\Delta t^3 u'''(t_n) + \\mathcal{O}(\\Delta t^5) \\\n &= 2 ~\\Delta t ~\\tau^n.\n\\end{aligned}$$\nThis one-step error is suggestively named to indicate that perhaps this is the error for one time step where as the global error may be higher. To remain consistent with our previous discussion of convergences we will continue to use our previous definition of the LTE. We will show that with the appropriate definition of stability and a $p$ order LTE we can expect a $p$th order global error. In general for a $p+1$th order one-step error the global error will be $p$th order.\nTaylor Series Methods\nA Taylor series method can be derived by direct substitution of the right-hand-side function $f(t, u)$ and it's appropriate derivatives into the Taylor series expansion for $u(t_{n+1})$. For a $p$th order method we would look at the Taylor series up to that order and replace all the derivatives of $u$ with derivatives of $f$ instead. 
\nFor the general case we have\n\\begin{align}\n u(t_{n+1}) = u(t_n) + \\Delta t u'(t_n) + \\frac{\\Delta t^2}{2} u''(t_n) + \\frac{\\Delta t^3}{6} u'''(t_n) + \\cdots + \\frac{\\Delta t^p}{p!} u^{(p)}(t_n)\n\\end{align}\nwhich contains derivatives of $u$ up to $p$th order. We then replace these derivatives with the appropriate derivative of $f$ which will always be one less than the derivative of $u$ (due to the original ODE)\n[\n u^{(p)}(t_n) = f^{(p-1)}(t_n, u(t_n))\n]\nleading to the method\n[\n u(t_{n+1}) = u(t_n) + \\Delta t f(t_n, u(t_n)) + \\frac{\\Delta t^2}{2} f'(t_n, u(t_n)) + \\frac{\\Delta t^3}{6} f''(t_n, u(t_n)) + \\cdots + \\frac{\\Delta t^p}{p!} f^{(p-1)}(t_n, u(t_n)).\n]\nThe drawback to these methods is that we have to derive a new one each time we have a new $f$ and we also need $p-1$ derivatives of $f$.\n2nd Order Taylor Series Method\nWe want terms up to second order so we need to take the derivative of $u' = f(t, u)$ once to find $u'' = f'(t, u)$. See if you can derive the method.\n\\begin{align}\n u(t_{n+1}) &= u(t_n) + \\Delta t u'(t_n) + \\frac{\\Delta t^2}{2} u''(t_n) \\\n &=u(t_n) + \\Delta t f(t_n, u(t_n)) + \\frac{\\Delta t^2}{2} f'(t_n, u(t_n)) ~~~ \\text{or} \\\n U^{n+1} &= U^n + \\Delta t f(t_n, U^n) + \\frac{\\Delta t^2}{2} f'(t_n, U^n).\n\\end{align}\nRunge-Kutta Methods\nOne way to derive higher-order ODE solvers is by computing intermediate stages. These are not multi-step methods as they still only require information from the current time step but they raise the order of accuracy by adding stages. 
These types of methods are called Runge-Kutta methods.\nExample: Two-stage Runge-Kutta Methods\nThe basic idea behind the simplest of the Runge-Kutta methods is to approximate the solution at $t_n + \\Delta t / 2$ via Euler's method and use this in the function evaluation for the final update.\n$$\\begin{aligned}\n U^ &= U^n + \\frac{1}{2} \\Delta t f(U^n) \\\n U^{n+1} &= U^n + \\Delta t f(U^) = U^n + \\Delta t f(U^n + \\frac{1}{2} \\Delta t f(U^n))\n\\end{aligned}$$\nThe truncation error can be computed similarly to how we did so before but we do need to figure out how to compute the derivative inside of the function. Note that due to $f(u(t_n)) = u'(t_n)$ that differentiating this leads to $f'(u(t_n)) u'(t_n) = u''(t_n)$ leading to\n$$\\begin{aligned}\n f\\left(u(t_n) + \\frac{1}{2} \\Delta t f(u(t_n)) \\right ) &= f\\left(u(t_n) +\\frac{1}{2} \\Delta t u'(t_n) \\right ) \\\n &= f(u(t_n)) + \\frac{1}{2} \\Delta t u'(t_n) f'(u(t_n)) + \\frac{1}{8} \\Delta t^2 (u'(t_n))^2 f''(u(t_n)) + \\mathcal{O}(\\Delta t^3) \\\n &=u'(t_n) + \\frac{1}{2} \\Delta t u''(t_n) + \\mathcal{O}(\\Delta t^2)\n\\end{aligned}$$\nGoing back to the truncation error we have\n$$\\begin{aligned}\n \\tau^n &= \\frac{1}{\\Delta t} \\left[u(t_n) + \\Delta t f\\left(u(t_n) + \\frac{1}{2} \\Delta t f(u(t_n))\\right) - \\left(u(t_n) + \\Delta t f(t_n, u(t_n)) + \\frac{u''(t_n)}{2} \\Delta t^2 + \\mathcal{O}(\\Delta t^3) \\right ) \\right] \\\n &=\\frac{1}{\\Delta t} \\left[\\Delta t u'(t_n) + \\frac{1}{2} \\Delta t^2 u''(t_n) + \\mathcal{O}(\\Delta t^3) - \\Delta t u'(t_n) - \\frac{u''(t_n)}{2} \\Delta t^2 + \\mathcal{O}(\\Delta t^3) \\right] \\\n &= \\mathcal{O}(\\Delta t^2)\n\\end{aligned}$$\nso this method is second order accurate.\nExample: 4-stage Runge-Kutta Method\n$\\begin{aligned}\n Y_1 &= U^n \\\n Y_2 &= U^n + \\frac{1}{2} \\Delta t f(Y_1, t_n) \\\n Y_3 &= U^n + \\frac{1}{2} \\Delta t f(Y_2, t_n + \\Delta t / 2) \\\n Y_4 &= U^n + \\Delta t f(Y_3, t_n + \\Delta t / 2) \\\n U^{n+1} &= U^n + 
\\frac{\\Delta t}{6} \\left [f(Y_1, t_n) + 2 f(Y_2, t_n + \\Delta t / 2) + 2 f(Y_3, t_n + \\Delta t/2) + f(Y_4, t_n + \\Delta t) \\right ]\n\\end{aligned}$",
"# Implement and compare the two-stage and 4-stage Runge-Kutta methods\nf = lambda t, u: -u\n\nt_exact = numpy.linspace(0.0, 10.0, 100)\nu_exact = numpy.exp(-t_exact)\n\nN = 50\nt = numpy.linspace(0, 10.0, N)\ndelta_t = t[1] - t[0]\nU_2 = numpy.empty(t.shape)\nU_4 = numpy.empty(t.shape)\nU_2[0] = 1.0\nU_4[0] = 1.0\n\nfor (n, t_n) in enumerate(t[1:]):\n U_2[n+1] = U_2[n] + 0.5 * delta_t * f(t_n, U_2[n])\n U_2[n+1] = U_2[n] + delta_t * f(t_n + 0.5 * delta_t, U_2[n+1])\n y_1 = U_4[n]\n y_2 = U_4[n] + 0.5 * delta_t * f(t_n, y_1)\n y_3 = U_4[n] + 0.5 * delta_t * f(t_n + 0.5 * delta_t, y_2)\n y_4 = U_4[n] + delta_t * f(t_n + 0.5 * delta_t, y_3)\n U_4[n+1] = U_4[n] + delta_t / 6.0 * (f(t_n, y_1) + 2.0 * f(t_n + 0.5 * delta_t, y_2) + 2.0 * f(t_n + 0.5 * delta_t, y_3) + f(t_n + delta_t, y_4))\n \nfig = plt.figure()\naxes = fig.add_subplot(1, 1, 1)\n\naxes.plot(t_exact, u_exact, 'k', label=\"True\")\naxes.plot(t, U_2, 'ro', label=\"2-Stage\")\naxes.plot(t, U_4, 'bo', label=\"4-Stage\")\naxes.legend(loc=1)\n\nplt.show()\n\n# Compare accuracy between Euler and RK\nf = lambda t, u: -u\nu_exact = lambda t: numpy.exp(-t)\n\nt_f = 10.0\nnum_steps = [2**n for n in xrange(5,12)]\ndelta_t = numpy.empty(len(num_steps))\nerror_euler = numpy.empty(len(num_steps))\nerror_2 = numpy.empty(len(num_steps))\nerror_4 = numpy.empty(len(num_steps))\n\nfor (i, N) in enumerate(num_steps):\n t = numpy.linspace(0, t_f, N)\n delta_t[i] = t[1] - t[0]\n \n # Compute Euler solution\n U_euler = numpy.empty(t.shape)\n U_euler[0] = 1.0\n for (n, t_n) in enumerate(t[1:]):\n U_euler[n+1] = U_euler[n] + delta_t[i] * f(t_n, U_euler[n])\n \n # Compute 2 and 4-stage\n U_2 = numpy.empty(t.shape)\n U_4 = numpy.empty(t.shape)\n U_2[0] = 1.0\n U_4[0] = 1.0\n for (n, t_n) in enumerate(t[1:]):\n U_2[n+1] = U_2[n] + 0.5 * delta_t[i] * f(t_n, U_2[n])\n U_2[n+1] = U_2[n] + delta_t[i] * f(t_n, U_2[n+1])\n y_1 = U_4[n]\n y_2 = U_4[n] + 0.5 * delta_t[i] * f(t_n, y_1)\n y_3 = U_4[n] + 0.5 * delta_t[i] * f(t_n + 0.5 * 
delta_t[i], y_2)\n y_4 = U_4[n] + delta_t[i] * f(t_n + 0.5 * delta_t[i], y_3)\n U_4[n+1] = U_4[n] + delta_t[i] / 6.0 * (f(t_n, y_1) + 2.0 * f(t_n + 0.5 * delta_t[i], y_2) + 2.0 * f(t_n + 0.5 * delta_t[i], y_3) + f(t_n + delta_t[i], y_4))\n \n # Compute error for each\n error_euler[i] = numpy.abs(U_euler[-1] - u_exact(t_f)) / numpy.abs(u_exact(t_f))\n error_2[i] = numpy.abs(U_2[-1] - u_exact(t_f)) / numpy.abs(u_exact(t_f))\n error_4[i] = numpy.abs(U_4[-1] - u_exact(t_f)) / numpy.abs(u_exact(t_f))\n \n# Plot error vs. delta_t\nfig = plt.figure()\naxes = fig.add_subplot(1, 1, 1)\n\naxes.loglog(delta_t, error_euler, 'bo', label='Forward Euler')\naxes.loglog(delta_t, error_2, 'ro', label='2-stage')\naxes.loglog(delta_t, error_4, 'go', label=\"4-stage\")\n\norder_C = lambda delta_x, error, order: numpy.exp(numpy.log(error) - order * numpy.log(delta_x))\naxes.loglog(delta_t, order_C(delta_t[1], error_euler[1], 1.0) * delta_t**1.0, '--b')\naxes.loglog(delta_t, order_C(delta_t[1], error_2[1], 2.0) * delta_t**2.0, '--r')\naxes.loglog(delta_t, order_C(delta_t[1], error_4[1], 4.0) * delta_t**4.0, '--g')\n\naxes.legend(loc=4)\naxes.set_title(\"Comparison of Errors\")\naxes.set_xlabel(\"$\\Delta t$\")\naxes.set_ylabel(\"$|U(t_f) - u(t_f)|$\")\n\nplt.show()",
"Linear Multi-Step Methods\nMulti-step methods (as introduced via the leap-frog method) are ODE methods that require multiple time step evaluations to work. Some of the advanatages of using a multi-step method rather than one-step method included\n\nTaylor series methods require differentiating the given equation which can be cumbersome and difficult to impelent\nOne-step methods at higher order often require the evaluation of the function $f$ many times\n\nDisadvantages\n\nMethods are not self-starting, i.e. they require other methods to find the initial values\nThe time step $\\Delta t$ in one-step methods can be changed at any time while multi-step methods this is much more complex\n\nGeneral Linear Multi-Step Methods\nAll linear multi-step methods can be written as the linear combination of past, present and future solutions:\n$$\n \\sum^r_{j=0} \\alpha_j U^{n+j} = \\Delta t \\sum^r_{j=0} \\beta_j f(U^{n+j}, t_{n+j})\n$$\nIf $\\beta_r = 0$ then the method is explicit (only requires previous time steps). Note that the coefficients are not unique as we can multiply both sides by a constant. In practice a normalization of $\\alpha_r = 1$ is used.\nExample: Adams Methods\n$$\n U^{n+r} = U^{n+r-1} + \\Delta t \\sum^r_{j=0} \\beta_j f(U^{n+j})\n$$\nAll these methods have $\\alpha_r = 1$, $\\alpha_{r-1} = -1$ and $\\alpha_j=0$ for $j < r - 1$ leaving the method to be specified by how the evaluations of $f$ is done determining the $\\beta_j$.\nAdams-Bashforth Methods\nThe Adams-Bashforth methods are explicit solvers that maximize the order of accuracy given a number of steps $r$. 
This is accomplished by looking at the Taylor series and picking the coefficients $\\beta_j$ to elliminate as many terms in the Taylor series as possible.\n$$\\begin{aligned}\n &\\text{1-step:}& &U^{n+1} = U^n &+& \\Delta t ~ f(U^n) \\\n &\\text{2-step:}& &U^{n+2} = U^{n+1} &+& \\frac{\\Delta t}{2} ~ (-f(U^n) + 3 f(U^{n+1})) \\\n &\\text{3-step:}& &U^{n+3} = U^{n+2} &+& \\frac{\\Delta t}{12} ~ (5 f(U^n) - 16 f(U^{n+1}) + 23 f(U^{n+2})) \\\n &\\text{4-step:}& &U^{n+4} = U^{n+3} &+& \\frac{\\Delta t}{24} ~ (-9 f(U^n) + 37 f(U^{n+1}) -59 f(U^{n+2}) + 55 f(U^{n+3}))\n\\end{aligned}$$",
"# Use 2-step Adams-Bashforth to compute solution\nf = lambda t, u: -u\n\nt_exact = numpy.linspace(0.0, 10.0, 100)\nu_exact = numpy.exp(-t_exact)\n\nN = 100\nt = numpy.linspace(0, 10.0, N)\ndelta_t = t[1] - t[0]\nU = numpy.empty(t.shape)\n\n# Use RK-2 to start the method\nU[0] = 1.0\nU[1] = U[0] + 0.5 * delta_t * f(t[0], U[0])\nU[1] = U[0] + delta_t * f(t[0], U[1])\nfor n in xrange(0,len(t)-2):\n U[n+2] = U[n + 1] + delta_t / 2.0 * (-f(t[n], U[n]) + 3.0 * f(t[n+1], U[n+1]))\n \nfig = plt.figure()\naxes = fig.add_subplot(1, 1, 1)\n\naxes.plot(t_exact, u_exact, 'k', label=\"True\")\naxes.plot(t, U, 'ro', label=\"2-step A-B\")\n\naxes.set_title(\"Adams-Bashforth Method\")\naxes.set_xlabel(\"t\")\naxes.set_xlabel(\"u(t)\")\naxes.legend(loc=1)\n\nplt.show()",
"Adams-Moulton Methods\nThe Adams-Moulton methods are the implicit versions of the Adams-Bashforth methods. Since this gives one additional parameter to use $\\beta_r$ these methods are generally one order of accuracy greater than their counterparts.\n$$\\begin{aligned}\n&\\text{1-step:}& &U^{n+1} = U^n &+& \\frac{\\Delta t}{2}(f(U^n) + f(U^{n+1})) \\\n&\\text{2-step:}& &U^{n+2} = U^{n+1} &+& \\frac{\\Delta t}{12} (-f(U^n) + 8f(U^{n+1}) + 5f(U^{n+2})) \\\n&\\text{3-step:}& &U^{n+3} = U^{n+2} &+& \\frac{\\Delta t}{24} (f(U^n) - 5f(U^{n+1}) + 19f(U^{n+2}) + 9f(U^{n+3})) \\\n&\\text{3-step:}& &U^{n+4} = U^{n+3} &+& \\frac{\\Delta t}{720} (-19 f(U^n) + 106 f(U^{n+1}) -264 f(U^{n+2}) + 646 f(U^{n+3}) + 251 f(U^{n+4})) \n\\end{aligned}$$",
"# Use 2-step Adams-Moulton to compute solution\n# u' = - decay u\ndecay_constant = 1.0\nf = lambda t, u: -decay_constant * u\n\nt_exact = numpy.linspace(0.0, 10.0, 100)\nu_exact = numpy.exp(-t_exact)\n\nN = 20\nt = numpy.linspace(0, 10.0, N)\ndelta_t = t[1] - t[0]\nU = numpy.empty(t.shape)\nU[0] = 1.0\nU[1] = U[0] + 0.5 * delta_t * f(t[0], U[0])\nU[1] = U[0] + delta_t * f(t[0], U[1]) \nintegration_constant = 1.0 / (1.0 + 5.0 * decay_constant * delta_t / 12.0)\nfor n in xrange(t.shape[0] - 2):\n U[n+2] = (U[n+1] + decay_constant * delta_t / 12.0 * (U[n] - 8.0 * U[n+1])) * integration_constant\n\nfig = plt.figure()\naxes = fig.add_subplot(1, 1, 1)\n\naxes.plot(t_exact, u_exact, 'k', label=\"True\")\naxes.plot(t, U, 'ro', label=\"2-step A-M\")\n\naxes.set_title(\"Adams-Moulton Method\")\naxes.set_xlabel(\"t\")\naxes.set_xlabel(\"u(t)\")\naxes.legend(loc=1)\n\nplt.show()",
"Truncation Error for Multi-Step Methods\nWe can again find the truncation error in general for linear multi-step methods:\n$$\\begin{aligned}\n \\tau^n &= \\frac{1}{\\Delta t} \\left [\\sum^r_{j=0} \\alpha_j u(t_{n+j}) - \\Delta t \\sum^r_{j=0} \\beta_j f(t_{n+j}, u(t_{n+j})) \\right ]\n\\end{aligned}$$\nUsing the general expansion and evalution of the Taylor series about $t_n$ we have\n$$\\begin{aligned}\n u(t_{n+j}) &= u(t_n) + j \\Delta t u'(t_n) + \\frac{1}{2} (j \\Delta t)^2 u''(t_n) + \\mathcal{O}(\\Delta t^3) \\\n u'(t_{n+j}) &= u'(t_n) + j \\Delta t u''(t_n) + \\frac{1}{2} (j \\Delta t)^2 u'''(t_n) + \\mathcal{O}(\\Delta t^3)\n\\end{aligned}$$\nleading to\n$$\\begin{aligned}\n\\tau^n &= \\frac{1}{\\Delta t}\\left( \\sum^r_{j=0} \\alpha_j\\right) u(t_{n+j}) + \\left(\\sum^r_{j=0} (j\\alpha_j - \\beta_j)\\right) u'(t_n) + \\Delta t \\left(\\sum^r_{j=0} \\left (\\frac{1}{2}j^2 \\alpha_j - j \\beta_j \\right) \\right) u''(t_n) \\\n&~~~~~~~ + \\cdots + \\Delta t^{q - 1} \\left (\\frac{1}{q!} \\left(j^q \\alpha_j - \\frac{1}{(q-1)!} j^{q-1} \\beta_j \\right) \\right) u^{(q)}(t_n) + \\cdots\n\\end{aligned}$$\nThe method is consistent if the first two terms of the expansion vanish, i.e. $\\sum^r_{j=0} \\alpha_j = 0$ and $\\sum^r_{j=0} j \\alpha_j = \\sum^r_{j=0} \\beta_j$.",
"# Compare accuracy between RK-2, AB-2 and AM-2\nf = lambda t, u: -u\nu_exact = lambda t: numpy.exp(-t)\n\nt_f = 10.0\nnum_steps = [2**n for n in xrange(4,10)]\ndelta_t = numpy.empty(len(num_steps))\nerror_rk = numpy.empty(len(num_steps))\nerror_ab = numpy.empty(len(num_steps))\nerror_am = numpy.empty(len(num_steps))\n\nfor (i, N) in enumerate(num_steps):\n t = numpy.linspace(0, t_f, N)\n delta_t[i] = t[1] - t[0]\n \n # Compute RK2\n U_rk = numpy.empty(t.shape)\n U_rk[0] = 1.0\n for n in xrange(t.shape[0]-1):\n U_rk[n+1] = U_rk[n] + 0.5 * delta_t[i] * f(t[n], U_rk[n])\n U_rk[n+1] = U_rk[n] + delta_t[i] * f(t[n], U_rk[n+1])\n \n # Compute Adams-Bashforth 2-stage\n U_ab = numpy.empty(t.shape)\n U_ab[:2] = U_rk[:2]\n for n in xrange(t.shape[0] - 2):\n U_ab[n+2] = U_ab[n + 1] + delta_t[i] / 2.0 * (-f(t[n], U_ab[n]) + 3.0 * f(t[n+1], U_ab[n+1]))\n \n # Compute Adama-Moulton 2-stage\n U_am = numpy.empty(t.shape)\n U_am[:2] = U_rk[:2]\n decay_constant = 1.0\n integration_constant = 1.0 / (1.0 + 5.0 * decay_constant * delta_t[i] / 12.0)\n for n in xrange(t.shape[0] - 2):\n U_am[n+2] = (U_am[n+1] + decay_constant * delta_t[i] / 12.0 * (U_am[n] - 8.0 * U_am[n+1])) * integration_constant\n \n # Compute error for each\n error_rk[i] = numpy.linalg.norm(delta_t[i] * (U_rk - u_exact(t)), ord=1)\n error_ab[i] = numpy.linalg.norm(delta_t[i] * (U_ab - u_exact(t)), ord=1)\n error_am[i] = numpy.linalg.norm(delta_t[i] * (U_am - u_exact(t)), ord=1)\n \n# Plot error vs. 
delta_t\nfig = plt.figure()\naxes = fig.add_subplot(1, 1, 1)\n\naxes.loglog(delta_t, error_rk, 'bo', label='RK-2')\naxes.loglog(delta_t, error_ab, 'ro', label='AB-2')\naxes.loglog(delta_t, error_am, 'go', label=\"AM-2\")\n\norder_C = lambda delta_x, error, order: numpy.exp(numpy.log(error) - order * numpy.log(delta_x))\naxes.loglog(delta_t, order_C(delta_t[1], error_rk[1], 2.0) * delta_t**2.0, '--r')\naxes.loglog(delta_t, order_C(delta_t[1], error_ab[1], 2.0) * delta_t**2.0, '--r')\naxes.loglog(delta_t, order_C(delta_t[1], error_am[1], 3.0) * delta_t**3.0, '--g')\n\naxes.legend(loc=4)\naxes.set_title(\"Comparison of Errors\")\naxes.set_xlabel(\"$\\Delta t$\")\naxes.set_ylabel(\"$|U(t) - u(t)|$\")\n\nplt.show()",
"Predictor-Corrector Methods\nOne way to simplify the Adams-Moulton methods so that implicit evaluations are not needed is by estimating the required implicit function evaluations with an explicit method. These are often called predictor-corrector methods as the explicit method provides a prediction of what the solution might be and the not explicit corrector step works to make that estimate more accurate.\nExample: One-Step Adams-Bashforth-Moulton\nUse the One-step Adams-Bashforth method to predict the value of $U^{n+1}$ and then use the Adams-Moulton method to correct that value:\n$\\hat{U}^{n+1} = U^n + \\Delta t f(U^n)$\n$U^{n+1} = U^n + \\frac{1}{2} \\Delta t (f(U^n) + f(\\hat{U}^{n+1})$\nThis method is second order accurate.",
"# One-step Adams-Bashforth-Moulton\nf = lambda t, u: -u\n\nt_exact = numpy.linspace(0.0, 10.0, 100)\nu_exact = numpy.exp(-t_exact)\n\nN = 100\nt = numpy.linspace(0, 10.0, N)\ndelta_t = t[1] - t[0]\nU = numpy.empty(t.shape)\n\nU[0] = 1.0\nfor n in xrange(t.shape[0] - 1):\n U[n+1] = U[n] + delta_t * f(t[n], U[n])\n U[n+1] = U[n] + 0.5 * delta_t * (f(t[n], U[n]) + f(t[n+1], U[n+1]))\n \nfig = plt.figure()\naxes = fig.add_subplot(1, 1, 1)\n\naxes.plot(t_exact, u_exact, 'k', label=\"True\")\naxes.plot(t, U, 'ro', label=\"2-step A-B\")\n\naxes.set_title(\"Adams-Bashforth-Moulton P/C Method\")\naxes.set_xlabel(\"t\")\naxes.set_xlabel(\"u(t)\")\naxes.legend(loc=1)\n\nplt.show()\n\n# Compare accuracy between RK-2, AB-2 and AM-2\nf = lambda t, u: -u\nu_exact = lambda t: numpy.exp(-t)\n\nt_f = 10.0\nnum_steps = [2**n for n in xrange(4,10)]\ndelta_t = numpy.empty(len(num_steps))\nerror_ab = numpy.empty(len(num_steps))\nerror_am = numpy.empty(len(num_steps))\nerror_pc = numpy.empty(len(num_steps))\n\nfor (i, N) in enumerate(num_steps):\n t = numpy.linspace(0, t_f, N)\n delta_t[i] = t[1] - t[0]\n \n # RK-2 bootstrap for AB and AM\n U_rk = numpy.empty(2)\n U_rk[0] = 1.0\n U_rk[1] = U_rk[0] + 0.5 * delta_t[i] * f(t[0], U_rk[0])\n U_rk[1] = U_rk[0] + delta_t[i] * f(t[0], U_rk[1])\n \n # Compute Adams-Bashforth 2-stage\n U_ab = numpy.empty(t.shape)\n U_ab[:2] = U_rk[:2]\n for n in xrange(t.shape[0] - 2):\n U_ab[n+2] = U_ab[n + 1] + delta_t[i] / 2.0 * (-f(t[n], U_ab[n]) + 3.0 * f(t[n+1], U_ab[n+1]))\n \n # Compute Adams-Moulton 2-stage\n U_am = numpy.empty(t.shape)\n U_am[:2] = U_ab[:2]\n decay_constant = 1.0\n integration_constant = 1.0 / (1.0 + 5.0 * decay_constant * delta_t[i] / 12.0)\n for n in xrange(t.shape[0] - 2):\n U_am[n+2] = (U_am[n+1] + decay_constant * delta_t[i] / 12.0 * (U_am[n] - 8.0 * U_am[n+1])) * integration_constant\n \n # Compute Adams-Bashforth-Moulton\n U_pc = numpy.empty(t.shape)\n U_pc[0] = 1.0\n for n in xrange(t.shape[0] - 1):\n U_pc[n+1] = U_pc[n] + 
delta_t[i] * f(t[n], U_pc[n])\n U_pc[n+1] = U_pc[n] + 0.5 * delta_t[i] * (f(t[n], U_pc[n]) + f(t[n+1], U_pc[n+1]))\n \n # Compute error for each\n error_ab[i] = numpy.linalg.norm(delta_t[i] * (U_ab - u_exact(t)), ord=1)\n error_am[i] = numpy.linalg.norm(delta_t[i] * (U_am - u_exact(t)), ord=1)\n error_pc[i] = numpy.linalg.norm(delta_t[i] * (U_pc - u_exact(t)), ord=1)\n \n# Plot error vs. delta_t\nfig = plt.figure()\naxes = fig.add_subplot(1, 1, 1)\n\naxes.loglog(delta_t, error_pc, 'bo', label='PC')\naxes.loglog(delta_t, error_ab, 'ro', label='AB-2')\naxes.loglog(delta_t, error_am, 'go', label=\"AM-2\")\n\norder_C = lambda delta_x, error, order: numpy.exp(numpy.log(error) - order * numpy.log(delta_x))\naxes.loglog(delta_t, order_C(delta_t[1], error_pc[1], 2.0) * delta_t**2.0, '--b')\naxes.loglog(delta_t, order_C(delta_t[1], error_ab[1], 2.0) * delta_t**2.0, '--r')\naxes.loglog(delta_t, order_C(delta_t[1], error_am[1], 3.0) * delta_t**3.0, '--g')\n\naxes.legend(loc=4)\naxes.set_title(\"Comparison of Errors\")\naxes.set_xlabel(\"$\\Delta t$\")\naxes.set_ylabel(\"$|U(t) - u(t)|$\")\n\nplt.show()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
GoogleCloudPlatform/training-data-analyst
|
courses/fast-and-lean-data-science/07_Keras_Flowers_TPU_playground.ipynb
|
apache-2.0
|
[
"Let's train this model on TPU. It's worth it.\nImports",
"import os, sys, math\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport tensorflow as tf\nprint(\"Tensorflow version \" + tf.__version__)\nAUTOTUNE = tf.data.AUTOTUNE",
"TPU detection",
"try: # detect TPUs\n tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect() # TPU detection\n strategy = tf.distribute.TPUStrategy(tpu)\nexcept ValueError: # detect GPUs\n strategy = tf.distribute.MirroredStrategy() # for GPU or multi-GPU machines\n #strategy = tf.distribute.get_strategy() # default strategy that works on CPU and single GPU\n #strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() # for clusters of multi-GPU machines\nprint(\"Number of accelerators: \", strategy.num_replicas_in_sync)",
"Configuration",
"GCS_PATTERN = 'gs://flowers-public/tfrecords-jpeg-192x192-2/*.tfrec'\nIMAGE_SIZE = [192, 192]\n\nif tpu:\n BATCH_SIZE = 16*strategy.num_replicas_in_sync # A TPU has 8 cores so this will be 128\nelse:\n BATCH_SIZE = 32 # On Colab/GPU, a higher batch size does not help and sometimes does not fit on the GPU (OOM)\n\nVALIDATION_SPLIT = 0.19\nCLASSES = ['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips'] # do not change, maps to the labels in the data (folder names)\n\n# splitting data files between training and validation\nfilenames = tf.io.gfile.glob(GCS_PATTERN)\nsplit = int(len(filenames) * VALIDATION_SPLIT)\ntraining_filenames = filenames[split:]\nvalidation_filenames = filenames[:split]\nprint(\"Pattern matches {} data files. Splitting dataset into {} training files and {} validation files\".format(len(filenames), len(training_filenames), len(validation_filenames)))\nvalidation_steps = int(3670 // len(filenames) * len(validation_filenames)) // BATCH_SIZE\nsteps_per_epoch = int(3670 // len(filenames) * len(training_filenames)) // BATCH_SIZE\nprint(\"With a batch size of {}, there will be {} batches per training epoch and {} batch(es) per validation run.\".format(BATCH_SIZE, steps_per_epoch, validation_steps))\n\n#@title display utilities [RUN ME]\n\ndef dataset_to_numpy_util(dataset, N):\n dataset = dataset.batch(N)\n \n # In eager mode, iterate in the Datset directly.\n for images, labels in dataset:\n numpy_images = images.numpy()\n numpy_labels = labels.numpy()\n break;\n\n return numpy_images, numpy_labels\n\ndef title_from_label_and_target(label, correct_label):\n label = np.argmax(label, axis=-1) # one-hot to class number\n correct_label = np.argmax(correct_label, axis=-1) # one-hot to class number\n correct = (label == correct_label)\n return \"{} [{}{}{}]\".format(CLASSES[label], str(correct), ', shoud be ' if not correct else '',\n CLASSES[correct_label] if not correct else ''), correct\n\ndef display_one_flower(image, title, subplot, red=False):\n 
plt.subplot(subplot)\n plt.axis('off')\n plt.imshow(image)\n plt.title(title, fontsize=16, color='red' if red else 'black')\n return subplot+1\n \ndef display_9_images_from_dataset(dataset):\n subplot=331\n plt.figure(figsize=(13,13))\n images, labels = dataset_to_numpy_util(dataset, 9)\n for i, image in enumerate(images):\n title = CLASSES[np.argmax(labels[i], axis=-1)]\n subplot = display_one_flower(image, title, subplot)\n if i >= 8:\n break;\n \n #plt.tight_layout()\n plt.subplots_adjust(wspace=0.1, hspace=0.1)\n plt.show()\n \ndef display_9_images_with_predictions(images, predictions, labels):\n subplot=331\n plt.figure(figsize=(13,13))\n for i, image in enumerate(images):\n title, correct = title_from_label_and_target(predictions[i], labels[i])\n subplot = display_one_flower(image, title, subplot, not correct)\n if i >= 8:\n break;\n \n #plt.tight_layout()\n plt.subplots_adjust(wspace=0.1, hspace=0.1)\n plt.show()\n \ndef display_training_curves(training, validation, title, subplot):\n if subplot%10==1: # set up the subplots on the first call\n plt.subplots(figsize=(10,10), facecolor='#F0F0F0')\n #plt.tight_layout()\n ax = plt.subplot(subplot)\n ax.set_facecolor('#F8F8F8')\n ax.plot(training)\n ax.plot(validation)\n ax.set_title('model '+ title)\n ax.set_ylabel(title)\n ax.set_xlabel('epoch')\n ax.legend(['train', 'valid.'])",
"Read images and labels from TFRecords",
"def read_tfrecord(example):\n features = {\n \"image\": tf.io.FixedLenFeature([], tf.string), # tf.string means bytestring\n \"class\": tf.io.FixedLenFeature([], tf.int64), # shape [] means scalar\n \"one_hot_class\": tf.io.VarLenFeature(tf.float32),\n }\n example = tf.io.parse_single_example(example, features)\n image = tf.io.decode_jpeg(example['image'], channels=3)\n image = tf.cast(image, tf.float32) / 255.0 # convert image to floats in [0, 1] range\n image = tf.reshape(image, [*IMAGE_SIZE, 3]) # explicit size will be needed for TPU\n one_hot_class = tf.sparse.to_dense(example['one_hot_class'])\n one_hot_class = tf.reshape(one_hot_class, [5])\n return image, one_hot_class\n\ndef load_dataset(filenames):\n # read from TFRecords. For optimal performance, read from multiple\n # TFRecord files at once and set the option experimental_deterministic = False\n # to allow order-altering optimizations.\n\n option_no_order = tf.data.Options()\n option_no_order.experimental_deterministic = False\n\n dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTOTUNE)\n dataset = dataset.with_options(option_no_order)\n dataset = dataset.map(read_tfrecord, num_parallel_calls=AUTOTUNE)\n return dataset\n\ndisplay_9_images_from_dataset(load_dataset(training_filenames))",
"training and validation datasets",
"def get_batched_dataset(filenames, train=False):\n dataset = load_dataset(filenames)\n dataset = dataset.cache() # This dataset fits in RAM\n if train:\n # Best practices for Keras:\n # Training dataset: repeat then batch\n # Evaluation dataset: do not repeat\n dataset = dataset.repeat()\n dataset = dataset.batch(BATCH_SIZE)\n dataset = dataset.prefetch(AUTOTUNE) # prefetch next batch while training (autotune prefetch buffer size)\n # should shuffle too but this dataset was well shuffled on disk already\n return dataset\n # source: Dataset performance guide: https://www.tensorflow.org/guide/performance/datasets\n\n# instantiate the datasets\ntraining_dataset = get_batched_dataset(training_filenames, train=True)\nvalidation_dataset = get_batched_dataset(validation_filenames, train=False)\n\nsome_flowers, some_labels = dataset_to_numpy_util(load_dataset(validation_filenames), 160)",
"Model [WORK REQUIRED]\n\ntrain the model as it is, with a single convolutional layer\nAccuracy 40%... Not great.\nadd additional convolutional layers interleaved with max-pooling layers. Try also adding a second dense layer. For example:<br/>\nconv 3x3, 16 filters, relu<br/>\nconv 3x3, 30 filters, relu<br/>\nmax pool 2x2<br/>\nconv 3x3, 50 filters, relu<br/>\nmax pool 2x2<br/>\nconv 3x3, 70 filters, relu<br/>\nflatten<br/>\ndense 5 softmax<br/>\nAccuracy 60%... slightly better. But this model is more than 800K parameters and it overfits dramatically (overfitting = eval loss goes up instead of down).\nTry replacing the Flatten layer by Global average pooling.\nAccuracy still 60% but the model is back to a modest 50K parameters, and does not overfit anymore. If you train longer, it can go even higher.\nTry experimenting with 1x1 convolutions too. They typically follow a 3x3 convolution and decrease the filter count. You can also add dropout between the dense layers. For example:\nconv 3x3, 20 filters, relu<br/>\nconv 3x3, 50 filters, relu<br/>\nmax pool 2x2<br/>\nconv 3x3, 70 filters, relu<br/>\nconv 1x1, 50 filters, relu<br/>\nmax pool 2x2<br/>\nconv 3x3, 100 filters, relu<br/>\nconv 1x1, 70 filters, relu<br/>\nmax pool 2x2<br/>\nconv 3x3, 120 filters, relu<br/>\nconv 1x1, 80 filters, relu<br/>\nmax pool 2x2<br/>\nglobal average pooling<br/>\ndense 5 softmax<br/>\naccuracy 70%\nThe goal is 80% accuracy ! Good luck. (You might want to train for more than 20 epochs to get there. Se your trainig curves to see if it is worth training longer.)",
"with strategy.scope(): # this line is all that is needed to run on TPU (or multi-GPU, ...)\n\n model = tf.keras.Sequential([\n\n ###\n tf.keras.layers.InputLayer(input_shape=[*IMAGE_SIZE, 3]),\n tf.keras.layers.Conv2D(kernel_size=3, filters=20, padding='same', activation='relu'),\n #\n # YOUR LAYERS HERE\n #\n # LAYERS TO TRY:\n # Conv2D(kernel_size=3, filters=30, padding='same', activation='relu')\n # MaxPooling2D(pool_size=2)\n # GlobalAveragePooling2D() / Flatten()\n # Dense(90, activation='relu')\n #\n tf.keras.layers.GlobalAveragePooling2D(),\n tf.keras.layers.Dense(5, activation='softmax')\n ###\n ])\n\n model.compile(\n optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n model.summary()",
"Training",
"EPOCHS = 20\n\nhistory = model.fit(training_dataset, steps_per_epoch=steps_per_epoch, epochs=EPOCHS,\n validation_data=validation_dataset)\n\nprint(history.history.keys())\ndisplay_training_curves(history.history['accuracy'], history.history['val_accuracy'], 'accuracy', 211)\ndisplay_training_curves(history.history['loss'], history.history['val_loss'], 'loss', 212)",
"Predictions",
"# randomize the input so that you can execute multiple times to change results\npermutation = np.random.permutation(160)\nsome_flowers, some_labels = (some_flowers[permutation], some_labels[permutation])\n\npredictions = model.predict(some_flowers, batch_size=16)\nevaluations = model.evaluate(some_flowers, some_labels, batch_size=16)\n \nprint(np.array(CLASSES)[np.argmax(predictions, axis=-1)].tolist())\nprint('[val_loss, val_acc]', evaluations)\n\ndisplay_9_images_with_predictions(some_flowers, predictions, some_labels)",
"License\n\nauthor: Martin Gorner<br>\ntwitter: @martin_gorner\n\nCopyright 2021 Google LLC\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\nThis is not an official Google product but sample code provided for an educational purpose"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
liyigerry/msm_test
|
examples/bayesian-msm.ipynb
|
apache-2.0
|
[
"BayesianMarkovStateModel\nThis example demonstrates the class BayesianMarkovStateModel, which uses Metropolis Markov chain Monte Carlo (MCMC) to sample\nover the posterior distribution of transition matrices, given the observed transitions in your dataset. This can be useful\nfor evaluating the uncertainty due to sampling in your dataset.",
"%matplotlib inline\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom mdtraj.utils import timing\nfrom msmbuilder.example_datasets import load_doublewell\nfrom msmbuilder.cluster import NDGrid\nfrom msmbuilder.msm import BayesianMarkovStateModel, MarkovStateModel",
"Load some double-well data",
"trjs = load_doublewell(random_state=0)['trajectories']\nplt.hist(np.concatenate(trjs), bins=50, log=True)\nplt.ylabel('Frequency')\nplt.show()",
"We'll discretize the space using 10 states\nAnd the build one MSM using the MLE transition matrix estimator, and one with the Bayesian estimator",
"clusterer = NDGrid(n_bins_per_feature=10)\nmle_msm = MarkovStateModel(lag_time=100)\nb_msm = BayesianMarkovStateModel(lag_time=100, n_samples=10000, n_steps=1000)\n\nstates = clusterer.fit_transform(trjs)\nwith timing('running mcmc'):\n b_msm.fit(states)\n\nmle_msm.fit(states)\n\nplt.subplot(2, 1, 1)\nplt.plot(b_msm.all_transmats_[:, 0, 0])\nplt.axhline(mle_msm.transmat_[0, 0], c='k')\nplt.ylabel('t_00')\n\nplt.subplot(2, 1, 2)\nplt.ylabel('t_23')\nplt.xlabel('MCMC Iteration')\nplt.plot(b_msm.all_transmats_[:, 2, 3])\nplt.axhline(mle_msm.transmat_[2, 3], c='k')\nplt.show()\n\nplt.plot(b_msm.all_timescales_[:, 0], label='MCMC')\nplt.axhline(mle_msm.timescales_[0], c='k', label='MLE')\nplt.legend(loc='best')\nplt.ylabel('Longest timescale')\nplt.xlabel('MCMC iteration')\nplt.show()",
"Now lets try using 50 states\nThe MCMC sampling is a lot harder to converge",
"clusterer = NDGrid(n_bins_per_feature=50)\nmle_msm = MarkovStateModel(lag_time=100)\nb_msm = BayesianMarkovStateModel(lag_time=100, n_samples=1000, n_steps=100000)\n\nstates = clusterer.fit_transform(trjs)\nwith timing('running mcmc (50 states)'):\n b_msm.fit(states)\n\nmle_msm.fit(states)\n\nplt.plot(b_msm.all_timescales_[:, 0], label='MCMC')\nplt.axhline(mle_msm.timescales_[0], c='k', label='MLE')\nplt.legend(loc='best')\nplt.ylabel('Longest timescale')\nplt.xlabel('MCMC iteration')\n\nplt.plot(b_msm.all_transmats_[:, 0, 0], label='MCMC')\nplt.axhline(mle_msm.transmat_[0, 0], c='k', label='MLE')\nplt.legend(loc='best')\nplt.ylabel('t_00')\nplt.xlabel('MCMC iteration')"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
LSSTC-DSFP/LSSTC-DSFP-Sessions
|
Sessions/Session07/Day0/TooBriefVisualization.ipynb
|
mit
|
[
"from __future__ import division, print_function, absolute_import",
"Introduction to Visualization:\nDensity Estimation and Data Exploration\nVersion 0.1\nThere are many flavors of data analysis that fall under the \"visualization\" umbrella in astronomy. Today, by way of example, we will focus on 2 basic problems.\n\nBy AA Miller \n16 September 2017",
"import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"Problem 1) Density Estimation\nStarting with 2MASS and SDSS and extending through LSST, we are firmly in an era where data and large statistical samples are cheap. With this explosion in data volume comes a problem: we do not know the underlying probability density function (PDF) of the random variables measured via our observations. Hence - density estimation: an attempt to recover the unknown PDF from observations. In some cases theory can guide us to a parametric form for the PDF, but more often than not such guidance is not available. \nThere is a common, simple, and very familiar tool for density estimation: histograms. \nBut there is also a problem:\nHISTOGRAMS LIE!\nWe will \"prove\" this to be the case in a series of examples. For this exercise, we will load the famous Linnerud data set, which tested 20 middle aged men by measuring the number of chinups, situps, and jumps they could do in order to compare these numbers to their weight, pulse, and waist size. To load the data (just chinups for now) we will run the following:\nfrom sklearn.datasets import load_linnerud\nlinnerud = load_linnerud()\nchinups = linnerud.data[:,0]",
"from sklearn.datasets import load_linnerud\n\nlinnerud = load_linnerud()\nchinups = linnerud.data[:,0]",
"Problem 1a \nPlot the histogram for the number of chinups using the default settings in pyplot.",
"plt.hist( # complete",
"Already with this simple plot we see a problem - the choice of bin centers and number of bins suggest that there is a 0% probability that middle aged men can do 10 chinups. Intuitively this seems incorrect, so lets examine how the histogram changes if we change the number of bins or the bin centers.\nProblem 1b \nUsing the same data make 2 new histograms: (i) one with 5 bins (bins = 5), and (ii) one with the bars centered on the left bin edges (align = \"left\").\nHint - if overplotting the results, you may find it helpful to use the histtype = \"step\" option",
"plt.hist( # complete\n# complete",
"These small changes significantly change the output PDF. With fewer bins we get something closer to a continuous distribution, while shifting the bin centers reduces the probability to zero at 9 chinups. \nWhat if we instead allow the bin width to vary and require the same number of points in each bin? You can determine the bin edges for bins with 5 sources using the following command:\nbins = np.append(np.sort(chinups)[::5], np.max(chinups))\n\nProblem 1c \nPlot a histogram with variable width bins, each with the same number of points.\nHint - setting normed = True will normalize the bin heights so that the PDF integrates to 1.",
"# complete\nplt.hist(# complete",
"Ending the lie \nEarlier I stated that histograms lie. One simple way to combat this lie: show all the data. Displaying the original data points allows viewers to somewhat intuit the effects of the particular bin choices that have been made (though this can also be cumbersome for very large data sets, which these days is essentially all data sets). The standard for showing individual observations relative to a histogram is a \"rug plot,\" which shows a vertical tick (or other symbol) at the location of each source used to estimate the PDF.\nProblem 1d Execute the cell below to see an example of a rug plot.",
"plt.hist(chinups, histtype = 'step')\n\n# this is the code for the rug plot\nplt.plot(chinups, np.zeros_like(chinups), '|', color='k', ms = 25, mew = 4)",
"Of course, even rug plots are not a perfect solution. Many of the chinup measurements are repeated, and those instances cannot be easily isolated above. One (slightly) better solution is to vary the transparency of the rug \"whiskers\" using alpha = 0.3 in the whiskers plot call. But this too is far from perfect. \nTo recap, histograms are not ideal for density estimation for the following reasons: \n\nThey introduce discontinuities that are not present in the data\nThey are strongly sensitive to user choices ($N_\\mathrm{bins}$, bin centering, bin grouping), without any mathematical guidance to what these choices should be\nThey are difficult to visualize in higher dimensions\n\nHistograms are useful for generating a quick representation of univariate data, but for the reasons listed above they should never be used for analysis. Most especially, functions should not be fit to histograms given how greatly the number of bins and bin centering affects the output histogram.\nOkay - so if we are going to rail on histograms this much, there must be a better option. There is: Kernel Density Estimation (KDE), a nonparametric form of density estimation whereby a normalized kernel function is convolved with the discrete data to obtain a continuous estimate of the underlying PDF. As a rule, the kernel must integrate to 1 over the interval $-\\infty$ to $\\infty$ and be symmetric. There are many possible kernels (gaussian is highly popular, though Epanechnikov, an inverted parabola, produces the minimal mean square error). \nKDE is not completely free of the problems we illustrated for histograms above (in particular, both a kernel and the width of the kernel need to be selected), but it does manage to correct a number of the ills. We will now demonstrate this via a few examples using the scikit-learn implementation of KDE: KernelDensity, which is part of the sklearn.neighbors module. 
\nNote There are many implementations of KDE in Python, and Jake VanderPlas has put together an excellent description of the strengths and weaknesses of each. We will use the scikit-learn version as it is in many cases the fastest implementation.\nTo demonstrate the basic idea behind KDE, we will begin by representing each point in the dataset as a block (i.e. we will adopt the tophat kernel). Borrowing some code from Jake, we can estimate the KDE using the following code:\nfrom sklearn.neighbors import KernelDensity\ndef kde_sklearn(data, grid, bandwidth = 1.0, **kwargs):\n    kde_skl = KernelDensity(bandwidth = bandwidth, **kwargs)\n    kde_skl.fit(data[:, np.newaxis])\n    log_pdf = kde_skl.score_samples(grid[:, np.newaxis]) # sklearn returns log(density)\n\n    return np.exp(log_pdf)\n\nThe two main options to set are the bandwidth and the kernel.",
"# execute this cell\nfrom sklearn.neighbors import KernelDensity\ndef kde_sklearn(data, grid, bandwidth = 1.0, **kwargs):\n kde_skl = KernelDensity(bandwidth = bandwidth, **kwargs)\n kde_skl.fit(data[:, np.newaxis])\n log_pdf = kde_skl.score_samples(grid[:, np.newaxis]) # sklearn returns log(density)\n\n return np.exp(log_pdf)",
"Problem 1e \nPlot the KDE of the PDF for the number of chinups middle aged men can do using a bandwidth of 0.1 and a tophat kernel.\nHint - as a general rule, the grid should be smaller than the bandwidth when plotting the PDF.",
"grid = # complete\nPDFtophat = kde_sklearn( # complete\nplt.plot( # complete",
"In this representation, each \"block\" has a height of 0.25. The bandwidth is too narrow to provide any overlap between the blocks. This choice of kernel and bandwidth produces an estimate that is essentially a histogram with a large number of bins. It gives no sense of continuity for the distribution. Now, we examine the difference (relative to histograms) upon changing the the width (i.e. kernel) of the blocks. \nProblem 1f \nPlot the KDE of the PDF for the number of chinups middle aged men can do using bandwidths of 1 and 5 and a tophat kernel. How do the results differ from the histogram plots above?",
"PDFtophat1 = # complete\n\n# complete\n# complete\n# complete",
"It turns out blocks are not an ideal representation for continuous data (see discussion on histograms above). Now we will explore the resulting PDF from other kernels. \nProblem 1g Plot the KDE of the PDF for the number of chinups middle aged men can do using a gaussian and Epanechnikov kernel. How do the results differ from the histogram plots above? \nHint - you will need to select the bandwidth. The examples above should provide insight into the useful range for bandwidth selection. You may need to adjust the values to get an answer you \"like.\"",
"PDFgaussian = # complete\nPDFepanechnikov = # complete",
"So, what is the optimal choice of bandwidth and kernel? Unfortunately, there is no hard and fast rule, as every problem will likely have a different optimization. Typically, the choice of bandwidth is far more important than the choice of kernel. In the case where the PDF is likely to be gaussian (or close to gaussian), then Silverman's rule of thumb can be used: \n$$h = 1.059 \\sigma n^{-1/5}$$\nwhere $h$ is the bandwidth, $\\sigma$ is the standard deviation of the samples, and $n$ is the total number of samples. Note - in situations with bimodal or more complicated distributions, this rule of thumb can lead to woefully inaccurate PDF estimates. The most general way to estimate the choice of bandwidth is via cross validation (we will cover cross-validation later today). \nWhat about multidimensional PDFs? It is possible using many of the Python implementations of KDE to estimate multidimensional PDFs, though it is very very important to beware the curse of dimensionality in these circumstances.\nProblem 2) Data Exploration\nNow a more open ended topic: data exploration. In brief, data exploration encompasses a large suite of tools (including those discussed above) to examine data that live in large dimensional spaces. There is no single best method or optimal direction for data exploration. Instead, today we will introduce some of the tools available via python. \nAs an example we will start with a basic line plot - and examine tools beyond matplotlib.",
"x = np.arange(0, 6*np.pi, 0.1)\ny = np.cos(x)\n\nplt.plot(x,y, lw = 2)\nplt.xlabel('X')\nplt.ylabel('Y')\nplt.xlim(0, 6*np.pi)",
"Seaborn\nSeaborn is a plotting package that enables many useful features for exploration. In fact, a lot of the functionality that we developed above can readily be handled with seaborn.\nTo begin, we will make the same plot that we created in matplotlib.",
"import seaborn as sns\n\nfig = plt.figure()\nax = fig.add_subplot(111)\n\nax.plot(x,y, lw = 2)\nax.set_xlabel('X')\nax.set_ylabel('Y')\nax.set_xlim(0, 6*np.pi)",
"These plots look identical, but it is possible to change the style with seaborn. \nseaborn has 5 style presets: darkgrid, whitegrid, dark, white, and ticks. You can change the preset using the following: \nsns.set_style(\"whitegrid\")\n\nwhich will change the output for all subsequent plots. Note - if you want to change the style for only a single plot, that can be accomplished with the following: \nwith sns.axes_style(\"dark\"):\n\nwith all plotting commands inside the with statement. \nProblem 3a \nRe-plot the sine curve using each seaborn preset to see which you like best - then adopt this for the remainder of the notebook.",
"sns.set_style( # complete\n# complete",
"The folks behind seaborn have thought a lot about color palettes, which is a good thing. Remember - the choice of color for plots is one of the most essential aspects of visualization. A poor choice of colors can easily mask interesting patterns or suggest structure that is not real. To learn more about what is available, see the seaborn color tutorial. \nHere we load the default:",
"# default color palette\n\ncurrent_palette = sns.color_palette()\nsns.palplot(current_palette)",
"which we will now change to colorblind, which is clearer to those that are colorblind.",
"# set palette to colorblind\nsns.set_palette(\"colorblind\")\n\ncurrent_palette = sns.color_palette()\nsns.palplot(current_palette)",
"Now that we have covered the basics of seaborn (and the above examples truly only scratch the surface of what is possible), we will explore the power of seaborn for higher dimension data sets. We will load the famous Iris data set, which measures 4 different features of 3 different types of Iris flowers. There are 150 different flowers in the data set.\nNote - for those familiar with pandas seaborn is designed to integrate easily and directly with pandas DataFrame objects. In the example below the Iris data are loaded into a DataFrame. iPython notebooks also display the DataFrame data in a nice readable format.",
"iris = sns.load_dataset(\"iris\")\niris",
"Now that we have a sense of the data structure, it is useful to examine the distribution of features. Above, we went to great pains to produce histograms, KDEs, and rug plots. seaborn handles all of that effortlessly with the distplot function.\nProblem 3b \nPlot the distribution of petal lengths for the Iris data set.",
"# note - hist, kde, and rug all set to True, set to False to turn them off \nwith sns.axes_style(\"dark\"):\n sns.distplot(iris['petal_length'], bins=20, hist=True, kde=True, rug=True)",
"Of course, this data set lives in a 4D space, so plotting more than univariate distributions is important (and as we will see tomorrow this is particularly useful for visualizing classification results). Fortunately, seaborn makes it very easy to produce handy summary plots. \nAt this point, we are familiar with basic scatter plots in matplotlib.\nProblem 3c \nMake a matplotlib scatter plot showing the Iris petal length against the Iris petal width.",
"plt.scatter( # complete",
"Of course, when there are many many data points, scatter plots become difficult to interpret. As in the example below:",
"with sns.axes_style(\"darkgrid\"):\n xexample = np.random.normal(loc = 0.2, scale = 1.1, size = 10000)\n yexample = np.random.normal(loc = -0.1, scale = 0.9, size = 10000)\n\n plt.scatter(xexample, yexample)",
"Here, we see that there are many points, clustered about the origin, but we have no sense of the underlying density of the distribution. 2D histograms, such as plt.hist2d(), can alleviate this problem. I prefer to use plt.hexbin() which is a little easier on the eyes (though note - these histograms are just as subject to the same issues discussed above).",
"# hexbin w/ bins = \"log\" returns the log of counts/bin\n# mincnt = 1 displays only hexpix with at least 1 source present\nwith sns.axes_style(\"darkgrid\"):\n plt.hexbin(xexample, yexample, bins = \"log\", cmap = \"viridis\", mincnt = 1)\n plt.colorbar()",
"While the above plot provides a significant improvement over the scatter plot by providing a better sense of the density near the center of the distribution, the binedge effects are clearly present. An even better solution, like before, is a density estimate, which is easily built into seaborn via the kdeplot function.",
"with sns.axes_style(\"darkgrid\"):\n sns.kdeplot(xexample, yexample,shade=False)",
"This plot is much more appealing (and informative) than the previous two. For the first time we can clearly see that the distribution is not actually centered on the origin. Now we will move back to the Iris data set. \nSuppose we want to see univariate distributions in addition to the scatter plot? This is certainly possible with matplotlib and you can find examples on the web, however, with seaborn this is really easy.",
"sns.jointplot(x=iris['petal_length'], y=iris['petal_width'])",
"But! Histograms and scatter plots can be problematic as we have discussed many times before. \nProblem 3d \nRe-create the plot above but set kind='kde' to produce density estimates of the distributions.",
"sns.jointplot( # complete",
"That is much nicer than what was presented above. However - we still have a problem in that our data live in 4D, but we are (mostly) limited to 2D projections of that data. One way around this is via the seaborn version of a pairplot, which plots the distribution of every variable in the data set against each other. (Here is where the integration with pandas DataFrames becomes so powerful.)",
"sns.pairplot(iris[[\"sepal_length\", \"sepal_width\", \"petal_length\", \"petal_width\"]])",
"For data sets where we have classification labels, we can even color the various points using the hue option, and produce KDEs along the diagonal with diag_type = 'kde'.",
"sns.pairplot(iris, vars = [\"sepal_length\", \"sepal_width\", \"petal_length\", \"petal_width\"],\n hue = \"species\", diag_kind = 'kde')",
"Even better - there is an option to create a PairGrid which allows fine tuned control of the data as displayed above, below, and along the diagonal. In this way it becomes possible to avoid having symmetric redundancy, which is not all that informative. In the example below, we will show scatter plots and contour plots simultaneously.",
"g = sns.PairGrid(iris, vars = [\"sepal_length\", \"sepal_width\", \"petal_length\", \"petal_width\"],\n hue = \"species\", diag_sharey=False)\ng.map_lower(sns.kdeplot)\ng.map_upper(plt.scatter, edgecolor='white')\ng.map_diag(sns.kdeplot, lw=3)",
"Note - one disadvantage to the plot above is that the contours do not share the same color scheme as the KDE estimates and the scatter plot. I have not been able to figure out how to change this in a satisfactory way. (One potential solution is detailed here, however, it is worth noting that this solution restricts your color choices to a maximum of ~5 unless you are a colormaps wizard, and I am not.)"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
barjacks/foundations-homework
|
07/RichPeople_Pandas_Homework_7_Skinner.ipynb
|
mit
|
[
"The Richest People in the World: Taking a closer look at Switzerland and the Gender Gap",
"import pandas as pd\n\n!pip install pandas\n\ndf = pd.read_excel(\"Billionaire Characteristics Database/richpeople.xlsx\")\n\n!pip install xlrd\n\ndf.head(1)\n\ndf.columns\n\nrecent = df[df['year'] == 2014]\nrecent.head(1)",
"1) Where are most billionaires from?",
"recent['countrycode'].value_counts().head()\n#df['animal'].value_counts()",
"2) How many of them are from Switzerland?",
"Swiss_billionaires = recent[recent['countrycode'] == 'CHE']\nSwiss_billionaires.count().head(1)",
"3) Display all of the Swiss billionaires, starting with the richest.",
"Swiss_billionaires = recent[recent['countrycode'] == 'CHE']\nSwiss_billionaires_sorted = Swiss_billionaires.sort_values(by='networthusbillion', ascending=False)\nSwiss_billionaires_sorted\n",
"4) How many of the richest Swiss are female, and how many are male?",
"female_Swiss_billionaires = Swiss_billionaires[Swiss_billionaires['gender'] == 'female'].count().head(1)\nfemale_Swiss_billionaires\n\nmale_Swiss_billionaire = Swiss_billionaires[Swiss_billionaires['gender'] == 'male'].count().head(1)\nmale_Swiss_billionaire",
"So that is 8 female Swiss billionaires and 14 male ones.\n5) How is their wealth distributed? How many billions do super rich men and women have in Switzerland compared to women?",
"female_Swiss_billionaires_list = Swiss_billionaires[Swiss_billionaires['gender'] == 'female']\nfemale_mean = female_Swiss_billionaires_list['networthusbillion'].mean()\nprint(female_mean)\n\n#print(\"Swiss female billionaire's average wealth:\", female_Swiss_billionaire['networthusbillion'].mean())\n#print(\"Swiss male billionaire's average wealth:\", male_Swiss_billionaire['networthusbillion'].mean())\n\nmale_Swiss_billionaires_list = Swiss_billionaires[Swiss_billionaires['gender'] == 'male']\nmale_mean = male_Swiss_billionaires_list['networthusbillion'].mean()\nprint(male_mean)\n\ndiff = male_mean - female_mean\nMillions_diff = diff * 1000\nmale_sum = male_Swiss_billionaires_list['networthusbillion'].sum()\nfemale_sum = female_Swiss_billionaires_list['networthusbillion'].sum()\n\nprint(\"In total Swiss male billionaires have funds of\", male_sum, \"billion US-Dollars. Female billionaires own\", female_sum,\"billion meaning that in average a Swiss male billionaire has\", Millions_diff, \"million USD more than Swiss female billionaires. That's quite a bit of cash!\")",
"6) Do more Swiss women or more men inherit their fortunes?",
"#df[(df['animal'] == 'cat') & (df['inches'] > 12)]\nmale_Swiss_billionaires_list[male_Swiss_billionaires_list['selfmade'] == 'inherited'].count().head(1)\ninherited = male_Swiss_billionaires_list[male_Swiss_billionaires_list['selfmade'] == 'inherited'].count().head(1)\nprint(\"So out of 14 male Swiss billionaires 9 of them inherited their fortune.\")\n\n\nfemale_Swiss_billionaires_list[female_Swiss_billionaires_list['selfmade'] == 'inherited'].count().head(1)\nfemale_inherited = female_Swiss_billionaires_list[female_Swiss_billionaires_list['selfmade'] == 'inherited'].count().head()\nfemale_inherited\n#print(\"Out of 8 Swiss female billionaires, only one woman made her fortune on her own\")",
"7) Display the only Swiss female billionaire who is a self made woman?",
"recent[(recent['gender'] == 'female') & (recent['selfmade'] == 'self-made') & (recent['countrycode'] == 'CHE')]",
"8) Display the richest woman in the world",
"recent[(recent['gender'] == 'female')].sort_values(by='networthusbillion', ascending=False)",
"9) Who is the richest self made woman? And how many women are selfmade?",
"recent[(recent['gender'] == 'female') & (recent['selfmade'] == 'self-made')].sort_values(by='networthusbillion', ascending=False)",
"Interesting. Chan Laiwa, the richest self made woman in the world is from China. She is also above 70. The second on the list, Gayle Cook, is even over 80.\n10. Lets compare that to the richest self made men. How much have they made?",
"recent[(recent['gender'] == 'male') & (recent['selfmade'] == 'self-made')].sort_values(by='networthusbillion', ascending=False)",
"11. What about the youngest billionaires. Are any of them female?",
"recent.sort_values(by='age').head(5)",
"12. We compared women's and men's average fortunes before for Switzerland. Lets do the same for the whole world.",
"female_billionaires = recent[recent['gender'] == 'female']\nmale_billionaires = recent[recent['gender'] == 'male']\nprint(\"Female billionaire average wealth:\", female_billionaires['networthusbillion'].mean())\nprint(\"Male billionaire average wealth:\", male_billionaires['networthusbillion'].mean())\n\nfemales = female_billionaires['networthusbillion'].mean()\nmales = male_billionaires['networthusbillion'].mean()\ndifference = ((float(females) - float(males)) * 1000000000)\nfemales_more_than_males = int(difference) / 1000000\n\nprint(\"Female billionaires are slightly richer than their male counterparts. In average they have\", int(females_more_than_males), \"Million US-Dollars more than men.\" )\n\ntotal_billions_women = int(female_billionaires['networthusbillion'].sum())\n\ntotal_billions_men = int(male_billionaires['networthusbillion'].sum())\n\nprint(\"In total, however, male billionaires have\", int(total_billions_men / total_billions_women), \"times more billions than women. In total:\",total_billions_men, \"billion US-Dollars. That is more than the GDP of France.\")",
"13. Who has the most billions?",
"recent.sort_values(by='networthusbillion', ascending = False).head(5)",
"14. Which nation has the wealthiest billionaires in average?",
"recent.groupby(['countrycode'])['networthusbillion'].mean().sort_values(ascending = False).head(15)",
"15. How is the wealth distributed by age? Lets plot that out.",
"%matplotlib inline\n\nimport matplotlib.pyplot as plt\nplt.style.available\nplt.style.use('fivethirtyeight')\n\nrecent.plot(kind='scatter', x='age', y='networthusbillion')\nplt.savefig(\"Scatterplot_billionaires_worldwide.svg\")"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
radu941208/DeepLearning
|
Hyperparameter_Tuning_Regularization_Optimization/Gradient+Checking.ipynb
|
mit
|
[
"Gradient Checking\nWelcome to the final assignment for this week! In this assignment you will learn to implement and use gradient checking. \nYou are part of a team working to make mobile payments available globally, and are asked to build a deep learning model to detect fraud--whenever someone makes a payment, you want to see if the payment might be fraudulent, such as if the user's account has been taken over by a hacker. \nBut backpropagation is quite challenging to implement, and sometimes has bugs. Because this is a mission-critical application, your company's CEO wants to be really certain that your implementation of backpropagation is correct. Your CEO says, \"Give me a proof that your backpropagation is actually working!\" To give this reassurance, you are going to use \"gradient checking\".\nLet's do it!",
"# Packages\nimport numpy as np\nfrom testCases import *\nfrom gc_utils import sigmoid, relu, dictionary_to_vector, vector_to_dictionary, gradients_to_vector",
"1) How does gradient checking work?\nBackpropagation computes the gradients $\\frac{\\partial J}{\\partial \\theta}$, where $\\theta$ denotes the parameters of the model. $J$ is computed using forward propagation and your loss function.\nBecause forward propagation is relatively easy to implement, you're confident you got that right, and so you're almost 100% sure that you're computing the cost $J$ correctly. Thus, you can use your code for computing $J$ to verify the code for computing $\\frac{\\partial J}{\\partial \\theta}$. \nLet's look back at the definition of a derivative (or gradient):\n$$ \\frac{\\partial J}{\\partial \\theta} = \\lim_{\\varepsilon \\to 0} \\frac{J(\\theta + \\varepsilon) - J(\\theta - \\varepsilon)}{2 \\varepsilon} \\tag{1}$$\nIf you're not familiar with the \"$\\displaystyle \\lim_{\\varepsilon \\to 0}$\" notation, it's just a way of saying \"when $\\varepsilon$ is really really small.\"\nWe know the following:\n\n$\\frac{\\partial J}{\\partial \\theta}$ is what you want to make sure you're computing correctly. \nYou can compute $J(\\theta + \\varepsilon)$ and $J(\\theta - \\varepsilon)$ (in the case that $\\theta$ is a real number), since you're confident your implementation for $J$ is correct. \n\nLets use equation (1) and a small value for $\\varepsilon$ to convince your CEO that your code for computing $\\frac{\\partial J}{\\partial \\theta}$ is correct!\n2) 1-dimensional gradient checking\nConsider a 1D linear function $J(\\theta) = \\theta x$. The model contains only a single real-valued parameter $\\theta$, and takes $x$ as input.\nYou will implement code to compute $J(.)$ and its derivative $\\frac{\\partial J}{\\partial \\theta}$. You will then use gradient checking to make sure your derivative computation for $J$ is correct. 
\n<img src=\"images/1Dgrad_kiank.png\" style=\"width:600px;height:250px;\">\n<caption><center> <u> Figure 1 </u>: 1D linear model<br> </center></caption>\nThe diagram above shows the key computation steps: First start with $x$, then evaluate the function $J(x)$ (\"forward propagation\"). Then compute the derivative $\\frac{\\partial J}{\\partial \\theta}$ (\"backward propagation\"). \nExercise: implement \"forward propagation\" and \"backward propagation\" for this simple function. I.e., compute both $J(.)$ (\"forward propagation\") and its derivative with respect to $\\theta$ (\"backward propagation\"), in two separate functions.",
"# GRADED FUNCTION: forward_propagation\n\ndef forward_propagation(x, theta):\n \"\"\"\n Implement the linear forward propagation (compute J) presented in Figure 1 (J(theta) = theta * x)\n \n Arguments:\n x -- a real-valued input\n theta -- our parameter, a real number as well\n \n Returns:\n J -- the value of function J, computed using the formula J(theta) = theta * x\n \"\"\"\n \n ### START CODE HERE ### (approx. 1 line)\n J = np.dot(theta,x)\n ### END CODE HERE ###\n \n return J\n\nx, theta = 2, 4\nJ = forward_propagation(x, theta)\nprint (\"J = \" + str(J))",
"Expected Output:\n<table style=>\n <tr>\n <td> ** J ** </td>\n <td> 8</td>\n </tr>\n</table>\n\nExercise: Now, implement the backward propagation step (derivative computation) of Figure 1. That is, compute the derivative of $J(\\theta) = \\theta x$ with respect to $\\theta$. To save you from doing the calculus, you should get $dtheta = \\frac { \\partial J }{ \\partial \\theta} = x$.",
"# GRADED FUNCTION: backward_propagation\n\ndef backward_propagation(x, theta):\n \"\"\"\n Computes the derivative of J with respect to theta (see Figure 1).\n \n Arguments:\n x -- a real-valued input\n theta -- our parameter, a real number as well\n \n Returns:\n dtheta -- the gradient of the cost with respect to theta\n \"\"\"\n \n ### START CODE HERE ### (approx. 1 line)\n dtheta = x\n ### END CODE HERE ###\n \n return dtheta\n\nx, theta = 2, 4\ndtheta = backward_propagation(x, theta)\nprint (\"dtheta = \" + str(dtheta))",
"Expected Output:\n<table>\n <tr>\n <td> ** dtheta ** </td>\n <td> 2 </td>\n </tr>\n</table>\n\nExercise: To show that the backward_propagation() function is correctly computing the gradient $\\frac{\\partial J}{\\partial \\theta}$, let's implement gradient checking.\nInstructions:\n- First compute \"gradapprox\" using the formula above (1) and a small value of $\\varepsilon$. Here are the Steps to follow:\n 1. $\\theta^{+} = \\theta + \\varepsilon$\n 2. $\\theta^{-} = \\theta - \\varepsilon$\n 3. $J^{+} = J(\\theta^{+})$\n 4. $J^{-} = J(\\theta^{-})$\n 5. $gradapprox = \\frac{J^{+} - J^{-}}{2 \\varepsilon}$\n- Then compute the gradient using backward propagation, and store the result in a variable \"grad\"\n- Finally, compute the relative difference between \"gradapprox\" and the \"grad\" using the following formula:\n$$ difference = \\frac {\\mid\\mid grad - gradapprox \\mid\\mid_2}{\\mid\\mid grad \\mid\\mid_2 + \\mid\\mid gradapprox \\mid\\mid_2} \\tag{2}$$\nYou will need 3 Steps to compute this formula:\n - 1'. compute the numerator using np.linalg.norm(...)\n - 2'. compute the denominator. You will need to call np.linalg.norm(...) twice.\n - 3'. divide them.\n- If this difference is small (say less than $10^{-7}$), you can be quite confident that you have computed your gradient correctly. Otherwise, there may be a mistake in the gradient computation.",
"# GRADED FUNCTION: gradient_check\n\ndef gradient_check(x, theta, epsilon = 1e-7):\n \"\"\"\n Implement the backward propagation presented in Figure 1.\n \n Arguments:\n x -- a real-valued input\n theta -- our parameter, a real number as well\n epsilon -- tiny shift to the input to compute approximated gradient with formula(1)\n \n Returns:\n difference -- difference (2) between the approximated gradient and the backward propagation gradient\n \"\"\"\n \n # Compute gradapprox using left side of formula (1). epsilon is small enough, you don't need to worry about the limit.\n ### START CODE HERE ### (approx. 5 lines)\n thetaplus = theta+epsilon # Step 1\n thetaminus = theta-epsilon # Step 2\n J_plus = forward_propagation(x,thetaplus) # Step 3\n J_minus = forward_propagation(x,thetaminus) # Step 4\n gradapprox = (J_plus-J_minus)/(2*epsilon) # Step 5\n ### END CODE HERE ###\n \n # Check if gradapprox is close enough to the output of backward_propagation()\n ### START CODE HERE ### (approx. 1 line)\n grad = backward_propagation(x, theta)\n ### END CODE HERE ###\n \n ### START CODE HERE ### (approx. 1 line)\n numerator = np.linalg.norm(grad-gradapprox) # Step 1'\n denominator = np.linalg.norm(grad)+np.linalg.norm(gradapprox) # Step 2'\n difference = numerator/denominator # Step 3'\n ### END CODE HERE ###\n \n if difference < 1e-7:\n print (\"The gradient is correct!\")\n else:\n print (\"The gradient is wrong!\")\n \n return difference\n\nx, theta = 2, 4\ndifference = gradient_check(x, theta)\nprint(\"difference = \" + str(difference))",
"Expected Output:\nThe gradient is correct!\n<table>\n <tr>\n <td> ** difference ** </td>\n <td> 2.9193358103083e-10 </td>\n </tr>\n</table>\n\nCongrats, the difference is smaller than the $10^{-7}$ threshold. So you can have high confidence that you've correctly computed the gradient in backward_propagation(). \nNow, in the more general case, your cost function $J$ has more than a single 1D input. When you are training a neural network, $\\theta$ actually consists of multiple matrices $W^{[l]}$ and biases $b^{[l]}$! It is important to know how to do a gradient check with higher-dimensional inputs. Let's do it!\n3) N-dimensional gradient checking\nThe following figure describes the forward and backward propagation of your fraud detection model.\n<img src=\"images/NDgrad_kiank.png\" style=\"width:600px;height:400px;\">\n<caption><center> <u> Figure 2 </u>: deep neural network<br>LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID</center></caption>\nLet's look at your implementations for forward propagation and backward propagation.",
"def forward_propagation_n(X, Y, parameters):\n \"\"\"\n Implements the forward propagation (and computes the cost) presented in Figure 3.\n \n Arguments:\n X -- training set for m examples\n Y -- labels for m examples \n parameters -- python dictionary containing your parameters \"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\":\n W1 -- weight matrix of shape (5, 4)\n b1 -- bias vector of shape (5, 1)\n W2 -- weight matrix of shape (3, 5)\n b2 -- bias vector of shape (3, 1)\n W3 -- weight matrix of shape (1, 3)\n b3 -- bias vector of shape (1, 1)\n \n Returns:\n cost -- the cost function (logistic cost for one example)\n \"\"\"\n \n # retrieve parameters\n m = X.shape[1]\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n W3 = parameters[\"W3\"]\n b3 = parameters[\"b3\"]\n\n # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID\n Z1 = np.dot(W1, X) + b1\n A1 = relu(Z1)\n Z2 = np.dot(W2, A1) + b2\n A2 = relu(Z2)\n Z3 = np.dot(W3, A2) + b3\n A3 = sigmoid(Z3)\n\n # Cost\n logprobs = np.multiply(-np.log(A3),Y) + np.multiply(-np.log(1 - A3), 1 - Y)\n cost = 1./m * np.sum(logprobs)\n \n cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)\n \n return cost, cache",
"Now, run backward propagation.",
"def backward_propagation_n(X, Y, cache):\n \"\"\"\n Implement the backward propagation presented in figure 2.\n \n Arguments:\n X -- input datapoint, of shape (input size, 1)\n Y -- true \"label\"\n cache -- cache output from forward_propagation_n()\n \n Returns:\n gradients -- A dictionary with the gradients of the cost with respect to each parameter, activation and pre-activation variables.\n \"\"\"\n \n m = X.shape[1]\n (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache\n \n dZ3 = A3 - Y\n dW3 = 1./m * np.dot(dZ3, A2.T)\n db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)\n \n dA2 = np.dot(W3.T, dZ3)\n dZ2 = np.multiply(dA2, np.int64(A2 > 0))\n dW2 = 1./m * np.dot(dZ2, A1.T) * 2\n db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)\n \n dA1 = np.dot(W2.T, dZ2)\n dZ1 = np.multiply(dA1, np.int64(A1 > 0))\n dW1 = 1./m * np.dot(dZ1, X.T)\n db1 = 4./m * np.sum(dZ1, axis=1, keepdims = True)\n \n gradients = {\"dZ3\": dZ3, \"dW3\": dW3, \"db3\": db3,\n \"dA2\": dA2, \"dZ2\": dZ2, \"dW2\": dW2, \"db2\": db2,\n \"dA1\": dA1, \"dZ1\": dZ1, \"dW1\": dW1, \"db1\": db1}\n \n return gradients",
"You obtained some results on the fraud detection test set but you are not 100% sure of your model. Nobody's perfect! Let's implement gradient checking to verify if your gradients are correct.\nHow does gradient checking work?.\nAs in 1) and 2), you want to compare \"gradapprox\" to the gradient computed by backpropagation. The formula is still:\n$$ \\frac{\\partial J}{\\partial \\theta} = \\lim_{\\varepsilon \\to 0} \\frac{J(\\theta + \\varepsilon) - J(\\theta - \\varepsilon)}{2 \\varepsilon} \\tag{1}$$\nHowever, $\\theta$ is not a scalar anymore. It is a dictionary called \"parameters\". We implemented a function \"dictionary_to_vector()\" for you. It converts the \"parameters\" dictionary into a vector called \"values\", obtained by reshaping all parameters (W1, b1, W2, b2, W3, b3) into vectors and concatenating them.\nThe inverse function is \"vector_to_dictionary\" which outputs back the \"parameters\" dictionary.\n<img src=\"images/dictionary_to_vector.png\" style=\"width:600px;height:400px;\">\n<caption><center> <u> Figure 2 </u>: dictionary_to_vector() and vector_to_dictionary()<br> You will need these functions in gradient_check_n()</center></caption>\nWe have also converted the \"gradients\" dictionary into a vector \"grad\" using gradients_to_vector(). You don't need to worry about that.\nExercise: Implement gradient_check_n().\nInstructions: Here is pseudo-code that will help you implement the gradient check.\nFor each i in num_parameters:\n- To compute J_plus[i]:\n 1. Set $\\theta^{+}$ to np.copy(parameters_values)\n 2. Set $\\theta^{+}_i$ to $\\theta^{+}_i + \\varepsilon$\n 3. Calculate $J^{+}_i$ using to forward_propagation_n(x, y, vector_to_dictionary($\\theta^{+}$ )). \n- To compute J_minus[i]: do the same thing with $\\theta^{-}$\n- Compute $gradapprox[i] = \\frac{J^{+}_i - J^{-}_i}{2 \\varepsilon}$\nThus, you get a vector gradapprox, where gradapprox[i] is an approximation of the gradient with respect to parameter_values[i]. 
You can now compare this gradapprox vector to the gradients vector from backpropagation. Just like for the 1D case (Steps 1', 2', 3'), compute: \n$$ difference = \\frac {\\| grad - gradapprox \\|_2}{\\| grad \\|_2 + \\| gradapprox \\|_2 } \\tag{3}$$",
"# GRADED FUNCTION: gradient_check_n\n\ndef gradient_check_n(parameters, gradients, X, Y, epsilon = 1e-7):\n \"\"\"\n Checks if backward_propagation_n computes correctly the gradient of the cost output by forward_propagation_n\n \n Arguments:\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\":\n grad -- output of backward_propagation_n, contains gradients of the cost with respect to the parameters. \n x -- input datapoint, of shape (input size, 1)\n y -- true \"label\"\n epsilon -- tiny shift to the input to compute approximated gradient with formula(1)\n \n Returns:\n difference -- difference (2) between the approximated gradient and the backward propagation gradient\n \"\"\"\n \n # Set-up variables\n parameters_values, _ = dictionary_to_vector(parameters)\n grad = gradients_to_vector(gradients)\n num_parameters = parameters_values.shape[0]\n J_plus = np.zeros((num_parameters, 1))\n J_minus = np.zeros((num_parameters, 1))\n gradapprox = np.zeros((num_parameters, 1))\n \n # Compute gradapprox\n for i in range(num_parameters):\n \n # Compute J_plus[i]. Inputs: \"parameters_values, epsilon\". Output = \"J_plus[i]\".\n # \"_\" is used because the function you have to outputs two parameters but we only care about the first one\n ### START CODE HERE ### (approx. 3 lines)\n thetaplus = np.copy(parameters_values) # Step 1\n thetaplus[i][0] = thetaplus[i][0] + epsilon # Step 2\n J_plus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaplus)) # Step 3\n ### END CODE HERE ###\n \n # Compute J_minus[i]. Inputs: \"parameters_values, epsilon\". Output = \"J_minus[i]\".\n ### START CODE HERE ### (approx. 3 lines)\n thetaminus = np.copy(parameters_values) # Step 1\n thetaminus[i][0] = thetaminus[i][0] - epsilon # Step 2 \n J_minus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaminus)) # Step 3\n ### END CODE HERE ###\n \n # Compute gradapprox[i]\n ### START CODE HERE ### (approx. 
1 line)\n gradapprox[i] = (J_plus[i]-J_minus[i])/(2*epsilon)\n ### END CODE HERE ###\n \n # Compare gradapprox to backward propagation gradients by computing difference.\n ### START CODE HERE ### (approx. 1 line)\n numerator = np.linalg.norm(grad - gradapprox) # Step 1'\n denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox) # Step 2'\n difference = numerator/denominator # Step 3'\n ### END CODE HERE ###\n\n if difference > 1e-7:\n print (\"\\033[93m\" + \"There is a mistake in the backward propagation! difference = \" + str(difference) + \"\\033[0m\")\n else:\n print (\"\\033[92m\" + \"Your backward propagation works perfectly fine! difference = \" + str(difference) + \"\\033[0m\")\n \n return difference\n\nX, Y, parameters = gradient_check_n_test_case()\n\ncost, cache = forward_propagation_n(X, Y, parameters)\ngradients = backward_propagation_n(X, Y, cache)\ndifference = gradient_check_n(parameters, gradients, X, Y)",
"Expected output:\n<table>\n <tr>\n <td> ** There is a mistake in the backward propagation!** </td>\n <td> difference = 0.285093156781 </td>\n </tr>\n</table>\n\nIt seems that there were errors in the backward_propagation_n code we gave you! Good that you've implemented the gradient check. Go back to backward_propagation and try to find/correct the errors (Hint: check dW2 and db1). Rerun the gradient check when you think you've fixed it. Remember you'll need to re-execute the cell defining backward_propagation_n() if you modify the code. \nCan you get gradient check to declare your derivative computation correct? Even though this part of the assignment isn't graded, we strongly urge you to try to find the bug and re-run gradient check until you're convinced backprop is now correctly implemented. \nNote \n- Gradient Checking is slow! Approximating the gradient with $\\frac{\\partial J}{\\partial \\theta} \\approx \\frac{J(\\theta + \\varepsilon) - J(\\theta - \\varepsilon)}{2 \\varepsilon}$ is computationally costly. For this reason, we don't run gradient checking at every iteration during training. Just a few times to check if the gradient is correct. \n- Gradient Checking, at least as we've presented it, doesn't work with dropout. You would usually run the gradient check algorithm without dropout to make sure your backprop is correct, then add dropout. \nCongrats, you can be confident that your deep learning model for fraud detection is working correctly! You can even use this to convince your CEO. :) \n<font color='blue'>\nWhat you should remember from this notebook:\n- Gradient checking verifies closeness between the gradients from backpropagation and the numerical approximation of the gradient (computed using forward propagation).\n- Gradient checking is slow, so we don't run it in every iteration of training. You would usually run it only to make sure your code is correct, then turn it off and use backprop for the actual learning process."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
nreimers/deeplearning4nlp-tutorial
|
2015-10_Lecture/Lecture2/code/3_Intro_Lasagne_Solution.ipynb
|
apache-2.0
|
[
"Introduction to Lasagne\nThere are various libaries building on top of Theano to provide easy buidling blocks for designing deep neural networks. Some of them are:\n- Lasagne (https://github.com/Lasagne/Lasagne)\n- Blocks (https://github.com/mila-udem/blocks)\n- Keras (http://keras.io/)\n- OpenDeep (http://www.opendeep.org/)\nAll libaries are kind of similar but different in the details, for example in the design philosophy. I chose (after too little research) Lasagne as it will allow you to interact with Theano and the computation graph. Keep an eye onto this evolving area.\nFor a great example how to use Lasagne for MNIST see the Lasagne Tutorial: http://lasagne.readthedocs.org/en/latest/user/tutorial.html\nBascis\nLasagne provides you with several basic components to build your neural networks. Instead of defining your HiddenLayer and SoftmaxLayer as in the previous example, you can use existent implementations from the library and easily plug them together.\nIn the following we will reimplement the MLP for the MNIST-dataset using Lasagne. For more information on Lasagne see http://lasagne.readthedocs.org/en/latest/\nLoad your dataset\nAs before we load our dataset. See 2_MNIST for more details.",
"import gzip\nimport cPickle\nimport numpy as np\nimport theano\nimport theano.tensor as T\n\nimport lasagne\n\n# Load the pickle file for the MNIST dataset.\ndataset = 'data/mnist.pkl.gz'\n\nf = gzip.open(dataset, 'rb')\ntrain_set, dev_set, test_set = cPickle.load(f)\nf.close()\n\n#train_set contains 2 entries, first the X values, second the Y values\ntrain_x, train_y = train_set\ndev_x, dev_y = dev_set\ntest_x, test_y = test_set",
"Build the MLP\nNow we use the provided layers from Lasagne to build our MLP",
"def build_mlp(n_in, n_hidden, n_out, input_var=None):\n #Input layer, 1 dimension = number of samples, 2 dimension = input, our 28*28 image\n l_in = lasagne.layers.InputLayer(shape=(None, n_in), input_var=input_var)\n \n # Our first hidden layer with n_hidden units\n # As nonlinearity we use tanh, you could also try rectify\n l_hid1 = lasagne.layers.DenseLayer(incoming=l_in,\n num_units=n_hidden, nonlinearity=lasagne.nonlinearities.tanh,\n W=lasagne.init.GlorotUniform())\n \n # Our output layer (a softmax layer)\n l_out = lasagne.layers.DenseLayer(incoming=l_hid1, \n num_units=n_out, nonlinearity=lasagne.nonlinearities.softmax)\n \n return l_out\n ",
"Create the Train Function\nAfter loading the data and defining the MLP, we can now create the train function.",
"# Parameters\nn_in = 28*28\nn_hidden = 50\nn_out = 10\n\n# Create the network\nx = T.dmatrix('x') # the data, one image per row\ny = T.lvector('y') # the labels are presented as 1D vector of [int] labels\n\nnetwork = build_mlp(n_in, n_hidden, n_out, x)\n\n# Create a loss expression for training, i.e., a scalar objective we want\n# to minimize (for our multi-class problem, it is the cross-entropy loss):\nprediction = lasagne.layers.get_output(network)\nloss = lasagne.objectives.categorical_crossentropy(prediction, y)\nloss = loss.mean()\n\n# Create update expressions for training, i.e., how to modify the\n# parameters at each training step. Here, we'll use Stochastic Gradient\n# Descent (SGD) with Nesterov momentum, but Lasagne offers plenty more.\nparams = lasagne.layers.get_all_params(network, trainable=True)\nupdates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.01, momentum=0.9)\n\n\n# Predict the labels\nnetwork_predict_label = T.argmax(lasagne.layers.get_output(network, deterministic=True), axis=1)\n\n\n# Compile a function performing a training step on a mini-batch (by giving\n# the updates dictionary) and returning the corresponding training loss:\ntrain_fn = theano.function(inputs=[x, y], outputs=loss, updates=updates)\n\n# Create the predict_labels function\npredict_labels = theano.function(inputs=[x], outputs=network_predict_label)\n\n\n",
"Train the model\nWe run the training for some epochs and output the accurarcy of our network",
"#Function that helps to iterate over our data in minibatches\ndef iterate_minibatches(inputs, targets, batchsize, shuffle=False):\n assert len(inputs) == len(targets)\n if shuffle:\n indices = np.arange(len(inputs))\n np.random.shuffle(indices)\n for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):\n if shuffle:\n excerpt = indices[start_idx:start_idx + batchsize]\n else:\n excerpt = slice(start_idx, start_idx + batchsize)\n yield inputs[excerpt], targets[excerpt]\n\n#Method to compute the accruarcy. Call predict_labels to get the labels for the dataset\ndef compute_accurarcy(dataset_x, dataset_y): \n predictions = predict_labels(dataset_x)\n errors = sum(predictions != dataset_y) #Number of errors\n accurarcy = 1 - errors/float(len(dataset_y))\n return accurarcy\n\nnumber_of_epochs = 10\nprint \"%d epochs\" % number_of_epochs\n\nfor epoch in xrange(number_of_epochs): \n for batch in iterate_minibatches(train_x, train_y, 20, shuffle=True):\n inputs, targets = batch\n train_fn(inputs, targets) \n\n accurarcy_dev = compute_accurarcy(dev_x, dev_y)\n accurarcy_test = compute_accurarcy(test_x, test_y)\n\n print \"%d epoch: Accurarcy on dev: %f, accurarcy on test: %f\" % (epoch, accurarcy_dev, accurarcy_test)\n \nprint \"DONE\"\n"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
TheOregonian/long-term-care-db
|
notebooks/analysis/.ipynb_checkpoints/complaints_analysis-checkpoint.ipynb
|
mit
|
[
"This is an analysis of complaints data, munged in ../notebooks/mung.ipynb.\nRaw data is in ../data/raw\nThe fields are:\n1. <i>abuse_number:</i> A unique number assigned each complaint.\n2. <i>facility_id:</i> A unique number to each facility building. Stays if ownership changes.\n3. <i>facility_name:</i> Name of facility as of January 2017, when DHS provided the facility data to The Oregonian.\n4. <i>abuse_type:</i> A – facility abuse; L – licensing. Note: This does not apply to nursing facilities. All their complaints are either blank in this field or licensing.\n5. <i>action_notes:</i> DHS determination of what general acts constituted the abuse or rule violation.\n6. <i>incident_date:</i> Date the incident occured\n7. <i>outcome:</i> A very brief description of the consequences of the abuse or rule violation to the reisdent\n8. <i>outcome_notes:</i> A detailed description of what happened.\n9. <i>year:</i> year incident occured\n10. <i>fac_name:</i> If complaint is online, name listed for the facility\n11. <i>public:</i> Whether or not complaint is online",
"import pandas as pd\nimport numpy as np\nimport analysis_data_loader as loader\nfrom IPython.core.display import display, HTML\ndisplay(HTML(\"<style>.container { width:100% !important; }</style>\"))\npd.set_option('display.max_colwidth', -1)\n\ndf = loader.load_facilities()\n\ndf = pd.read_csv('/Users/fzarkhin/OneDrive - Advance Central Services, Inc/fproj/github/database-story/data/processed/complaints.csv')",
"<h3>How many complaints do not appear in the state's public database?</h3>",
"df[df['public']=='offline'].count()[0]",
"<h3>How many complaints do appear in the state's public database?</h3>",
"df[df['public']=='online'].count()[0]",
"<h3>What percent of complaints are missing?</h3>",
"df[df['public']=='offline'].count()[0]/df.count()[0]*100",
"<h3>How many complaints were labelled 'Exposed to potential harm' or 'No negative outcome?'</h3>",
"df[(df['outcome']=='Exposed to Potential Harm') | (df['outcome']=='No Negative Outcome')].count()[0]",
"<h3>Of all missing complaints, what percent are in the above two categories?</h3>",
"df[(df['outcome']=='Exposed to Potential Harm') |\n (df['outcome']=='No Negative Outcome')].count()[0]/df[df['public']=='offline'].count()[0]*100",
"<h3>How many complaints are labelled 'A,' which stands for abuse, but are offline?</h3>",
"df[(df['abuse_type']=='A') & (df['public']=='offline')].count()[0]\n\n#df.groupby('outcome').count().reset_index()[['outcome','abuse_number']].sort_values('abuse_number', ascending = False)",
"<h3>What's the online/offline breakdown by outcome?</h3>",
"totals = df.groupby(['omg_outcome','public']).count()['abuse_number'].unstack().reset_index()\n\ntotals.fillna(0, inplace = True)\n\ntotals['total'] = totals['online']+totals['offline']\n\ntotals['pct_offline'] = round(totals['offline']/totals['total']*100)\n\ntotals.sort_values('pct_offline',ascending=False)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
nilmtk/nilmtk
|
docs/manual/user_guide/nilmtk_api_tutorial.ipynb
|
apache-2.0
|
[
"NILMTK Rapid Experimentation API\nThis notebook demonstrates the use of NILMTK's ExperimentAPI - a new NILMTK interface which allows NILMTK users to focus on which experiments to run rather than on the code required to run such experiments.\nIt is important to note that handing over so much flexibility to the user does require the user to be somewhat familiar with the data set, but this part of the process is supported by NILMTK as data exploration is simple and well documented. \nLets us start with a very simple experiment to demonstrate the use of the API for multiple appliances in a minimal use case. This experiment shows how the user can select the appliances in the dataset on which disaggregation is to be performed.\nImporting the API.",
"from nilmtk.api import API\nimport warnings\nwarnings.filterwarnings(\"ignore\")",
"Next, we import the required algorithms on which we wish to run the experiments",
"from nilmtk.disaggregate import CO",
"Next, we enter the values for the different parameters in the dictionary. Since we need multiple appliances, we enter the names of all the required appliances in the 'appliances' parameter.",
"experiment1 = {\n 'power': {'mains': ['apparent','active'],'appliance': ['apparent','active']},\n 'sample_rate': 60,\n 'appliances': ['fridge','air conditioner', 'microwave'],\n 'methods': {\"CO\":CO({})},\n 'train': { \n 'datasets': {\n 'Dataport': {\n 'path': 'data/dataport.hdf5',\n 'buildings': {\n 10: {\n 'start_time': '2015-04-04',\n 'end_time': '2015-04-06'\n }\n } \n }\n }\n },\n 'test': {\n 'datasets': {\n 'Dataport': {\n 'path': 'data/dataport.hdf5',\n 'buildings': {\n 10: {\n 'start_time': '2015-04-25',\n 'end_time': '2015-04-26'\n }\n }\n }\n },\n 'metrics':['rmse']\n }\n}",
"In this example experimental setup, we have set the sample rate at 60Hz and use Combinatorial Optimisation to \ndisaggregate the required appliances from building 10 in the dataport dataset with the RMSE metric to measure the accuracy. We also specify the dates for training and testing\nNext we provide this experiment dictionary as input to the API.",
"api_results_experiment_1 = API(experiment1)",
"We can observe the prediction vs. truth graphs in the above cell. The accuracy metrics can be accessed using the following commands:",
"errors_keys = api_results_experiment_1.errors_keys\nerrors = api_results_experiment_1.errors\nfor i in range(len(errors)):\n print (errors_keys[i])\n print (errors[i])\n print (\"\\n\\n\")",
"This was a trivial experiment that only scratches the surface of the true potential of this API.\nIn the next experiment we will run an incrementally more complex version of the above experiment. Here we will use multiple models to disaggregate the appliance readings with the models having their own sets of parameters which can be set by the users within the experimental dictionary in order to fine tune experiments.\nWe also import the required algorithms for the next experiments",
"from nilmtk.disaggregate import FHMM_EXACT, Mean\n\nexperiment2 = {\n 'power': {'mains': ['apparent','active'],'appliance': ['apparent','active']},\n 'sample_rate': 60,\n 'appliances': ['fridge','air conditioner', 'microwave'],\n 'methods': {\"Mean\":Mean({}),\"FHMM_EXACT\":FHMM_EXACT({'num_of_states':2}), \"CombinatorialOptimisation\":CO({})},\n 'train': { \n 'datasets': {\n 'Dataport': {\n 'path': 'data/dataport.hdf5',\n 'buildings': {\n 10: {\n 'start_time': '2015-04-04',\n 'end_time': '2015-04-06'\n }\n } \n }\n }\n },\n 'test': {\n 'datasets': {\n 'Datport': {\n 'path': 'data/dataport.hdf5',\n 'buildings': {\n 10: {\n 'start_time': '2015-04-25',\n 'end_time': '2015-04-26'\n }\n }\n }\n },\n 'metrics':['mae', 'rmse']\n }\n}\n\napi_results_experiment_2 = API(experiment2)\n\napi_results_experiment_2.errors\n\nerrors_keys = api_results_experiment_2.errors_keys\nerrors = api_results_experiment_2.errors\nfor i in range(len(errors)):\n print (errors_keys[i])\n print (errors[i])\n print (\"\\n\\n\")",
"The API makes running experiments extremely quick and efficient, with the emphasis on creating finely tuned reproducible experiments where model and parameter performances can be easily evaluated at a glance. \nIn the next iteration of this experiment, we introduce more parameters chunksize, DROP_ALL_NANS and artificial_aggregate and add another disaggregation algorithm (Hart85). We also train and test data from multiple buildings of the same dataset.\nWe also import the Hart algorithm for the next experiment",
"from nilmtk.disaggregate import Hart85\n\nexperiment3 = {\n 'power': {'mains': ['apparent','active'],'appliance': ['apparent','active']},\n 'sample_rate': 60,\n 'appliances': ['fridge','air conditioner','electric furnace','washing machine'],\n 'artificial_aggregate': True,\n 'chunksize': 20000,\n 'DROP_ALL_NANS': False,\n 'methods': {\"Mean\":Mean({}),\"Hart85\":Hart85({}), \"FHMM_EXACT\":FHMM_EXACT({'num_of_states':2}), \"CO\":CO({})},\n 'train': { \n 'datasets': {\n 'Dataport': {\n 'path': 'data/dataport.hdf5',\n 'buildings': {\n 54: {\n 'start_time': '2015-01-28',\n 'end_time': '2015-02-12'\n },\n 56: {\n 'start_time': '2015-01-28',\n 'end_time': '2015-02-12'\n },\n 57: {\n 'start_time': '2015-04-30',\n 'end_time': '2015-05-14'\n },\n } \n }\n }\n },\n 'test': {\n 'datasets': {\n 'Datport': {\n 'path': 'data/dataport.hdf5',\n 'buildings': {\n 94: {\n 'start_time': '2015-04-30',\n 'end_time': '2015-05-07'\n },\n 103: {\n 'start_time': '2014-01-26',\n 'end_time': '2014-02-03'\n },\n 113: {\n 'start_time': '2015-04-30',\n 'end_time': '2015-05-07'\n },\n }\n }\n },\n 'metrics':['mae', 'rmse']\n }\n}\n\napi_results_experiment_3 = API(experiment3)\n\nerrors_keys = api_results_experiment_3.errors_keys\nerrors = api_results_experiment_3.errors\nfor i in range(len(errors)):\n print (errors_keys[i])\n print (errors[i])\n print (\"\\n\\n\")",
"The results of the above experiment are presented for every chunk per building in the test set.\nIn the following experiment, we demonstrate how to run experiments across datasets, which was previously not possible. The important thing to pay attention to is that such datasets can only be trained and tested together as long as they have common appliances in homes with common ac_types.",
"experiment4 = {\n 'power': {'mains': ['apparent','active'],'appliance': ['apparent','active']},\n 'sample_rate': 60,\n 'appliances': ['washing machine','fridge'],\n 'artificial_aggregate': True,\n 'chunksize': 20000,\n 'DROP_ALL_NANS': False,\n 'methods': {\"Mean\":Mean({}),\"Hart85\":Hart85({}), \"FHMM_EXACT\":FHMM_EXACT({'num_of_states':2}), 'CO':CO({})}, \n 'train': {\n 'datasets': {\n 'UKDALE': {\n 'path': 'C:/Users/Hp/Desktop/nilmtk-contrib/ukdale.h5',\n 'buildings': {\n 1: {\n 'start_time': '2017-01-05',\n 'end_time': '2017-03-05'\n }, \n }\n }, \n }\n }, \n 'test': {\n 'datasets': {\n 'DRED': {\n 'path': 'C:/Users/Hp/Desktop/nilmtk-contrib/dred.h5',\n 'buildings': {\n 1: {\n 'start_time': '2015-09-21',\n 'end_time': '2015-10-01'\n }\n }\n },\n 'REDD': {\n 'path': 'C:/Users/Hp/Desktop/nilmtk-contrib/redd.h5',\n 'buildings': {\n 1: {\n 'start_time': '2011-04-17',\n 'end_time': '2011-04-27'\n }\n }\n }\n },\n 'metrics':['mae', 'rmse']\n }\n}\n\napi_results_experiment_4 = API(experiment4)\n\nerrors_keys = api_results_experiment_4.errors_keys\nerrors = api_results_experiment_4.errors\nfor i in range(len(errors)):\n print (errors_keys[i])\n print (errors[i])\n print (\"\\n\\n\")",
"Just like the above experiments, any user can set up other experiments very quickly."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
authman/DAT210x
|
Module4/Module4 - Lab1.ipynb
|
mit
|
[
"DAT210x - Programming with Python for DS\nModule4- Lab1",
"import pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom plyfile import PlyData, PlyElement\n\n# Look pretty...\n\n# matplotlib.style.use('ggplot')\nplt.style.use('ggplot')",
"Every 100 samples in the dataset, we save 1. If things run too slow, try increasing this number. If things run too fast, try decreasing it... =)",
"reduce_factor = 100",
"Load up the scanned armadillo:",
"plyfile = PlyData.read('Datasets/stanford_armadillo.ply')\n\narmadillo = pd.DataFrame({\n 'x':plyfile['vertex']['z'][::reduce_factor],\n 'y':plyfile['vertex']['x'][::reduce_factor],\n 'z':plyfile['vertex']['y'][::reduce_factor]\n})",
"PCA\nIn the method below, write code to import the libraries required for PCA.\nThen, train a PCA model on the passed in armadillo dataframe parameter. Lastly, project the armadillo down to the two principal components, by dropping one dimension.\nNOTE-1: Be sure to RETURN your projected armadillo rather than None! This projection will be stored in a NumPy NDArray rather than a Pandas dataframe. This is something Pandas does for you automatically =).\nNOTE-2: Regarding the svd_solver parameter, simply pass that into your PCA model constructor as-is, e.g. svd_solver=svd_solver.\nFor additional details, please read through Decomposition - PCA.",
"def do_PCA(armadillo, svd_solver):\n # .. your code here ..\n \n return None",
"Preview the Data",
"# Render the Original Armadillo\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\nax.set_title('Armadillo 3D')\nax.set_xlabel('X')\nax.set_ylabel('Y')\nax.set_zlabel('Z')\nax.scatter(armadillo.x, armadillo.y, armadillo.z, c='green', marker='.', alpha=0.75)",
"Time Execution Speeds\nLet's see how long it takes PCA to execute:",
"%timeit pca = do_PCA(armadillo, 'full')",
"Render the newly transformed PCA armadillo!",
"fig = plt.figure()\nax = fig.add_subplot(111)\nax.set_title('Full PCA')\nax.scatter(pca[:,0], pca[:,1], c='blue', marker='.', alpha=0.75)\nplt.show()",
"Let's also take a look at the speed of the randomized solver on the same dataset. It might be faster, it might be slower, or it might take exactly the same amount of time to execute:",
"%timeit rpca = do_PCA(armadillo, 'randomized')",
"Let's see what the results look like:",
"fig = plt.figure()\nax = fig.add_subplot(111)\nax.set_title('Randomized PCA')\nax.scatter(rpca[:,0], rpca[:,1], c='red', marker='.', alpha=0.75)\nplt.show()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
tpin3694/tpin3694.github.io
|
sql/multiple_conditional_statements.ipynb
|
mit
|
[
"Title: Multiple Conditional Statements \nSlug: multiple_conditional_statements\nSummary: Multiple Conditional Statements in SQL. \nDate: 2017-01-16 12:00\nCategory: SQL\nTags: Basics\nAuthors: Chris Albon \nNote: This tutorial was written using Catherine Devlin's SQL in Jupyter Notebooks library. If you have not using a Jupyter Notebook, you can ignore the two lines of code below and any line containing %%sql. Furthermore, this tutorial uses SQLite's flavor of SQL, your version might have some differences in syntax.\nFor more, check out Learning SQL by Alan Beaulieu.",
"# Ignore\n%load_ext sql\n%sql sqlite://\n%config SqlMagic.feedback = False",
"Create Data",
"%%sql\n\n-- Create a table of criminals\nCREATE TABLE criminals (pid, name, age, sex, city, minor);\nINSERT INTO criminals VALUES (412, 'James Smith', 15, 'M', 'Santa Rosa', 1);\nINSERT INTO criminals VALUES (901, 'Gordon Ado', 32, 'F', 'San Francisco', 0);\nINSERT INTO criminals VALUES (512, 'Bill Byson', 21, 'M', 'Petaluma', 0);",
"View All Rows",
"%%sql\n\n-- Select all\nSELECT *\n\n-- From the criminals table\nFROM criminals",
"View Rows Where Age Is Greater Than 20 And City Is San Francisco",
"%%sql\n\n-- Select all unique\nSELECT distinct *\n\n-- From the criminals table\nFROM criminals\n\n-- Where age is greater than 20 and city is San Francisco\nWHERE age > 20 AND city == 'San Francisco'",
"View Rows Where Age Is Greater Than 20 or City Is San Francisco",
"%%sql\n\n-- Select all unique\nSELECT distinct *\n\n-- From the criminals table\nFROM criminals\n\n-- Where age is greater than 20 and city is San Francisco\nWHERE age > 20 OR city == 'San Francisco'"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
stsouko/CGRtools
|
doc/tutorial/3_standardization.ipynb
|
lgpl-3.0
|
[
"3. Structure standardization\n\n(c) 2019, 2020 Dr. Ramil Nugmanov;\n(c) 2019 Dr. Timur Madzhidov; Ravil Mukhametgaleev\n\nInstallation instructions of CGRtools package information and tutorial's files see on https://github.com/stsouko/CGRtools\nNOTE: Tutorial should be performed sequentially from the start. Random cell running will lead to unexpected results.",
"import pkg_resources\nif pkg_resources.get_distribution('CGRtools').version.split('.')[:2] != ['4', '0']:\n print('WARNING. Tutorial was tested on 4.0 version of CGRtools')\nelse:\n print('Welcome!')\n\n# load data for tutorial\nfrom pickle import load\nfrom traceback import format_exc\n\nwith open('molecules.dat', 'rb') as f:\n molecules = load(f) # list of MoleculeContainer objects\nwith open('reactions.dat', 'rb') as f:\n reactions = load(f) # list of ReactionContainer objects\n\nm1, m2, m3, m4 = molecules # molecule\nr2 = reactions[2] # reaction",
"3.1. Molecules\nMoleculeContainer has standardize, kekule, thiele, neutralize, implicify_hydrogens and canonicalize methods.\nMethod thiele transforms Kekule representation of rings into aromatized.\nMethod standardize applies functional group standardization rules to molecules (more than 50 rules).\nMethod canonicalize apply set of methods: neutralize, standardize, kekule, implicify_hydrogens, thiele",
"m3 # molecule with kekulized ring\n\nm3.standardize() # apply standardization. Returns True if any group found\n\nm3 # group-standardized structure.\n\nm3.thiele() # aromatizes and returns True then any ring found\n\nm3",
"Molecules has explicify_hydrogens and implicify_hydrogens methods to handle hydrogens.\nThis methods is used to add or remove hydrogens in molecule.\nNote implicify_hydrogens working for aromatic rings only in kekule form. explicify_hydrogens for aromatized forms required kekule and optionally thiele procedures applyed before.",
"m3.explicify_hydrogens() # return number of added hydrogens\n\nm3.clean2d(randomize=True) # for added hydrogen atoms coordinates are not calculated. Thus, it looks like hydrogen has the same position on image\nm3\n\nm3.kekule()\nm3.implicify_hydrogens()\n\nm3",
"3.2. Reactions standardization\nReactionContainer has same methods as molecules. In this case they are applied to all molecules in reaction.",
"r2\n\nr2.standardize()\nr2.explicify_hydrogens()\nr2.clean2d(randomize=True)\nr2"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
GoogleCloudPlatform/mlops-on-gcp
|
examples/mlops-env-on-gcp/provisioning-kfp/verify-infra.ipynb
|
apache-2.0
|
[
"Verifying the MLOps environment on GCP\nListing the installed packages",
"!pip install -U -q pip\n\n!pip list | grep 'tfx\\|kfp\\|beam\\|tensorflow'",
"Connecting to KFP and listing experiments",
"%%bash\n\nPREFIX=<YOUR-PREFIX>\nZONE=<YOUR-ZONE>\nNAMESPACE=<YOUR-NAMESPACE>\n\ngcloud container clusters get-credentials $PREFIX-cluster --zone $ZONE\n\necho \"https://\"$(kubectl describe configmap inverse-proxy-config -n $NAMESPACE | \\\ngrep \"googleusercontent.com\")",
"Use the URL produced by the previous cell as the HOST_URL",
"import kfp\n \nHOST_URL = ''\nNAMESPACE = 'kfp' # Change to your namespace\n \nclient = kfp.Client(host=HOST_URL, namespace=NAMESPACE)\n[pipeline.name for pipeline in client.list_pipelines().pipelines]",
"Connectiong to Cloud SQL ML Metadata and list tables\nThis page describes how to connect a mysql client to your Cloud SQL instance using the Cloud SQL Proxy.",
"!pip install -U -q mysql-connector\n\nimport mysql.connector\n\nmetadb = mysql.connector.connect(\n host='127.0.0.1',\n port=3306,\n database='metadb',\n user=\"root\",\n passwd=\"\" # set root password \n)\n\ncursor = metadb.cursor(buffered=True)\n\ncursor.execute(\"SHOW TABLES FROM metadb;\")\ncursor.fetchall()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
essicolo/ecologie-mathematique
|
02_Python/2.ipynb
|
mit
|
[
"Chapitre 2 : Python\nLe python est une famille de reptile avec pas de pattes comprenant 10 espèces. Mais Python est un langage de programmation lancé en 1991 par Guido van Rossum, un fan du groupe d'humoriste britanique Mounty Python. \n\nSource: Ministry of silly walks, Mounty Python\nPython figure parmi les langages de programmation les plus utilisés au monde. Il s'agit d'un langage dynamique, c'est à dire que le code peut être exécuté ligne par ligne ou bloc par bloc: un avantage majeur pour des activités qui nécessitent des intéractions fréquentes. Bien que Python soit surtout utilisé pour créer des applications, il s'impose de plus en plus comme outil privilégié pour le calcul scientifique en raison des récents développements de bibliothèques d'analyse, de modélisation et de visualisation, dont plusieurs seront utilisés dans ce manuel.\nPréparer son ordinateur\nInstaller Python\nInstaller et gérer Python sur un ordinateur serait une tâche plutôt ardue si ce n'était de la distribution Anaconda, spécialisée pour le calcul scientifique. Anaconda est distribué librement sur Linux, Windows et OS X (il existe d'autres distribution Python équivalentes, notamment Enthought et Python(x,y)). Il est possible de travailler sur Python en mode infonuagique, notamment avec SageMath. Toutefois, l'option nuagique n'est en ce moment pas à la hauteur d'un environnement local en terme d'efficacité, de polyvalence et d'autonomie. \nSur Windows et sur OS X, téléchargez et installez! Sur Linux, téléchargez, ouvrez un terminal, naviguez dans le répertoire de téléchargement (par exemple cd ~/Téléchargements), puis lancez la commande spécifiée sur la page de téléchargement, par exemple bash Anaconda3-4.3.1-Linux-x86_64.sh.\nNote. Les modules présentés dans ce cours devraient être disponibles sur Linux, Windows et Mac. Ce n'est pas le cas pour tous les modules Python. La plupart fonctionnent néanmoins sur Linux. 
Que ce soit sur Ubuntu, l'une de ses nombreuses dérivées (Elementary, Linux Mint, KDE Neon, etc.), sous Debian, openSUSE, Arch, Fedora ou autre, les systèmes d'opération Linux sont de bonnes options pour le calcul scientifique.\nPremiers pas avec Python\nVoyons si Python est aussi libre qu'on le prétend. Si Python est bien installé sur votre ordinateur, la manière la plus directe de le lancer est en ouvrant un terminal (chercher cmd dans le menu si vous êtes sur Windows). Dans ce terminal, écrivez python, puis tapez enter. Vous devriez obtenir quelque chose comme ceci.\n\nLes symboles >>> forment ce que l'on nomme l'invite de commande. J'entre ici les commandes dans le carnet, mais pour l'instant, entrez-les dans le terminal (on débutera avec les carnets un peu plus loin).\nOpérations de base\n\n\"La liberté, c’est la liberté de dire que deux et deux font quatre. Si cela est accordé, tout le reste suit.\" - George Orwell, 1984",
"2+2\n\n67.1-43.3\n\n2*4\n\n2**4\n\n1/2\n\n1 / 2 # les espaces ne signifie rien ici",
"Tout va bien pour l'instant. Remarquez que la dernière opération comporte des espaces entre les nombres et l'opérateur /. Dans ce cas (ce n'est pas toujours le cas), les espaces ne signifient rien - il est même suggéré de les placer pour éclaircir le code, ce qui est utile lorsque les équations sont complexes. Puis, après l'opération 1 / 2, j'ai placé le symbole # suivi d'une note. Le symbole # est interprété par Python comme un ordre de ne pas considérer ce qui le suit. Cela est très utile pour insérer à même le code des commentaires pertinents pour mieux comprendre les opérations.\nAssigner des objets à des variables est fondamental en programmation. Par exemple.",
"a = 3",
"Techniquement, a pointe vers le nombre entier 3. Conséquemment, on peut effectuer des opérations sur a.",
"a * 6\n\nA + 2",
"Le message d'erreur nous dit que A n'est pas défini. Sa version minuscule, a, l'est pourtant. La raison est que Python considère la case dans la définition des objets. Utiliser la mauvaise case mène donc à des erreurs.\nLe nom d'une variable doit toujours commencer par une lettre, et ne doit pas contenir de caractères réservés (espaces, +, *, .). Par convention, les objets qui commencent par une lettre majuscules sont utilisés pour définir des classes (modules), utiles pour le développement de logiciels, mais rarement utilisés dans le cadre d'un feuille de calcul scientifique.",
"rendement_arbre = 50 # pomme/arbre\nnombre_arbre = 300 # arbre\nnombre_pomme = rendement_arbre * nombre_arbre\nnombre_pomme",
"Types de données\nJusqu'à maintenant, nous n'avons utilisé que des nombres entiers (integer ou int) et des nombres réels (float ou float64). Python inclue d'autres types. La chaîne de caractère (string) est un ou plusieurs symboles. Elle est définie entre des double-guillemets \" \" ou des apostrophes ' '. Il n'existe pas de standard sur l'utilisation de l'un ou de l'autre, mais en règle générale, on utilise les apostrophe pour les experssions courtes, contenant un simple mot ou séquence de lettres, et les guillements pour les phrases. Une raison pour cela: les guillemets sont utiles pour insérer des apostrophes dans une chaîne de caractère.",
"a = \"L'ours\"\nb = \"polaire\"\na + \" \" + b + \" ressemble à un faux zèbre.\"",
"Notez que l'objet a a été défini précédemment. Il est possible en Python de réassigner une variable, mais cela peut porter à confusion, jusqu'à générer des erreurs de calcul si une variable n'est pas assigné à l'objet auquel on voulait référer.\nL'opérateur + sur des caractères retourne une concaténation.\nCombien de caractères contient la chaîne \"L'ours polaire\"? Python sait compter. Demandons-lui.",
"c = a + \" \" + b\nlen(c)",
"Quatorze, c'est bien cela (comptez \"L'ours polaire\", en incluant l'espace). len, pour lenght (longueur), est une fonction (aussi appelée méthode) incluse par défaut dans l'environnement de travail de Python. La fonction est appelée en écrivant len(). Mais une fonction de quoi? Des arguments qui se trouvent entre les parenthèses. Dans ce cas, il y a un seul argument: c.\nEn calcul scientifique, il est courrant de lancer des requêtes sur si une résultat est vrai ou faux.",
"a = 17\nprint(a < 10)\nprint(a > 10)\nprint(a == 10)\nprint(a != 10)\nprint(a == 17)\nprint(~a == 17)",
"Je viens d'introduire un nouveau type de donnée: les données booléennes (boolean, ou bool), qui ne peuvent prendre que deux états - True ou False. En même temps, j'ai utilisé la fonction print parce que dans mon carnet, seule la dernière opération permet d'afficher le résultat. Si l'on veut forcer une sortie, on utilise print. Puis, on a vu plus haut que le symbole = est réservé pour assigner des objets: pour les tests d'égalité, on utilise le double égal, ==, ou != pour la non égalité. Enfin, pour inverser une donnée de type booléenne, on utilise le symbole ~.\nPour les tests sur les chaînes de caractères, on utilisera in et son inverse not in.",
"print('o' in 'Ours')\nprint('O' in 'Ours')\nprint('O' not in 'Ours')",
"Les collections de données\nLes exercices précédents ont permis de présenter les types de données offerts par défault sur Python qui sont les plus importants pour le calcul scientifique: int (integer, ou nombre entier), float64 (nombre réel), str (string, ou chaîne de caractère) et bool (booléen). D'autres s'ajouterons tout au long du cours, comme les unités de temps (date-heure), les catégories et les géométries (points, linges, polygones) géoréférencées. Lorsque l'on procède à des opérations de calcul en science, nous utilisons rarement des valeurs uniques. Nous préférons les oragniser et les traiter en collections. Par défaut, Python offre trois types importants: les listes, les tuples et les dictionnaires.\nD'abord, les listes, ou list, sont une série de variables sans restriction sur leur type. Elles peuvent même contenir d'autres listes. Une liste est délimitée par des crochets [ ], et les éléments de la liste sont séparés par des virgules.",
"espece = ['Petromyzon marinus', 'Lepisosteus osseus', 'Amia calva', 'Hiodon tergisus']\nespece",
"Pour accéder aux éléments d'une liste, appelle la liste suivie de la position de l'objet désiré entre crochets. Fait important qui reviendra tout au long du cours: en Python, l'indice du premier élément est zéro.",
"print(espece[0])\nprint(espece[2])\nprint(espece[:2])\nprint(espece[2:])",
"Pour les deux dernières commandes, la position :2 signifie jusqu'à 2 non inclusivement et 2: signifie de 2 à la fin.\nPour ajouter un élément à notre liste, on peut utiliser la fonction append.",
"espece.append(\"Cyprinus carpio\")\nespece",
"Notez que la fonction append est appelée après la variable et précédée un point. Cette manière de procéder est courrante en programmation orientée objet. La fonction append est un attribut d'un objet list et prend un seul argument: l'objet qui est ajouté à la liste. C'est une manière de dire grenouille.saute(longueur=1.0).\nEn lançant espece[2] = \"Lepomis gibbosus\", on note qu'il est possible de changer une élément de la liste.",
"print(espece)\nespece[2] = \"Lepomis gibbosus\"\nprint(espece)",
"Si les données contenues dans une liste sont de même type, cette liste peut être considérée comme un vecteur. En créant une liste de vecteurs de dimensions cohérentes, on crée une matrice. Nous verrons plus tard que pour les vecteurs et les matrices, on utilisera un format offert par un module complémentaire. Pour l'instant, on pourrait définir une matrice comme suit.",
"mat = [[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9],\n [10, 11, 12]]\nmat",
"Les tuples, définis tuple par Python, différent des listes du fait que ses éléments ne peuvent pas être modifiés. Un tuple est délimité par des parenthèses ( ), et comme chez la liste, ses éléments sont séparés par des virgules. Les tuples sont moins polyvalents que les listes. Vous les utiliserez probablement rarement, et surtout comme arguments dans certaines fonctions en calcul scientifique.",
"espece = ('Petromyzon marinus', 'Lepisosteus osseus', 'Amia calva', 'Hiodon tergisus')\nespece[2] = \"Lepomis gibbosus\"\nespece",
"Les dictionnaires, ou dict, sont des listes dont chaque élément est identifié par une clé. Un dictionnaire est délimité par des accolades sous forme mon_dict = {'clé1': x, 'clé2': y, 'clé3': z }. On appelle un élément par sa clé entre des crochets, par exemple mon_dict['clé1'].\nLe dict se rapproche d'un tableau: nous verrons plus tard que le format de tableau (offert dans un module complémentaire) est bâti à partir du format dict. Contrairement à un tableau où les colonnes contiennent toutes le même nombre de lignes, chaque élément du dictionnaire est indépendant des autres.",
"tableau = {'espece': ['Petromyzon marinus', 'Lepisosteus osseus', 'Amia calva', 'Hiodon tergisus'], 'poids': [10, 13, 21, 4], 'longueur': [35, 44, 50, 8]}\nprint('Mon tableau: ', tableau)\nprint('Mes espèces:', tableau['espece'])\nprint('Noms des clés (ou colonnes):', tableau.keys())",
"Les fonctions (ou méthodes)\nPlus haut, j'ai présenté les fonctions len et append. Une myriade de fonctions sont livrées par défaut avec Python. Mais il en manque aussi cruellement.",
"sqrt(2)",
"Message d'erreur: la commande sqrt n'est pas définie. \n\nQuoi, Python n'est pas foutu de calculer une racine carrée?\n\nPar défaut, non.\n\nMais!\nDe nombreuses extensions (les modules) permettent de combler ces manques. Nous aborderons ça un peu plus loin dans ce chapitre. Pour l'instant, exerçons-nous à créer notre propre fonction de racine carrée.",
"def racine(x, n=2):\n r = x**(1/n)\n return r",
"En Python, def est le mot-clé pour définir une fonction. Suit ensuite, après un espace, le nom que vous désirez donner à la fonction: racine. Les arguments de la fonction suivent entre les parenthèses. Dans ce cas, x est la valeur de laquelle on veut extraire la racine et n est l'ordre de la racine. L'agument x n'a pas de valeur par défaut: elle doit être spécifiée pour que la fonction fonctionne. La mention n=2 signifie que si la valeur de n n'est pas spécifiée, elle prendra la valeur de 2 (la racine carrée). Pour marquer la fin de la définition et le début de la suite d'instructions, on utilise les deux points :, puis un retour de ligne. Une indentation (ou retrait) de quatre barres d'espacement signifie que l'on se trouve à l'intérieur de la suite d'instructions, où l'on calcule une valeur de r comme l'exposant de l'inverse de l'ordre de la racine. La dernière ligne indique ce que la fonction doit retourner.",
"print(racine(9))\nprint(racine(x=9))\nprint(racine(8, 3))\nprint(racine(x=8, n=3))",
"S'ils ne sont pas spécifiés, Python comprend que les arguments sont entrés dans l'ordre défini dans la fonction. En entrant racine(9), Python comprend que le 9 est attribué à x et donne à n sa valeur par défaut, 2. Ce qui est équivalent à entrer racine(x=9). Les autres entrées sont aussi équivalentes, et extraient la racine cubique. S'il se peut qu'il y ait confusion entre les arguments nommés et ceux qui ne le sont pas, Python vous retournera un message d'erreur. Règle générale, il est préférable pour la lisibilité du code de nommer les arguments plutôt que de les spécifier dans l'ordre.\nSupposons maintenant que vous avez une liste de données dont vous voulez extraire la racine.",
"data = [3.5, 8.1, 10.2, 0.5, 5.6]\nracine(x=data, n=2)",
"Oups. Python vous dit qu'il y a une erreur, et vous indique avec une flèche ----> à quelle ligne de notre fonction elle est encourrue. Les exposants ** (on peut aussi utiliser la fonction pow) ne sont pas applicables aux listes. Une solution est d'appliquer la fonction à chaque élément de la liste avec une ittération. On verra plus tard des manières plus efficaces de procéder. Je me sers de ce cas d'étude pour introduire les boucles ittératives.\nLes boucles\nLes boucles permettent d'effectuer une même suite d'opérations sur plusieurs objets. Pour faire suite à notre exemple:",
"racine_data = []\nfor i in [0, 1, 2, 3, 4]:\n r = racine(x=data[i], n=2)\n racine_data.append(r)\n\nracine_data",
"Nous avons d'abord créé une liste vide, racine_data. Ensuite, pour (for) chaque indice de la liste (i in [0, 1, 2, 3, 4]), nous demandons à Python d'effectuer la suite d'opération qui suit le ; et qui est indentée de quatre espaces. Dans la suite d'opération, calculer la racine carrée de data à l'indice i, puis l'ajouter à la liste racine_data. Au lieu d'entrer une liste [0, 1, 2, 3, 4], on aurait pu utiliser la fonction range et lui assigner automatiquement la longueur de la liste.",
"racine_data = []\nfor i in range(len(data)):\n r = racine(x=data[i], n=2)\n print('Racine carrée de ', data[i], ' = ', r)\n racine_data.append(r)",
"La fonction range retourne une séquence calculée au besoin. Elle est calculée si elle est évoquée dans une boucle ou en lançant list.",
"print(range(len(data)))\nprint(list(range(len(data))))\nprint(range(2, len(data)))\nprint(list(range(2, len(data))))",
"Première observation, si un seul argument est inclus, range retourne une séquence partant de zéro. Seconde observation, la séquence se termine en excluant l'argument. Ainsi, range(2,5) retourne la séquence [2, 3, 4]. En spécifiant la longueur de data comme argument, la séquence range(5) retourne la liste [0, 1, 2, 3, 4], soit les indices dont nous avons besoin pour itérer dans la liste.\nLes boucles for vous permettront par exemple de générer en peu de temps 10, 100, 1000 graphiques (autant que vous voulez), chacun issu de simulations obtenues à partir de conidtions initiales différentes, et de les enregistrer dans un répertoire sur votre ordinateur. Un travail qui pourrait prendre des semaines sur Excel peut être effectué en Python en quelques secondes.\nUn second outil est disponible pour les itérations: les boucles while. Elles effectue une opération tant qu'un critère n'est pas atteint. Elles sont utiles pour les opérations où l'on cherche une convergence. Je les couvre rapidement puisque'elles sont rarement utilisées dans les flux de travail courrants. En voici un petit exemple.",
"x = 100\nwhile (x > 1.1):\n x=racine(x)\n print(x)",
"Nous avons inité x à une valeur de 100. Puis, tant que (while) le test x > 1.1 est vrai, attribuer à x la nouvelle valeur calculée en extrayant la racine de la valeur précédente de x. Enfin, indiquer la valeur avec print.\nExplorons maintenant comment Python réagit si on lui demande de calculer $\\sqrt{-1}$.",
"racine(x=-1, n=2)",
"D'abord, Python ne retourne pas de message d'erreur, mais un nouveau type de donnée: le nombre imaginaire. Puis, 6.123233995736766e-17 n'est pas zéro, mais très proche. La résolution des calculs étant numérique, on obeserve parfois de légères déviations par rapport aux solutions mathématiques.\nSi pour un cas particulier, on veut éviter que notre fonction retourne un nombre imaginaire, comment s'y prendre? Avec une condition.\nConditions: if, elif, else\n\nSi la condition 1 est remplie, effectuer une suite d'instruction 1. Si la condition 1 n'est pas remplie, et si la condition 2 est remplie, effectuer la suite d'instruction 2. Sinon, effectuer la suite d'instruction 3.\n\nVoilà comment on exprime une suite de conditions. Pour notre racine d'un nombre négatif, on pourrait procéder comme suit.",
"def racine_positive_nn(x, n=2):\n if x<0:\n raise ValueError(\"x est négatif\")\n elif x==0:\n raise ValueError(\"x est nul\")\n else:\n r = x**(1/n)\n return r",
"La racine positive et non-nulle (racine_positive_nn) comprend les mot-clés if (si), elif (une contration de else if) et else (sinon). ValueError est une fonction pour retourner un message d'erreur lorsqu'elle est précédée de raise. Comme c'est le cas pour def et for, les instructions des conditions sont indentées. Notez la double indentation (8 espaces) pour les instructions des conditions. Alors que la plupart des langages de programmation demandent d'emboîter les instructions dans des parenthèses, accolades et crochets, Python préfère nous forcer à bien indenter le code (ce que l'on devrait faire de toute manière pour améliorer la lisibilité) et s'y fier pour effectuer ses opérations.",
"racine_positive_nn(x=-1, n=2)\n\nracine_positive_nn(x=0, n=2)\n\nracine_positive_nn(x=4, n=2)",
"Charger un module\nLe module numpy, installé par défaut avec Anaconda, est une boîte d'outil de calcul numérique populée par de nombreuses foncions mathématiques. Dont la racine carrée.",
"import numpy as np\nnp.sqrt(9)\n\nfrom numpy import sqrt\nsqrt(9)",
"La plupart des fonctions que vous aurez à construire seront vouées à des instructions spécialisées à votre cas d'étude. Pour la plupart des opérations d'ordre générale (comme les racines carrées, les tests statistiques, la gestion de matrices et de tableau, les graphiques, les modèles d'apprentissage, etc.), des équipes ont déjà développé des fonctions nécessaires à leur utilisation, et les ont laissées disponibles au grand public. L'introduction à Python se termine là-dessus.\nComme une langue, on n'apprend à s'exprimer en un langage informatique qu'en se mettant à l'épreuve, ce que vous ferez tout au long de ce cours.\nPrenons une pause de Python et passons à des technicalités.\nL'environnement de travail\nLe gestionnaire conda\nUne fois que Anaconda est installé, l'installation de modules et des environnements virtuel devient possible avec le gestionnaire conda. Cette section ne présente qu'un aperçu de ses capacités, basé sur ce dont vous aurez besoin pour ce cours. Pour plus d'information, consultez le guide.\nInstaller des modules\nSans module, Python ne sera pas un environnement de calcul scientifique appréciable. Heureusement, il existe des modules pour faciliter la vie des scientifiques qui désirent calculer des opérations simples comme des moyennes et des angles, ou des opérations plus compliquées comme des intégrales et des algorithmes d'intelligence artificielle. Plusieurs modules sont installés par défaut avec Anaconda. Pour lister l'ensemble des modules installés dans un environnement, ouvrez un terminal (si vous vous trouvez dans une session Python, vous devez quitter par la commande quit()) et lancez:\nconda list\nLes modules sont téléchargés et installés depuis des dépôts en ligne. L'entreprise Continuum Analytics, qui développe et supporte Anaconda, offre ses propres dépôts. 
Par défaut, le module statsmodels (que nous utiliserons pour certaines opérations) sera téléchargé depuis les dépôts par défaut si vous lancez:\nconda install statsmodels\nIl est préférable d'utiliser le dépôt communautaire conda-forge plutôt que les dépôts officiels de Continuum Analytics. Sur conda-forge, davantage de modules sont disponibles, ceux-ci sont davantage à jour et leur qualité est contrôlée.\nconda config --add channels conda-forge\nPar la suite, tous les modules seront téléchargés depuis conda-forge. Pour effectuer une mise à jour de tous les modules, lancez:\nconda update --all\nInstaller des environnements virtuels\nVous voilà en train de travailler sur des données complexes qui demandent plusieurs opérations. Vous avez l'habitude, à toutes les semaines, de lancer conda update --all pour mettre à jour les modules, ce qui corrige les bogues et ajoute des fonctionnalités. L'équipe de développement d'un module a décidé de modifier, pour le mieux, une fonction. Vous n'êtes pas au courant de ce changement et vous passez deux jours à cherche ce qui cause ce message d'erreur dans vos calculs. Vous envoyez votre fichier de calcul à votre collègue qui n'a pas mis à jour ce module, puis vos corrections lui causent des problèmes. Croyez-moi, ça arrive souvent.\nLes environnements virtuels sont là pour éviter cela. Il s'agit d'un répertoire dans lequel Python ainsi que ses modules sont isolés. Pour un projet spécifique, vous pouvez créer un environnement virtuel sous Python 2.7.9 et installer des versions de modules spécifiques sans les mettre à jour. 
Ça permet d'une part de travailler avec des outils qui ne changent pas en cours de projet, et d'autre part à travailler entre collègues avec les mêmes versions.\nPour créer un environnement nommé fertilisation_laitue incluant Python en version 2.7.9 et le module statsmodels version 0.6.0, lancez:\nconda create -n fertilisation_laitue python=2.7.9\nLe répertoire de projet sera automatiquement installé dans le répertoire envs de votre installation de Anaconda.\nPour activer cet environnement, sous Linux et en OS X:\nsource activate fertilisation_laitue\nSous Windows:\nactivate fertilisation_laitue\nDepuis l'environnement virtuel, vous pouvez installer les modules dont vous avez besoin, en spécifiant la version. Par exemple,\nconda install statsmodels=0.6.0\nDepuis votre environnement virtuel (y compris l'environnement root), vous pouvez aussi lancer Jupyter, une interface qui vous permettra d'intéragir de manière conviviale avec Python.\nÀ titre d'exemple, préparons-nous au cours en créant un environnement virtuel qui incluera la version de Python 3.5. Précédemment, nous avions fait cela en deux étapes: (1) créer l'environnement, puis (2) installer les bibliothèques. Nous pouvons tout aussi bien le faire d'un coup. Je nomme arbitrairement l'environnement ecolopy.\nconda create -n ecolopy numpy scipy pandas matplotlib jupyterlab\nActivons l'environnement (Linux et OS X: source activate ecolopy, Windows: activate ecolopy), puis installons les bibliothèques nécessaires. 
Puisque j'utilise Linux,\nsource activate ecolopy\nPour partager un environnement de travail avec des collègues ou avec la postérité, vous pouvez générer une liste de prérequis via conda list -e > req.txt, à partir de laquelle quiconque utlise Anaconda pourra créer un environnement virtuel identique au vôtre via conda create -n ecolopy environment --file req.txt.\nPour tester l'environnement, lancez python!\npython\nPour ce cours, vous êtes libres de générer un environnement de travail ou de travailler dans l'environnement par défaut (nommé root).\nLa première ligne importe le module NumPy (numpy) et en crée une instance dont on choisi optionnellement le nom: np (utilisé conventionnellement pour numpy). Ce faisant, on appelle numpy et on le lie avec np. Ainsi, on peut aller chercher l'instance sqrt de np avec np.sqrt(). Si l'on ne cherche qu'à importer la fonction sqrt et que l'on ne comte pas utiliser le tout NumPy:\nDe nombreux modules seront utilisés lors de ce cours. La section suivante vise à les présenter brièvement.\nModules de base\nNumPy\nNumPy, une contraction de Numerical Python, donne accès à de nombreuses fonctions mathématiques et intervient inmanquablement pour effectuer des opérations sur les matrices. La grande majorité des opérations effectuées lors de ce cours fera explicitement ou implicitement (via un autre module s'appuyant sur NumPy) référence à NumPy. 
NumPy permet notamment:\n\nde donner accès à des opérations mathématiques de base comme la racine carrée, la trigonométrie, les logarithmes, etc.;\nd'effectuer des opérations rapides sur des matrices multidimentionnelles (ndarray, ou n-dimensionnal array), dont des calculs d'algèbre linéaire;\nd'effectuer des calculs élément par élément, ligne par ligne, colonne par colonne, etc., grâce à la \"vectorisation\" - par exemple en additionnant un scalaire à un vecteur, le scalaire sera additionné à tous les éléments du vecteur;\nd'importer et exporter des fichiers de données;\nde générer des nombres aléatoires selon des lois de probabilité.\n\nSciPy\nBasée sur NumPy, SciPy est une collection de fonctions mathématiques offrant une panoplie d'outil pour le calcul scientifique. Il simplifie certaines fonctions de Numpy, et offre des gadgets qui se rendront essentiels pour des opérations courrantes, notamment:\n\ncalcul intégral et résolution de systèmes d'équations différentielles ordinaires\ninterpolation entre coordonnées\ntraitement et analyse de signal\n\nNote. Un bibliothèque portant le préfixe scikit fait partie de la trousse d'extensions de SciPy. \npandas\nLes données sont souvent organisées sous forme de tableau, les colonnes représentant les variables mesurées et les lignes représentant les observations. La bibliothèque pandas offre un kit d'outil pour travailler avec des tableaux de données (DataFrame) de manière efficace. Avec une rapidité d'exécution héritée de NumPy, pandas inclut l'approche des bases de données relationnelles (SQL) pour filtrer, découper, synthétiser, formater et fusionner des tableaux.\nmatplotlib\nLes graphiques sont des synthèses visuelles de données qui autrement seraient pénibles à interpréter. Malgré les récents développements en visualisation sur Python, matplotlib reste la bibliothèque de base pour la présentation de graphiques: nuages de points, lignes, boxplots, histogrammes, contours, etc. 
Il y en a d'autres comme altair, seaborn et bokeh, qui vous seront présentées au moment opportun.\nModules spécialisés: <<<À AJUSTER À LA FIN>>>\nSymPy\nLe calcul symbolique a une place théorique importante en calcul scientifique. SymPy sera utilisée pour valider des fonctions issues d'équations différentielles.\nstatsmodels\nPlus que de la statistique, la bibliothèque statsmodels est conçue comme accompagnatrice dans l'analyse de données. Elle aidera à effectuer des statistiques comme des analyses de variance, des régressions et des analyses de survie, mais aussi des opérations de prétraitement comme l'imputation de données manquantes.\nscikit-learn\nL'apprentissage automatique (machine learning en anglais), permet de détecter des structures dans les données dans l'objectif de prédire une nouvelle occurance, que ce soit un ou plusieurs variables numériques (régression) ou catégorielles (classification). De nombreux algorhitmes sont appelés à être utilisés en sciences de la vie. scikit-learn est une trousse d'outil permettant d'appréhender ces outils complexes de manière efficace, conviviale et cohérente, en plus d'offir la possibilité d'empaqueter des machines d'apprentissage dans des logiciels. La documentation de scikit-learn est d'une rare qualité. scikit-learn peut aussi être utilisé pour effectuer des classifications non supervisées (classifier des données qui n'ont pas de catégorie prédéterminées), notamment l'analyse de partitionnement (clustering en anglais).\nscikit-bio\nscikit-bio sera utilisé principalement pour l'analyse compositionnelle et pour l'ordination. Ses possibilités ne s'arrêtent toutefois pas là. Techniquement, la bibliothèque scikit-bio a moins de lien avec scikit-learn qu'avec QIIME, un logiciel libre dédié à la bioinformatique, une discipline connexe au génie écologique, mais axée sur l'analyse génétique.\nbokeh\nUn graphique est une représentation visuelle de données. 
Bien que matplotlib un outil essentiel au calcul scientifique avec Python, de nombreuses autres bibliothèques ont été développées pour combler ses lacunes. L'une d'entre elle émerge du transfert de la publication traditionnelle (papier, puis pdf) vers la publication de documents interactifs. bokeh est une bibliothèque qui, parmi d'autres (notamment plotly et mpld3), offre la possibilité de créer des graphiques intéractifs. Bonus: bokeh est aussi une plateforme de développement de logiciels scientifiques.\nggplot\ngg, pour Grammar of Graphics. C'est avant tout un langage pour exprimer le passage de la donnée à sa représentation graphique. Le module ggplot2 est l'un des plus prisés du langage R. Un groupe de travail a heureusement planché sur une version Python, moins complète mais hautement utile pour tracer des graphiques de manière conviviale autant pour l'exploration de données que pour la publication.\nSfePy\nSimple Finite Elements with Python est un gros module conçu pour appréhender de la manière la plus simple possible la modélisation d'équations différentielles partielles par éléments finis. Cette méthode, largement utilisée en ingénierie, sera utile pour modéliser une panoplie de mécanismes déterministes: les transferts d'énergie, l'écoulement de l'eau, le transport des solutés et la dispersion des espèces.\nlifelines\nCombien de temps reste-t-il avant un événment? C'est la question que pose l'analyste de survie. lifelines est un module Python conçu exactement pour cela.\nInterfaces\nOn les appelle des interfaces graphiques ou des environnement intégrés de développement et son conçus pour faciliter l'utilisation d'un langage de programmation, souvent pour des applications particulières. Utiliser Python uniquement dans un terminal n'est pas très pratique pour garder la trace des calculs. 
Comme la plupart des interfaces conçus pour le calcul scientifique, Jupyter comporte trois composantes: un éditeur de commande, un moyen d'exécuter les commandes et un afficheur de graphiques.\nJupyter\nJupyter lab\nAnciennement nommé IPython notebook, puis Jupyter notebook, Jupyter lab s'inspire d'un format usuelle en science: le carnet de laboratoire.\n<!---->\n<img src=\"https://raw.githubusercontent.com/jupyterlab/jupyterlab/master/jupyter-plugins-demo.gif\" width=\"600\">\nJupyter lab fonctionne dans une fenêtre de navigateur internet. Le code est interprété par IPython, un interprétateur pour le calcul scientifique sur Python. Chaque cellule peut contenir un texte explicatif (édité en markdown, un outil de traitement de texte où le formattage est effectué dans texte à l'aide de caractères spéciaux), incluant des équations (écrites en format LaTeX via MathJax - il existe des éditeurs d'équations en ligne), ou des morceaux de code. Par ailleurs, ces notes de cours sont rédigées dans des carnets Jupyter.\nnteract\nAfin de libérer Jupyter du navigateur web, une équipe a développé le logiciel nteract, une version épurée de Jupyter en format d'application bureau. C'est l'interface que nous allons utiliser lors de ce cours. Téléchargez l'installateur et installez!\nAutres\nL'interface de Rodeo comprend des fenêtres pour l'édition de code, pour interprétateur IPython, pour la session de travail et pour afficher les graphiques. En fait, il imite l'interface de RStudio pour R. C'est une solution visuellement élégante et moderne, mais pas aussi complète que Spyder.\nSpyder est un acronyme pour \"Scientific PYthon Development EnviRonment\". Si vous avez installé Anaconda, Spyder est déjà installé sur votre ordiateur. Il est comparable à Rodeo, mais est plus ancien, plus complet, mais aussi plus complexe. \nIl existe aussi plusieurs autres environnements de développement en mode libre (Atom/Hydrogen, Eclipse/PyDev, Komodo IDE, Lighttable, Ninja IDE). 
Mais certaines préféreront seulement utiliser un bon éditeur texte (Atom, Brackets, LighttTable, Notepad++, etc.) accompagné d'un terminal sous IPython. \nAstuces pour utiliser Jupyter\nCe manuel est créé avec Jupyter. En double-cliquant dans ses cellules de texte, vous aurez accès au code markdown, un langage HTML simplifié permettant d'insérer des titres, des tableaux, des emphases en italique et en gras, des équations, des liens, des citations, des encadrés, des listes, etc.\nCellule markdown\nFormatter un texte en markdown est relativement facile. Vous utiliserez sans doute ces outils:\n```\nTitre 1\nTitre 2\nTitre 6\nFormater du code alligné. Ou bien un\n```\nparagraphe dédié au code\n```\nListe à puce\n- item 1\n- item 2\n - item 2.1\n - item 2.1.1\nListe numérotée\n0. item 1\n0. item 2\n 0. item 2.1\n 0. item 2.1.1\nTexte emphasé en italique ou en gras.\nHyperlien\nInsérer une image\n\nInsérer une équation en ligne: $\\alpha + \\beta$. Ou en un paragraphe:\n$$ c = \\sqrt{\\left( a^2 + b^2 \\right)} $$\n| Symbole | Format |\n| --- | ---|\n| Titre 1 | # Titre 1|\n| Titre 2 | ## Titre 2|\n| Titre 6 | ###### Titre 6|\n| code ligne | `code`|\n| code paragraphe | ```code```|\n| Items de liste à puce | - item avec indentation pour les sous-items |\n| Items de liste numérotée | 0. item avec indentation pour les sous-items |\n| Italique | Texte emphasé en *italique* |\n| Gras | Texte emphasé en **gras** |\n| Hyperlien | Créer un [lien](https://python.org) |\n| Image | Insérer une image  |\n| Équation | Insérer une équation en format LaTeX \\$c = \\sqrt \\left( a^2 + b^2 \\right)\\$ |\n```\nCellule de code\nIl était suggéré au cours de ce chapitre d'entrer les commandes dans un terminal. À partir d'ici, il sera préférable d'utiliser un notebook à partir d'ici, et d'excécuter les calculs dans des cellules de code.",
"a = 5/2\na"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
tombosc/dict_based_learning
|
visualize_toy_dataset.ipynb
|
mit
|
[
"Visualize toy dataset\nThe goal of the toy dataset is to make experiment with small vocabulary size.\nIt would be nice if we could generate data that looks like real text from afar.\nFor example we could try to make it zipfian (the log frequency of the tokens is linear with regards to the log rank of the tokens).\nMaybe we can also take a look at the variance in the positions of the words, i.e. we would like some words to be able to appear pretty much everywhere in the sentence whereas others should appear in the beginning.\nWe can do quick experiments without worrying too much about this but eventually but it would be nice to be able to trust this model for more complex experiments, hyper parameters search, ...\nOne possibility is to learn a model on real data and learn the distribution of the features and learned params.\nTODO:\n\nmaybe we can gain better control if we directly manipulate the norms of the vectors as they should correlate with the frequencies.",
"import numpy as np\n\nfrom dictlearn.generate_synthetic_data import FakeTextGenerator\n\nV = 100\nembedding_size = 50\nmarkov_order = 6\ntemperature=1.0\nsentence_size = 20\n\nmodel = FakeTextGenerator(V, embedding_size, markov_order, temperature)\nn_sentences=1000\nsentences = model.create_corpus(n_sentences, 5, 10, 0.7, 0.1, 0.5)",
"Alternative model",
"import numpy as np\n\nfrom dictlearn.generate_synthetic_data_alt import FakeTextGenerator\n\nembedding_size = 20\nmarkov_order = 3\ntemperature=1.0\nsentence_size = 20\n\nmodel = FakeTextGenerator(100, 400, embedding_size, markov_order, temperature)\nn_sentences=1000\nsentences = model.create_corpus(n_sentences, 5, 10, 0.7, 0.1)\n\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nplt.figure(figsize=(20, 20))\nplt.imshow(model.features.T, interpolation='none')\nplt.colorbar()\nplt.show()\n\nimport matplotlib.pyplot as plt\n%matplotlib inline\nfrom collections import Counter\n\ndef summarize(sentences, V, label):\n \"\"\"\n sentences: list of list of characters\n V: vocabulary size\n \"\"\"\n sentence_size = len(sentences[0])\n # count tokens and their positions\n #positions = np.zeros((V,sentence_size))\n unigram_counts = Counter()\n for sentence in sentences:\n for i,tok in enumerate(sentence):\n unigram_counts[tok] += 1\n #positions[w, i] += 1\n ordered_count = [c for _, c in unigram_counts.most_common()]\n print ordered_count[:100]\n print ordered_count[500:600]\n\n print ordered_count[-100:]\n\n total_word_count = sum(ordered_count)\n # compute empirical frequency\n ordered_freq = [float(oc)/total_word_count for oc in ordered_count] \n print len(ordered_count), len(ordered_freq), V\n plt.plot(range(len(ordered_freq)), ordered_freq)\n plt.title(\"word frequency ordered by decreasing order of occurences (rank) on \" + label)\n plt.show()\n\n plt.plot(np.log(range(len(ordered_freq))), np.log(ordered_count))\n plt.title(\"log(word frequency) / log(rank) on \" + label)\n plt.show()",
"Study corpus",
"summarize(sentences, model.V, \"corpus\")\n",
"Not really zipfian so far. Maybe read that if we really care about that.\nStudy definitions",
"definitions = []\nfor defs in model.dictionary.values():\n definitions += defs\nsummarize(definitions, V, \"definitions\")\n"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
dinrker/PredictiveModeling
|
Session 4 - Features_I_Transformations_DimensionalityReduction.ipynb
|
mit
|
[
"Feature Engineering\n|Session | Session |\n|-----------|---------|\n|Feature Engineering I | Feature Transformation and Dimension Reduction (PCA)|\n|Feature Engineering II | Nonlinear Dimension Reduction (Autoencoder)|\n|Feature Engineering III | Random Projections |\nGoals of this Lesson\n\n\nFeature Transformations\n\nStandard Normal Transform\nDomain-Specific Transform\nMystery Transform\n\n\n\nDimensionality Reduction\n\nPCA: Model and Learning\nPCA for Images\nPCA for Visualization\n\n\n\nReferences\n\nChapter 14 of Elements of Statistical Learning by Hastie, Tibshirani, Friedman\nA Few Useful Things to Know about Machine Learning\nSciKit-Learn's documentation on data preprocessing\nSciKit-Learn's documentation on dimensionality reduction\n\n0. Preliminaries\nFirst we need to import Numpy, Pandas, MatPlotLib...",
"from IPython.display import Image\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport time\n%matplotlib inline",
"I've created a function that we'll use later to create visualizations. It is a bit messy and not essential to the material so don't worry about understanding it. I'll be happy to explain it to anyone interested during a break or after the session.",
"from matplotlib.colors import ListedColormap\n\n# Another messy looking function to make pretty plots of basketball courts\ndef visualize_court(log_reg_model, coord_type='cart', court_image = '../data/nba/nba_court.jpg'):\n two_class_cmap = ListedColormap(['#FFAAAA', '#AAFFAA']) # light red for miss, light green for make\n x_min, x_max = 0, 50 #width (feet) of NBA court\n y_min, y_max = 0, 47 #length (feet) of NBA half-court\n grid_step_size = 0.2\n grid_x, grid_y = np.meshgrid(np.arange(x_min, x_max, grid_step_size), np.arange(y_min, y_max, grid_step_size))\n features = np.c_[grid_x.ravel(), grid_y.ravel()]\n # change coordinate system\n if coord_type == 'polar':\n features = np.c_[grid_x.ravel(), grid_y.ravel()]\n hoop_location = np.array([25., 0.])\n features -= hoop_location\n dists = np.sqrt(np.sum(features**2, axis=1))\n angles = np.arctan2(features[:,1], features[:,0])\n features = np.hstack([dists[np.newaxis].T, angles[np.newaxis].T])\n \n grid_predictions = log_reg_model.predict(features)\n grid_predictions = grid_predictions.reshape(grid_x.shape)\n fig, ax = plt.subplots()\n court_image = plt.imread(court_image)\n ax.imshow(court_image, interpolation='bilinear', origin='lower',extent=[x_min,x_max,y_min,y_max])\n ax.imshow(grid_predictions, cmap=two_class_cmap, interpolation = 'nearest',\n alpha = 0.60, origin='lower',extent=[x_min,x_max,y_min,y_max])\n plt.xlim(x_min, x_max)\n plt.ylim(y_min, y_max)\n plt.title( \"Make / Miss Prediction Boundaries\" )\n plt.show()",
"We also need functions for shuffling the data and calculating classification errrors.",
"### function for shuffling the data and labels\ndef shuffle_in_unison(features, labels):\n rng_state = np.random.get_state()\n np.random.shuffle(features)\n np.random.set_state(rng_state)\n np.random.shuffle(labels)\n \n### calculate classification errors\n# return a percentage: (number misclassified)/(total number of datapoints)\ndef calc_classification_error(predictions, class_labels):\n n = predictions.size\n num_of_errors = 0.\n for idx in xrange(n):\n if (predictions[idx] >= 0.5 and class_labels[idx]==0) or (predictions[idx] < 0.5 and class_labels[idx]==1):\n num_of_errors += 1\n return num_of_errors/n",
"1. Warm-up\nLet's start with a warm-up exercise. In the data directory you'll find a dataset of recent movies and their ratings according to several popular websites. Let's load it with Pandas...",
"# load a dataset of recent movies and their ratings across several websites\nmovie_data = pd.read_csv('../data/movie_ratings.csv')\n# reduce it to just the ratings categories\nmovie_data = movie_data[['FILM','RottenTomatoes','RottenTomatoes_User','Metacritic','Metacritic_User','Fandango_Ratingvalue', 'IMDB']]\nmovie_data.head()\n\nmovie_data.describe()",
"Logistic Regression Review\nData\nWe observe pairs $(\\mathbf{x}{i},y{i})$ where\n\\begin{eqnarray}\ny_{i} \\in { 0, 1} &:& \\mbox{class label} \\\n\\mathbf{x}{i} = (x{i,1}, \\dots, x_{i,D}) &:& \\mbox{set of $D$ explanatory variables (aka features)} \n\\end{eqnarray}\n Parameters\n\\begin{eqnarray}\n\\mathbf{\\beta}^{T} = (\\beta_{0}, \\dots, \\beta_{D}) : \\mbox{values encoding the relationship between the features and label}\n\\end{eqnarray}\n Transformation Function \n\\begin{equation}\nf(z_{i}=\\mathbf{x}{i} \\mathbf{\\beta} ) = (1+e^{-\\mathbf{x}{i} \\mathbf{\\beta} })^{-1}\n\\end{equation}\nError Function\n\\begin{eqnarray}\n\\mathcal{L} = \\sum_{i=1}^{N} -y_{i} \\log f(\\mathbf{x}{i} \\mathbf{\\beta} ) - (1-y{i}) \\log (1-f(\\mathbf{x}_{i} \\mathbf{\\beta} ))\n\\end{eqnarray}\n Learning $\\beta$ \n- Randomly initialize $\\beta$\n- Until $\\alpha || \\nabla \\mathcal{L} || < tol $:\n - $\\mathbf{\\beta}{t+1} = \\mathbf{\\beta}{t} - \\alpha \\nabla_{\\mathbf{\\beta}} \\mathcal{L}$\n<span style=\"color:red\">STUDENT ACTIVITY (10 MINS)</span>\nLet's run a logistic regression classifier on this data via SciKit-Learn. If you need a refresher, check out the notebook from the first course and the SciKit-Learn documentation on logistic regression. The goal is to predict if the IMDB rating is under or over 7/10, using the other ratings as features. I've started the code. You just need to fill-in the lines for training and computing the error. Note there is no test set yet.",
"from sklearn.linear_model import LogisticRegression\n\n# set the random number generator for reproducability\nnp.random.seed(123)\n\n# let's try to predict the IMDB rating from the others\nfeatures = movie_data[['RottenTomatoes','RottenTomatoes_User','Metacritic','Metacritic_User','Fandango_Ratingvalue']].as_matrix()\n# create classes: more or less that 7/10 rating\nlabels = (movie_data['IMDB'] >= 7.).astype('int').tolist()\nshuffle_in_unison(features, labels)\n\n### Your Code Goes Here ###\n\n# initialize and train a logistic regression model\n\n# compute error on training data\n\nmodel_LogReg = LogisticRegression()\nmodel_LogReg.fit(features, labels)\npredicted_labels = model_LogReg.predict(features)\ntrain_error_rate = calc_classification_error(predicted_labels, labels)\n\n\n###########################\n\nprint \"Classification error on training set: %.2f%%\" %(train_error_rate*100)\n# compute the baseline error since the classes are imbalanced\nprint \"Baseline Error: %.2f%%\" %((sum(labels)*100.)/len(labels))",
"2. Feature Transformations\nGood features are crucial for training well-performing classifiers: 'garbage in, garbage out.' In this section we introduce several transformations that are commonly applied to data as a preprocessing step before training a classifier.\n2.1 Normal Standardization\nRecall the formula of the standard linear model: $$\\hat Y = f(\\beta^{T} \\mathbf{X}) $$ where $\\hat Y$ is the predictions, $f(\\cdot)$ is the transformation function, $\\beta$ is the weights (parameters), and $X$ is the $N \\times D$ matrix of features. For simplicity, assume there are just two features: $$ \\beta^{T} \\mathbf{x}{i} = \\beta{1}x_{i,1} + \\beta_{2}x_{i,2}.$$ Usually $x_{i,1}$ and $x_{i,2}$ will be measured in different units. For instance, in the movie ratings data, the Rotten Tomatoes dimension is on a $0-100$ scale and the Fandango ratings are on $0-5$. The difference in scale causes one dimension to dominate the inner product. Linear models can learn to cope with this imbalance by changing the scales of the weights accordingly, but this makes optimization harder because gradient steps are unequal across dimensions.\nOne way to get rid of hetergeneous scales is to standardize the data so that the values in each dimension are distributed according to the standard Normal distribution. In math, this means we'll transform the data like so: $$\\mathbf{X}{std} = \\frac{\\mathbf{X} - \\boldsymbol{\\mu}{X}}{\\boldsymbol{\\sigma}_{X}}. $$ This is also called 'z-score scaling.' Let's examine the affect of this transformation on training error.",
"# perform z-score scaling\nfeatures_mu = np.mean(features, axis=0)\nfeatures_sigma = np.std(features, axis=0)\nstd_features = (features - features_mu)/features_sigma\n\n# re-train model\nlm = LogisticRegression()\nlm.fit(std_features, labels)\n\n### compute error on training data\npredictions = lm.predict(std_features)\nprint \"Classification error on training set: %.3f%%\" %(calc_classification_error(predictions, labels)*100)\n# compute the baseline error since the classes are imbalanced\nprint \"Baseline Error: %.2f%%\" %((sum(labels)*100.)/len(labels))",
"Standard Normal scaling is a common and usually default first step, especially when you know the data in measured in different units.\n2.2 Domain-Specific Transformations\nSometimes the data calls for a specific transformation. We'll demonstrate this on the NBA dataset used in our first workshop. Let's load it...",
"nba_shot_data = pd.read_csv('../data/nba/NBA_xy_features.csv')\nnba_shot_data.head()",
"And let's run logistic regression on the data just as we did before...",
"# split data into train and test\ntrain_set_size = int(.80*len(nba_shot_data))\ntrain_features = nba_shot_data.ix[:train_set_size,['x_Coordinate','y_Coordinate']].as_matrix()\ntest_features = nba_shot_data.ix[train_set_size:,['x_Coordinate','y_Coordinate']].as_matrix()\ntrain_class_labels = nba_shot_data.ix[:train_set_size,['shot_outcome']].as_matrix()\ntest_class_labels = nba_shot_data.ix[train_set_size:,['shot_outcome']].as_matrix()\n\n#Train logistic regression model\nstart_time = time.time()\nlm.fit(train_features, np.ravel(train_class_labels))\nend_time = time.time()\nprint \"Training ended after %.2f seconds.\" %(end_time-start_time)\n\n# compute the classification error on training data\npredictions = lm.predict(test_features)\nprint \"Classification Error on the Test Set: %.2f%%\" %(calc_classification_error(predictions, np.array(test_class_labels)) * 100)\n\n# compute the baseline error since the classes are imbalanced\nprint \"Baseline Error: %.2f%%\" %(np.sum(test_class_labels)/len(test_class_labels)*100)\n\n# visualize the boundary on the basketball court\nvisualize_court(lm)",
"Now let's transform the Cartesian coordinates into polar coordinates: (x,y) --> (radius, angle)...",
"### Transform coordinate system\n\n# radius coordinate: calculate distance from point to hoop\nhoop_location = np.array([25.5, 0.])\ntrain_features -= hoop_location\ntest_features -= hoop_location\ntrain_dists = np.sqrt(np.sum(train_features**2, axis=1))\ntest_dists = np.sqrt(np.sum(test_features**2, axis=1))\n\n# angle coordinate: use arctan2 function\ntrain_angles = np.arctan2(train_features[:,1], train_features[:,0])\ntest_angles = np.arctan2(test_features[:,1], test_features[:,0])\n\n# combine vectors into polar coordinates\npolar_train_features = np.hstack([train_dists[np.newaxis].T, train_angles[np.newaxis].T])\npolar_test_features = np.hstack([test_dists[np.newaxis].T, test_angles[np.newaxis].T])\n\npd.DataFrame(polar_train_features, columns=[\"Radius\",\"Angle\"]).head()\n\n#Train model\nstart_time = time.time()\nlm.fit(polar_train_features, np.ravel(train_class_labels))\nend_time = time.time()\nprint \"Training ended after %.2f seconds.\" %(end_time-start_time)\n\n# compute the classification error on test data\npredictions = lm.predict(polar_test_features)\nprint \"Classification Error on the Test Set: %.2f%%\" %(calc_classification_error(predictions, np.array(test_class_labels)) * 100)\n\n# compute the baseline error since the classes are imbalanced\nprint \"Baseline Error: %.2f%%\" %(np.sum(test_class_labels)/len(test_class_labels)*100)\n\n# visualize the boundary on the basketball court\nvisualize_court(lm, coord_type='polar')",
"<span style=\"color:red\">STUDENT ACTIVITY (10 mins)</span>\n2.3 Mystery Data\nThe data folder contains some mysterious data that can't be modeled well with a linear function. Running the code below, we see the squared error is over 70. However, the error can be driven to zero using one of two transformations. See if you can find one or both. The transformations are common ones you surely know.",
"from sklearn.linear_model import LinearRegression\n\n# load (x,y) where y is the mystery data\nx = np.arange(0, 30, .2)[np.newaxis].T\ny = np.load(open('../data/mystery_data.npy','rb'))\n\n### transformation goes here ###\n\nx = np.cos(x)\n\n################################\n\n# initialize regression model\nlm = LinearRegression()\nlm.fit(x,y)\ny_hat = lm.predict(x)\nsquared_error = np.sum((y - y_hat)**2)\n\nif not np.isclose(squared_error,0):\n print \"The squared error should be zero! Yours is %.8f.\" %(squared_error)\nelse:\n print \"You found the secret transformation! Your squared error is %.8f.\" %(squared_error)",
"3. Dimensionality Reduction\nSometimes the data calls for more aggressive transformations. High-dimensional data is usually hard to model because classifiers are likely to overfit. Regularization is one way to combat high dimensionality, but often it can not be enough. This section will cover dimensionality reduction--a technique for reducing the number of features while still preserving curcial information. This is a form of unsupervised learning since we use no class information. \n3.1 Image Dataset: Bob Ross Paintings\n\nIn this section and throughout the next session, we'll use a dataset of Bob Ross' paintings. Images are a type of data that notoriously have redundant features and whose dimensionality can be reduced significantly, without much loss of information. We'll explore this phenomenom via 403 $400 \\times 300$ full-color images of natural landscape paintings.\nBefore we load the data, let's take a minute to review how image data is stored on a computer. Of course, all the computer sees are numbers ranging from 0 to 255. Each pixel takes on one of these values. Furthermore, there are three layers to color images--one for red, blue, and green values. Therefore, the painting we'll examine are represented as $300 \\times 400 \\times 3$-dimensional tensors (multi-dimensional arrays). This layering is depicted below.\n\nWhile images need to be represented with three dimensions to be visualized, the learning algorithms we'll consider don't need any notion of color values so I've already flattened the images into vector form, i.e. to create a matrix of size $403 \\times 360000$. Let's load the dataset...",
"# un-zip the paintings file\nimport zipfile\nzipper = zipfile.ZipFile('../data/bob_ross/bob_ross_paintings.npy.zip')\nzipper.extractall('../data/bob_ross/')\n\n# load the 403 x 360,000 matrix\nbr_paintings = np.load(open('../data/bob_ross/bob_ross_paintings.npy','rb'))\nprint \"Dataset size: %d x %d\"%(br_paintings.shape)",
"and then visualize two of the images...",
"# subplot containing first image\nax1 = plt.subplot(1,2,1)\nbr_painting = br_paintings[70,:]\nax1.imshow(np.reshape(br_painting, (300, 400, 3)))\n\n# subplot containing second image\nax2 = plt.subplot(1,2,2)\nbr_painting = br_paintings[33,:]\nax2.imshow(np.reshape(br_painting, (300, 400, 3)))\nplt.show()",
"3.2 Principal Component Analysis\nAs we've seen, the dataset has many, many, many more features (columns) than examples (rows). Simple Lasso or Ridge regularization probably won't be enough to prevent overfitting so we have to do something more drastic. In this section, we'll cover Principal Component Analysis, a popular technique for reducing the dimensionality of data.\nUnsupervised Learning\nPCA does not take into consideration labels, only the input features. We can think of PCA as performing unsupervised 'inverse' prediction. Our goal is: for a datapoint $\\mathbf{x}{i}$, find a lower-dimensional representation $\\mathbf{h}{i}$ such that $\\mathbf{x}{i}$ can be 'predicted' from $\\mathbf{h}{i}$ using a linear transformation. In math, this statement can be written as $$\\mathbf{\\tilde x}{i} = \\mathbf{h}{i} \\mathbf{W}^{T} \\text{ where } \\mathbf{h}{i} = \\mathbf{x}{i} \\mathbf{W}. $$ $\\mathbf{W}$ is a $D \\times K$ matrix of parameters that need to be learned--much like the $\\beta$ vector in regression models. $D$ is the dimensionality of the original data, and $K$ is the dimensionality of the compressed representation $\\mathbf{h}_{i}$. The graphic below reiterates the above described PCA pipline: \n\nOptimization\nHaving defined the PCA model, we look to write learning as an optimization process. 
Recall that we wish to make a reconstruction of the data, denoted $\\mathbf{\\tilde x}{i}$, as close as possible to the original input: $$\\mathcal{L} = \\sum{i=1}^{N} (\\mathbf{x}{i} - \\mathbf{\\tilde x}{i})^{2}.$$ We can make a substitution for $\\mathbf{\\tilde x}{i}$ from the equation above: $$ = \\sum{i=1}^{N} (\\mathbf{x}{i} - \\mathbf{h}{i}\\mathbf{W}^{T})^{2}.$$ And we can make another substitution for $\\mathbf{h}{i}$, bringing us to the final form of the loss function: $$ = \\sum{i=1}^{N} (\\mathbf{x}{i} - \\mathbf{x}{i}\\mathbf{W}\\mathbf{W}^{T})^{2}.$$ \nWe could perform gradient descent on $\\mathcal{L}$, just like we do for logistic regression models, but there exists a deterministic solution. We won't show the derivation here, but you can find it here. $\\mathbf{W}$ is optimal when it contains the eigenvectors of the data's covariance matrix, and thus we can use a standard eigen decomposition to learn the transform: $$ \\boldsymbol{\\Sigma}{\\mathbf{X}} = \\mathbf{W} \\boldsymbol{\\Lambda} \\boldsymbol{W}^{T} $$ where $\\boldsymbol{\\Sigma}{\\mathbf{X}}$ is the data's empirical covariance matrix and $\\boldsymbol{\\Lambda}$ is a diagonal matrix of eigenvalues. Eigen decompositions can be performed effeciently by any scientific computing library, including numpy. \nIntuition\nThe connection to the data's (co-)variance becomes a little more clear when the intuitions behind PCA are examined. The PCA transformation projects the data onto linear subspaces oriented in the directions of highest variance. To elaborate, assume the data resides in two dimensions according to the following scatter plot. 
The columns of $\\mathbf{W}$--the $K=2$ principal components--would be the green lines below:\n\n'PCA 1st Dimension' is the direction of greatest variance, and if the data is projected down to one dimension, the new representations would be produced by collapsing the data onto that line.\nPrincipal Component Analysis (PCA) Overview\nData\nWe observe $\\mathbf{x}_{i}$ where\n\\begin{eqnarray}\n\\mathbf{x}{i} = (x{i,1}, \\dots, x_{i,D}) &:& \\mbox{set of $D$ explanatory variables (aka features). No labels.} \n\\end{eqnarray}\n Parameters\n$\\mathbf{W}$: Matrix with dimensionality $D \\times K$, where $D$ is the dimensionality of the original data and $K$ the dimensionality of the new features. The matrix encodes the transformation between the original and new feature spaces.\nError Function\n\\begin{eqnarray}\n\\mathcal{L} = \\sum_{i=1}^{N} ( \\mathbf{x}{i} - \\mathbf{x}{i} \\mathbf{W} \\mathbf{W}^{T})^{2}\n\\end{eqnarray}\nPCA on Bob Ross dataset\nNow let's run PCA on the Bob Ross paintings dataset...\n<span style=\"color:red\">Caution: Running PCA on this dataset can take from 30 seconds to several minutes, depending on your computer's processing power.</span>",
"from sklearn.decomposition import PCA\n\npca = PCA(n_components=400)\nstart_time = time.time()\nreduced_paintings = pca.fit_transform(br_paintings)\nend_time = time.time()\n\nprint \"Training took a total of %.2f seconds.\" %(end_time-start_time)\nprint \"Preserved percentage of original variance: %.2f%%\" %(pca.explained_variance_ratio_.sum() * 100) \nprint \"Dataset is now of size: %d x %d\"%(reduced_paintings.shape)",
"Let's visualize two of the paintings...",
"img_idx = 70\nreconstructed_img = pca.inverse_transform(reduced_paintings[img_idx,:])\noriginal_img = br_paintings[70,:]\n\n# subplot for original image\nax1 = plt.subplot(1,2,1)\nax1.imshow(np.reshape(original_img, (300, 400, 3)))\nax1.set_title(\"Original Painting\")\n\n# subplot for reconstruction\nax2 = plt.subplot(1,2,2)\nax2.imshow(np.reshape(reconstructed_img, (300, 400, 3)))\nax2.set_title(\"Reconstruction\")\nplt.show()",
"We can also visualize the transformation matrix $\\mathbf{W}^{T}$. Its rows act as 'filters' or 'feature detectors'...",
"# get the transformation matrix\ntransformation_mat = pca.components_ # This is the W^T matrix\n# two components to show\ncomp1 = 13\ncomp2 = 350\n\n# subplot \nax1 = plt.subplot(1,2,1)\nfilter1 = transformation_mat[comp1-1,:]\nax1.imshow(np.reshape(filter1, (300, 400, 3)))\nax1.set_title(\"%dth Principal Component\"%(comp1))\n\n# subplot \nax2 = plt.subplot(1,2,2)\nfilter2 = transformation_mat[comp2-1,:]\nax2.imshow(np.reshape(filter2, (300, 400, 3)))\nax2.set_title(\"%dth Principal Component\"%(comp2))\nplt.show()",
"3.3 PCA for Visualization\nPCA can also be done for visualization purposes. Let's perform PCA on the movie ratings dataset and see if any semblance of the class structure can be seen.",
"# get the movie features\nmovie_features = movie_data[['RottenTomatoes','RottenTomatoes_User','Metacritic','Metacritic_User','Fandango_Ratingvalue']].as_matrix()\n\n# perform standard scaling again but via SciKit-Learn\nfrom sklearn.preprocessing import StandardScaler\nz_scaler = StandardScaler()\nmovie_features = z_scaler.fit_transform(movie_features)\n\npca = PCA(n_components=2)\nstart_time = time.time()\nmovie_2d_proj = pca.fit_transform(movie_features)\nend_time = time.time()\n\nprint \"Training took a total of %.4f seconds.\" %(end_time-start_time)\nprint \"Preserved percentage of original variance: %.2f%%\" %(pca.explained_variance_ratio_.sum() * 100) \nprint \"Dataset is now of size: %d x %d\"%(movie_2d_proj.shape)\n\nlabels = movie_data['FILM'].tolist()\nclasses = movie_data['IMDB'].tolist()\n\n# color the points by IMDB ranking\nlabels_to_show = []\ncolors = []\nfor idx, c in enumerate(classes):\n if c > 7.25:\n colors.append('g')\n if c > 8.:\n labels_to_show.append(labels[idx])\n else:\n colors.append('r')\n if c < 4.75:\n labels_to_show.append(labels[idx])\n\n# plot data\nplt.scatter(movie_2d_proj[:, 0], movie_2d_proj[:, 1], marker = 'o', c = colors, s = 150, alpha = .6)\n\n# add movie title annotations\nfor label, x, y in zip(labels, movie_2d_proj[:, 0].tolist(), movie_2d_proj[:, 1].tolist()):\n if label not in labels_to_show:\n continue\n if x < 0:\n text_x = -20\n else:\n text_x = 150\n plt.annotate(label.decode('utf-8'),xy = (x, y), xytext = (text_x, 40),\n textcoords = 'offset points', ha = 'right', va = 'bottom',\n arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3, rad=0'),\n bbox = dict(boxstyle = 'round,pad=0.5', fc = 'b', alpha = 0.2))\n \nplt.title('PCA Projection of Movies')\nplt.show()",
"<span style=\"color:red\">STUDENT ACTIVITY (until end of session)</span>\nYour task is to reproduce the above PCA examples on a new dataset of images. Let's load it...",
"from sklearn.datasets import fetch_olivetti_faces\n\nfaces_dataset = fetch_olivetti_faces(shuffle=True)\nfaces = faces_dataset.data # 400 flattened 64x64 images\nperson_ids = faces_dataset.target # denotes the identity of person (40 total)\n\nprint \"Dataset size: %d x %d\" %(faces.shape)\nprint \"And the images look like this...\"\nplt.imshow(np.reshape(faces[200,:], (64, 64)), cmap='Greys_r')\nplt.show()",
"This dataset contains 400 64x64 pixel images of 40 people each exhibiting 10 facial expressions. The images are in gray-scale, not color, and therefore flattened vectors contain 4096 dimensions.\n<span style=\"color:red\">Subtask 1: Run PCA</span>",
"?PCA\n\n### Your code goes here ###\n\n# train PCA model on 'faces'\nfrom sklearn.decomposition import PCA\n\npca = PCA(n_components=100)\n\nstart_time = time.time()\nfaces_reduced = pca.fit_transform(faces)\nend_time = time.time()\n\n\n###########################\n\nprint \"Training took a total of %.2f seconds.\" %(end_time-start_time)\nprint \"Preserved percentage of original variance: %.2f%%\" %(pca.explained_variance_ratio_.sum() * 100) \nprint \"Dataset is now of size: %d x %d\"%(faces_reduced.shape)",
"<span style=\"color:red\">Subtask 2: Reconstruct an image</span>",
"### Your code goes here ###\n\n# Use learned transformation matrix to project back to the original 4096-dimensional space\n# Remember you need to use np.reshape() \n\n###########################\n\nimg_idx = 70\nreconstructed_img = pca.inverse_transform(faces_reduced[img_idx,:])\noriginal_img = faces[70,:]\n\n# subplot for original image\nax1 = plt.subplot(1,2,1)\nax1.imshow(np.reshape(original_img, (64, 64)), cmap='Greys_r')\nax1.set_title(\"Original Image\")\n\n# subplot for reconstruction\nax2 = plt.subplot(1,2,2)\nax2.imshow(np.reshape(reconstructed_img, (64, 64)), cmap='Greys_r')\nax2.set_title(\"Reconstruction\")\nplt.show()\n",
"Your output should look something like what's below (although could be a different face):\n\n<span style=\"color:red\">Subtask 3: Visualize one or more components of the transformation matrix (W)</span>",
"### Your code goes here ###\n\n# Now visualize one of the principal components\n# Again, remember you need to use np.reshape() \n\n###########################\n\ntransformation_mat = pca.components_\n# two components to show\ncomp1 = 5\ncomp2 = 90\n\n# subplot \nax1 = plt.subplot(1,2,1)\nfilter1 = transformation_mat[comp1,:]\nax1.imshow(np.reshape(filter1, (64, 64)), cmap='Greys_r')\nax1.set_title(\"%dth Principal Component\"%(comp1))\n\n# subplot \nax2 = plt.subplot(1,2,2)\nfilter2 = transformation_mat[comp2,:]\nax2.imshow(np.reshape(filter2, (64, 64)), cmap='Greys_r')\nax2.set_title(\"%dth Principal Component\"%(comp2))\nplt.show()",
"Your output should look something like what's below (although could have differently ranked components):\n\n<span style=\"color:red\">Subtask 4: Generate a 2D scatter plot</span>",
"### Your code goes here ###\n\n# Train another PCA model to project the data into two dimensions\n# Bonus: color the scatter plot according to the person_ids to see if any structure can be seen\n\n# Run PCA for 2 components\n\n# Generate plot\n\n###########################\n\n\npca = PCA(n_components=2)\nstart_time = time.time()\nfaces_2d_proj = pca.fit_transform(faces)\nend_time = time.time()\n\nprint \"Training took a total of %.2f seconds.\" %(end_time-start_time)\nprint \"Preserved percentage of original variance: %.2f%%\" %(pca.explained_variance_ratio_.sum() * 100) \nprint \"Dataset is now of size: %d x %d\"%(faces_2d_proj.shape)\n\n# Generate plot\n\n# color the points by the person ids\ncolors = [plt.cm.Set1((c+1)/40.) for c in person_ids]\n\n# plot data\nplt.scatter(faces_2d_proj[:, 0], faces_2d_proj[:, 1], marker = 'o', c = colors, s = 175, alpha = .6)\nplt.title('2D Projection of Faces Dataset')\nplt.show()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
mne-tools/mne-tools.github.io
|
0.21/_downloads/974f822d2280f83b67727ee3355c7c2f/plot_sensor_connectivity.ipynb
|
bsd-3-clause
|
[
"%matplotlib inline",
"Compute all-to-all connectivity in sensor space\nComputes the Phase Lag Index (PLI) between all gradiometers and shows the\nconnectivity in 3D using the helmet geometry. The left visual stimulation data\nare used which produces strong connectivity in the right occipital sensors.",
"# Author: Martin Luessi <mluessi@nmr.mgh.harvard.edu>\n#\n# License: BSD (3-clause)\n\nimport mne\nfrom mne import io\nfrom mne.connectivity import spectral_connectivity\nfrom mne.datasets import sample\nfrom mne.viz import plot_sensors_connectivity\n\nprint(__doc__)",
"Set parameters",
"data_path = sample.data_path()\nraw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'\nevent_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'\n\n# Setup for reading the raw data\nraw = io.read_raw_fif(raw_fname)\nevents = mne.read_events(event_fname)\n\n# Add a bad channel\nraw.info['bads'] += ['MEG 2443']\n\n# Pick MEG gradiometers\npicks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,\n exclude='bads')\n\n# Create epochs for the visual condition\nevent_id, tmin, tmax = 3, -0.2, 1.5 # need a long enough epoch for 5 cycles\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6))\n\n# Compute connectivity for band containing the evoked response.\n# We exclude the baseline period\nfmin, fmax = 3., 9.\nsfreq = raw.info['sfreq'] # the sampling frequency\ntmin = 0.0 # exclude the baseline period\nepochs.load_data().pick_types(meg='grad') # just keep MEG and no EOG now\ncon, freqs, times, n_epochs, n_tapers = spectral_connectivity(\n epochs, method='pli', mode='multitaper', sfreq=sfreq, fmin=fmin, fmax=fmax,\n faverage=True, tmin=tmin, mt_adaptive=False, n_jobs=1)\n\n# Now, visualize the connectivity in 3D\nplot_sensors_connectivity(epochs.info, con[:, :, 0])"
] |
[
"code",
"markdown",
"code",
"markdown",
"code"
] |
AllenDowney/ThinkStats2
|
code/chap02ex.ipynb
|
gpl-3.0
|
[
"Chapter 2\nExamples and Exercises from Think Stats, 2nd Edition\nhttp://thinkstats2.com\nCopyright 2016 Allen B. Downey\nMIT License: https://opensource.org/licenses/MIT",
"import numpy as np\n\nfrom os.path import basename, exists\n\n\ndef download(url):\n filename = basename(url)\n if not exists(filename):\n from urllib.request import urlretrieve\n\n local, _ = urlretrieve(url, filename)\n print(\"Downloaded \" + local)\n\n\ndownload(\"https://github.com/AllenDowney/ThinkStats2/raw/master/code/thinkstats2.py\")\ndownload(\"https://github.com/AllenDowney/ThinkStats2/raw/master/code/thinkplot.py\")",
"Given a list of values, there are several ways to count the frequency of each value.",
"t = [1, 2, 2, 3, 5]",
"You can use a Python dictionary:",
"hist = {}\nfor x in t:\n hist[x] = hist.get(x, 0) + 1\n \nhist",
"You can use a Counter (which is a dictionary with additional methods):",
"from collections import Counter\ncounter = Counter(t)\ncounter",
"Or you can use the Hist object provided by thinkstats2:",
"import thinkstats2\nhist = thinkstats2.Hist([1, 2, 2, 3, 5])\nhist",
"Hist provides Freq, which looks up the frequency of a value.",
"hist.Freq(2)",
"You can also use the bracket operator, which does the same thing.",
"hist[2]",
"If the value does not appear, it has frequency 0.",
"hist[4]",
"The Values method returns the values:",
"hist.Values()",
"So you can iterate the values and their frequencies like this:",
"for val in sorted(hist.Values()):\n print(val, hist[val])",
"Or you can use the Items method:",
"for val, freq in hist.Items():\n print(val, freq)",
"thinkplot is a wrapper for matplotlib that provides functions that work with the objects in thinkstats2.\nFor example Hist plots the values and their frequencies as a bar graph.\nConfig takes parameters that label the x and y axes, among other things.",
"import thinkplot\nthinkplot.Hist(hist)\nthinkplot.Config(xlabel='value', ylabel='frequency')",
"As an example, I'll replicate some of the figures from the book.\nFirst, I'll load the data from the pregnancy file and select the records for live births.",
"download(\"https://github.com/AllenDowney/ThinkStats2/raw/master/code/nsfg.py\")\n\ndownload(\"https://github.com/AllenDowney/ThinkStats2/raw/master/code/2002FemPreg.dct\")\ndownload(\n \"https://github.com/AllenDowney/ThinkStats2/raw/master/code/2002FemPreg.dat.gz\"\n)\n\nimport nsfg\n\npreg = nsfg.ReadFemPreg()\nlive = preg[preg.outcome == 1]",
"Here's the histogram of birth weights in pounds. Notice that Hist works with anything iterable, including a Pandas Series. The label attribute appears in the legend when you plot the Hist.",
"hist = thinkstats2.Hist(live.birthwgt_lb, label='birthwgt_lb')\nthinkplot.Hist(hist)\nthinkplot.Config(xlabel='Birth weight (pounds)', ylabel='Count')",
"Before plotting the ages, I'll apply floor to round down:",
"ages = np.floor(live.agepreg)\n\nhist = thinkstats2.Hist(ages, label='agepreg')\nthinkplot.Hist(hist)\nthinkplot.Config(xlabel='years', ylabel='Count')",
"As an exercise, plot the histogram of pregnancy lengths (column prglngth).\nHist provides smallest, which selects the lowest values and their frequencies.",
"for weeks, freq in hist.Smallest(10):\n print(weeks, freq)",
"Use Largest to display the longest pregnancy lengths.\nFrom live births, we can select first babies and others using birthord, then compute histograms of pregnancy length for the two groups.",
"firsts = live[live.birthord == 1]\nothers = live[live.birthord != 1]\n\nfirst_hist = thinkstats2.Hist(firsts.prglngth, label='first')\nother_hist = thinkstats2.Hist(others.prglngth, label='other')",
"We can use width and align to plot two histograms side-by-side.",
"width = 0.45\nthinkplot.PrePlot(2)\nthinkplot.Hist(first_hist, align='right', width=width)\nthinkplot.Hist(other_hist, align='left', width=width)\nthinkplot.Config(xlabel='weeks', ylabel='Count', xlim=[27, 46])",
"Series provides methods to compute summary statistics:",
"mean = live.prglngth.mean()\nvar = live.prglngth.var()\nstd = live.prglngth.std()",
"Here are the mean and standard deviation:",
"mean, std",
"As an exercise, confirm that std is the square root of var:\nHere are the mean pregnancy lengths for first babies and others:",
"firsts.prglngth.mean(), others.prglngth.mean()",
"And here's the difference (in weeks):",
"firsts.prglngth.mean() - others.prglngth.mean()",
"This function computes the Cohen effect size, which is the difference in means expressed in number of standard deviations:",
"def CohenEffectSize(group1, group2):\n \"\"\"Computes Cohen's effect size for two groups.\n \n group1: Series or DataFrame\n group2: Series or DataFrame\n \n returns: float if the arguments are Series;\n Series if the arguments are DataFrames\n \"\"\"\n diff = group1.mean() - group2.mean()\n\n var1 = group1.var()\n var2 = group2.var()\n n1, n2 = len(group1), len(group2)\n\n pooled_var = (n1 * var1 + n2 * var2) / (n1 + n2)\n d = diff / np.sqrt(pooled_var)\n return d",
"Compute the Cohen effect size for the difference in pregnancy length for first babies and others.\nExercises\nUsing the variable totalwgt_lb, investigate whether first babies are lighter or heavier than others. \nCompute Cohen’s effect size to quantify the difference between the groups. How does it compare to the difference in pregnancy length?\nFor the next few exercises, we'll load the respondent file:",
"download(\"https://github.com/AllenDowney/ThinkStats2/raw/master/code/2002FemResp.dct\")\ndownload(\"https://github.com/AllenDowney/ThinkStats2/raw/master/code/2002FemResp.dat.gz\")\n\nresp = nsfg.ReadFemResp()",
"Make a histogram of <tt>totincr</tt> the total income for the respondent's family. To interpret the codes see the codebook.\nMake a histogram of <tt>age_r</tt>, the respondent's age at the time of interview.\nMake a histogram of <tt>numfmhh</tt>, the number of people in the respondent's household.\nMake a histogram of <tt>parity</tt>, the number of children borne by the respondent. How would you describe this distribution?\nUse Hist.Largest to find the largest values of <tt>parity</tt>.\nLet's investigate whether people with higher income have higher parity. Keep in mind that in this study, we are observing different people at different times during their lives, so this data is not the best choice for answering this question. But for now let's take it at face value.\nUse <tt>totincr</tt> to select the respondents with the highest income (level 14). Plot the histogram of <tt>parity</tt> for just the high income respondents.\nFind the largest parities for high income respondents.\nCompare the mean <tt>parity</tt> for high income respondents and others.\nCompute the Cohen effect size for this difference. How does it compare with the difference in pregnancy length for first babies and others?"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
kit-cel/lecture-examples
|
mloc/ch4_Autoencoders/Autoencoder_Compression_Binarizer.ipynb
|
gpl-2.0
|
[
"Image Compression using Autoencoders with BPSK\nThis code is provided as supplementary material of the lecture Machine Learning and Optimization in Communications (MLOC).<br>\nThis code illustrates\n* joint compression and error protection of images by auto-encoders\n* generation of BPSK symbols using stochastic quantizers\n* transmission over a binary symmetric channel (BSC)",
"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nprint(\"We are using the following device for learning:\",device)",
"Import and load MNIST dataset (Preprocessing)\nDataloader are powerful instruments, which help you to prepare your data. E.g. you can shuffle your data, transform data (standardize/normalize), divide it into batches, ... For more information see https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader <br>\nIn our case, we just use the dataloader to download the Dataset and preprocess the data on our own.",
"batch_size_train = 60000 # Samples per Training Batch\nbatch_size_test = 10000 # just create one large test dataset (MNIST test dataset has 10.000 Samples)\n\n# Get Training and Test Dataset with a Dataloader\ntrain_loader = torch.utils.data.DataLoader(\n torchvision.datasets.MNIST('./files/', train=True, download=True,\n transform=torchvision.transforms.Compose([torchvision.transforms.ToTensor()])),\n batch_size=batch_size_train, shuffle=True)\n\ntest_loader = torch.utils.data.DataLoader(\n torchvision.datasets.MNIST('./files/', train=False, download=True,\n transform=torchvision.transforms.Compose([torchvision.transforms.ToTensor()])),\n batch_size=batch_size_test, shuffle=True)\n\n# We are only interessted in the data and not in the targets\nfor idx, (data, targets) in enumerate(train_loader):\n x_train = data[:,0,:,:]\n\nfor idx, (data, targets) in enumerate(test_loader):\n x_test = data[:,0,:,:]\n\nimage_size = x_train.shape[1]\nx_test_flat = torch.reshape(x_test, (x_test.shape[0], image_size*image_size))",
"Plot 8 random images",
"plt.figure(figsize=(16,2))\nfor k in range(8):\n plt.subplot(1,8,k+1)\n plt.imshow(x_train[np.random.randint(x_train.shape[0])], interpolation='nearest', cmap='binary')\n plt.xticks(())\n plt.yticks(())",
"Specify Autoencoder\nAs explained in the lecture, we are using Stochastic Quantization. This means for the training process (def forward), we employ stochastic quantization in the forward path but during back-propagation, we consider the quantization device as being\nnon-existent (.detach()). While validating and testing, use deterministic quantization (def test) <br>\nNote: .detach() removes the tensor from the computation graph",
"# target compression rate\nbit_per_image = 24\n\n# BSC error probability\nPe = 0.05\n\nhidden_encoder_1 = 500\nhidden_encoder_2 = 250\nhidden_encoder_3 = 100\nhidden_encoder = [hidden_encoder_1, hidden_encoder_2, hidden_encoder_3]\n\nhidden_decoder_1 = 100\nhidden_decoder_2 = 250\nhidden_decoder_3 = 500\nhidden_decoder = [hidden_decoder_1, hidden_decoder_2, hidden_decoder_3]\n\n\nclass Autoencoder(nn.Module):\n def __init__(self, hidden_encoder, hidden_decoder, image_size, bit_per_image):\n super(Autoencoder, self).__init__()\n # Define Transmitter Layer: Linear function, M input neurons (symbols), 2 output neurons (real and imaginary part) \n self.We1 = nn.Linear(image_size*image_size, hidden_encoder[0]) \n self.We2 = nn.Linear(hidden_encoder[0], hidden_encoder[1]) \n self.We3 = nn.Linear(hidden_encoder[1], hidden_encoder[2]) \n self.We4 = nn.Linear(hidden_encoder[2], bit_per_image) \n \n # Define Receiver Layer: Linear function, 2 input neurons (real and imaginary part), M output neurons (symbols)\n self.Wd1 = nn.Linear(bit_per_image,hidden_decoder[0]) \n self.Wd2 = nn.Linear(hidden_decoder[0], hidden_decoder[1]) \n self.Wd3 = nn.Linear(hidden_decoder[1], hidden_decoder[2]) \n self.Wd4 = nn.Linear(hidden_decoder[2], image_size*image_size) \n\n # Non-linearity (used in transmitter and receiver)\n self.activation_function = nn.ELU() \n self.sigmoid = nn.Sigmoid()\n self.softsign = nn.Softsign()\n\n def forward(self, training_data, Pe):\n encoded = self.encoder(training_data)\n # random binarization in training\n ti = encoded.clone()\n compressed = ti + (self.binarizer(ti) - ti).detach()\n # add error pattern (flip the bit or not)\n error_tensor = torch.distributions.Bernoulli(Pe * torch.ones_like(compressed)).sample() \n received = torch.mul( compressed, 1 - 2*error_tensor)\n \n reconstructed = self.decoder(received)\n return reconstructed\n \n def test(self, valid_data, Pe):\n encoded_test = self.encoder(valid_data)\n compressed_test = 
self.binarizer_deterministic(encoded_test)\n error_tensor_test = torch.distributions.Bernoulli(Pe * torch.ones_like(compressed_test)).sample()\n received_test = torch.mul( compressed_test, 1 - 2*error_tensor_test )\n reconstructed_test = self.decoder(received_test)\n loss_test = torch.mean(torch.square(valid_data - reconstructed_test))\n\n reconstructed_test_noerror = self.decoder(compressed_test)\n return reconstructed_test\n \n def encoder(self, batch):\n temp = self.activation_function(self.We1(batch))\n temp = self.activation_function(self.We2(temp))\n temp = self.activation_function(self.We3(temp))\n output = self.softsign(self.We4(temp))\n return output\n \n def decoder(self, batch):\n temp = self.activation_function(self.Wd1(batch))\n temp = self.activation_function(self.Wd2(temp))\n temp = self.activation_function(self.Wd3(temp))\n output = self.sigmoid(self.Wd4(temp))\n return output\n \n def binarizer(self, input):\n # This is the stochastic quatizer which we use for the training\n prob = torch.div(torch.add(input, 1.0), 2.0)\n bernoulli = torch.distributions.Bernoulli(prob) # torch.distributions.bernoulli.\n # bernoulli = tf.distributions.Bernoulli(probs=prob, dtype=tf.float32)\n return 2*bernoulli.sample() - 1\n\n def binarizer_deterministic(self, input):\n # This is the deteministic quatizer which we use for \n return torch.sign(input)",
"Helper function to get a random mini-batch of images",
"def get_batch(x, batch_size):\n idxs = np.random.randint(0, x.shape[0], (batch_size))\n return torch.stack([torch.reshape(x[k], (-1,)) for k in idxs])",
"Perform the training",
"batch_size = 250\n \nmodel = Autoencoder(hidden_encoder, hidden_decoder, image_size, bit_per_image)\nmodel.to(device)\n\n# Mean Squared Error loss\nloss_fn = nn.MSELoss()\n\n# Adam Optimizer\noptimizer = optim.Adam(model.parameters()) \n \nprint('Start Training') # Training loop\n\nfor it in range(25000): # Original paper does 50k iterations \n mini_batch = torch.Tensor(get_batch(x_train, batch_size)).to(device)\n # Propagate (training) data through the net\n reconstructed = model(mini_batch, Pe)\n \n # compute loss\n loss = loss_fn(mini_batch, reconstructed)\n\n # compute gradients\n loss.backward()\n\n # Adapt weights\n optimizer.step()\n\n # reset gradients\n optimizer.zero_grad()\n \n # Evaulation with the test data\n if it % 1000 == 0:\n reconstructed_test = model.test(x_test_flat.to(device), Pe)\n loss_test = torch.mean(torch.square(x_test_flat.to(device) - reconstructed_test))\n print('It %d: Loss %1.5f' % (it, loss_test.detach().cpu().numpy().squeeze()))\n \nprint('Training finished')",
"Evaluation\nCompare sent and received images",
"valid_images = model.test(x_test_flat.to(device), Pe).detach().cpu().numpy()\nvalid_binary = 0.5*(1 - model.binarizer(model.encoder(x_test_flat.to(device)))).detach().cpu().numpy() # from bipolar (BPSK) to binary\n# show 8 images and their reconstructed versions\nplt.figure(figsize=(16,4))\nidxs = np.random.randint(x_test.shape[0],size=8)\nfor k in range(8):\n plt.subplot(2,8,k+1) \n plt.imshow(np.reshape(x_test_flat[idxs[k]], (image_size,image_size)), interpolation='nearest', cmap='binary') \n plt.xticks(())\n plt.yticks(())\n \n plt.subplot(2,8,k+1+8)\n plt.imshow(np.reshape(valid_images[idxs[k]], (image_size,image_size)), interpolation='nearest', cmap='binary') \n plt.xticks(())\n plt.yticks(())\n\n# print binary data of the images\nfor k in range(8):\n print('Image %d: ' % (k+1), valid_binary[idxs[k],:])",
"Generate 8 arbitrary images just by sampling random bit strings",
"random_data = 1-2*np.random.randint(2,size=(8,bit_per_image))\ngenerated_images = model.decoder(torch.Tensor(random_data).to(device)).detach().cpu().numpy()\nplt.figure(figsize=(16,2))\nfor k in range(8):\n plt.subplot(1,8,k+1)\n plt.imshow(np.reshape(generated_images[k],(image_size,image_size)), interpolation='nearest', cmap='binary')\n plt.xticks(())\n plt.yticks(())"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
kimegitee/deep-learning
|
first-neural-network/Your_first_neural_network.ipynb
|
mit
|
[
"Your first neural network\nIn this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more.",
"%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt",
"Load and prepare the data\nA critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon!",
"data_path = 'Bike-Sharing-Dataset/hour.csv'\n\nrides = pd.read_csv(data_path)\n\nrides.head()",
"Checking out the data\nThis dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the cnt column. You can see the first few rows of the data above.\nBelow is a plot showing the number of bike riders over the first 10 days in the data set. You can see the hourly rentals here. This data is pretty complicated! The weekends have lower over all ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model.",
"rides[:24*10].plot(x='dteday', y='cnt')",
"Dummy variables\nHere we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to get_dummies().",
"dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']\nfor each in dummy_fields:\n dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)\n rides = pd.concat([rides, dummies], axis=1)\n\nfields_to_drop = ['instant', 'dteday', 'season', 'weathersit', \n 'weekday', 'atemp', 'mnth', 'workingday', 'hr']\ndata = rides.drop(fields_to_drop, axis=1)\ndata.head()",
"Scaling target variables\nTo make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1.\nThe scaling factors are saved so we can go backwards when we use the network for predictions.",
"quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']\n# Store scalings in a dictionary so we can convert back later\nscaled_features = {}\nfor each in quant_features:\n mean, std = data[each].mean(), data[each].std()\n scaled_features[each] = [mean, std]\n data.loc[:, each] = (data[each] - mean)/std",
"Splitting the data into training, testing, and validation sets\nWe'll save the last 21 days of the data to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders.",
"# Save the last 21 days \ntest_data = data[-21*24:]\ndata = data[:-21*24]\n\n# Separate the data into features and targets\ntarget_fields = ['cnt', 'casual', 'registered']\nfeatures, targets = data.drop(target_fields, axis=1), data[target_fields]\ntest_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]",
"We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).",
"# Hold out the last 60 days of the remaining data as a validation set\ntrain_features, train_targets = features[:-60*24], targets[:-60*24]\nval_features, val_targets = features[-60*24:], targets[-60*24:]",
"Time to build the network\nBelow you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes.\nThe network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called forward propagation.\nWe use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called backpropagation.\n\nHint: You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$.\n\nBelow, you have these tasks:\n1. Implement the sigmoid function to use as the activation function. Set self.activation_function in __init__ to your sigmoid function.\n2. Implement the forward pass in the train method.\n3. Implement the backpropagation algorithm in the train method, including calculating the output error.\n4. Implement the forward pass in the run method.",
"class NeuralNetwork(object):\n def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):\n # Set number of nodes in input, hidden and output layers.\n self.input_nodes = input_nodes\n self.hidden_nodes = hidden_nodes\n self.output_nodes = output_nodes\n\n # Initialize weights\n self.weights_input_to_hidden = np.random.normal(0.0, self.hidden_nodes**-0.5, \n (self.hidden_nodes, self.input_nodes))\n\n self.weights_hidden_to_output = np.random.normal(0.0, self.output_nodes**-0.5, \n (self.output_nodes, self.hidden_nodes))\n self.lr = learning_rate\n \n #### Set this to your implemented sigmoid function ####\n # Activation function is the sigmoid function\n self.activation_function = lambda x: 1/(1+np.exp(-x))\n \n def train(self, inputs_list, targets_list):\n # Convert inputs list to 2d array\n inputs = np.array(inputs_list, ndmin=2).T\n targets = np.array(targets_list, ndmin=2).T\n \n #### Implement the forward pass here ####\n ### Forward pass ###\n # TODO: Hidden layer\n hidden_inputs = np.dot(self.weights_input_to_hidden, inputs)\n hidden_outputs = self.activation_function(hidden_inputs)\n \n # TODO: Output layer\n final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs)\n final_outputs = final_inputs\n \n #### Implement the backward pass here ####\n ### Backward pass ###\n \n # TODO: Output error\n # Output layer error is the difference between desired target and actual output.\n output_errors = targets - final_outputs\n \n # TODO: Backpropagated error\n # errors propagated to the hidden layer\n hidden_errors = np.dot(self.weights_hidden_to_output.T, output_errors)\n # hidden layer gradients\n hidden_grad = hidden_outputs*(1-hidden_outputs)\n \n # TODO: Update the weights\n # update hidden-to-output weights with gradient descent step\n self.weights_hidden_to_output += self.lr*np.dot(output_errors, hidden_outputs.T)\n # update input-to-hidden weights with gradient descent step\n self.weights_input_to_hidden += 
self.lr*np.dot((hidden_errors*hidden_grad), inputs.T)\n \n \n def run(self, inputs_list):\n # Run a forward pass through the network\n inputs = np.array(inputs_list, ndmin=2).T\n \n #### Implement the forward pass here ####\n # TODO: Hidden layer\n hidden_inputs = np.dot(self.weights_input_to_hidden, inputs)\n hidden_outputs = self.activation_function(hidden_inputs)\n \n # TODO: Output layer\n final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs)\n final_outputs = final_inputs\n \n return final_outputs\n\ndef MSE(y, Y):\n return np.mean((y-Y)**2)",
"Training the network\nHere you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops.\nYou'll also be using a method know as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later.\nChoose the number of epochs\nThis is the number of times the dataset will pass through the network, each time updating the weights. As the number of epochs increases, the network becomes better and better at predicting the targets in the training set. You'll need to choose enough epochs to train the network well but not too many or you'll be overfitting.\nChoose the learning rate\nThis scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge.\nChoose the number of hidden nodes\nThe more hidden nodes you have, the more accurate predictions the model will make. Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. 
If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose.",
"import sys\n\n### Set the hyperparameters here ###\nepochs = 2000\nlearning_rate = 0.01\nhidden_nodes = 25\noutput_nodes = 4\n\nN_i = train_features.shape[1]\nnetwork = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)\n\nlosses = {'train':[], 'validation':[]}\nfor e in range(epochs):\n # Go through a random batch of 128 records from the training data set\n batch = np.random.choice(train_features.index, size=128)\n for record, target in zip(train_features.ix[batch].values, \n train_targets.ix[batch]['cnt']):\n network.train(record, target)\n \n # Printing out the training progress\n train_loss = MSE(network.run(train_features), train_targets['cnt'].values)\n val_loss = MSE(network.run(val_features), val_targets['cnt'].values)\n sys.stdout.write(\"\\rProgress: \" + str(100 * e/float(epochs))[:4] \\\n + \"% ... Training loss: \" + str(train_loss)[:5] \\\n + \" ... Validation loss: \" + str(val_loss)[:5])\n \n losses['train'].append(train_loss)\n losses['validation'].append(val_loss)\n\nplt.plot(losses['train'], label='Training loss')\nplt.plot(losses['validation'], label='Validation loss')\nplt.legend()\nplt.ylim(ymax=0.5)",
"Check out your predictions\nHere, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly.",
"fig, ax = plt.subplots(figsize=(8,4))\n\nmean, std = scaled_features['cnt']\npredictions = network.run(test_features)*std + mean\nax.plot(predictions[0], label='Prediction')\nax.plot((test_targets['cnt']*std + mean).values, label='Data')\nax.set_xlim(right=len(predictions))\nax.legend()\n\ndates = pd.to_datetime(rides.ix[test_data.index]['dteday'])\ndates = dates.apply(lambda d: d.strftime('%b %d'))\nax.set_xticks(np.arange(len(dates))[12::24])\n_ = ax.set_xticklabels(dates[12::24], rotation=45)",
"Thinking about your results\nAnswer these questions about your results. How well does the model predict the data? Where does it fail? Why does it fail where it does?\nYour answer below\nThe model captures well the patterns during a normal week, including the relative winding down in the weekends and the daily spikes in the morning and afternoon. However it falls short when predicting ridership from the weekend before Christmas to New Year's Day. The overall trend for this specific period was reflected in the model, as evidenced by its lowering of the estimates compared to other weeks, but the adjustments weren't strong enough to accurately predict the actual counts. This indicates an anomaly in the data whose cause requires additional data to validate. Using our intuition and knowledge of the cultural context we can guess at the cause of the discrepancy. It is possible that riders decrease their activities as the holiday season draws near. However, to confirm the hypothesis we need to see the behavior repeats several times. In the dataset we see that the behavior shows up only once because we have excluded the last 21 days from the 2 year period from the training and validation sets which leaves us with only 1 holiday season in the first year. Therefore, the sudden decrease is counted toward the weights but the signal wasn't strong enough to reliably establish it as an actual pattern compared to the daily fluctuations which the model have enough data points to train on, hence the halfway appearance of the prediction. The takeaway is that deep learning results still require personal oversight and interpretation from the user of the technique.\nUnit tests\nRun these unit tests to check the correctness of your network implementation. These tests must all be successful to pass the project.",
"import unittest\n\ninputs = [0.5, -0.2, 0.1]\ntargets = [0.4]\ntest_w_i_h = np.array([[0.1, 0.4, -0.3], \n [-0.2, 0.5, 0.2]])\ntest_w_h_o = np.array([[0.3, -0.1]])\n\nclass TestMethods(unittest.TestCase):\n \n ##########\n # Unit tests for data loading\n ##########\n \n def test_data_path(self):\n # Test that file path to dataset has been unaltered\n self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')\n \n def test_data_loaded(self):\n # Test that data frame loaded\n self.assertTrue(isinstance(rides, pd.DataFrame))\n \n ##########\n # Unit tests for network functionality\n ##########\n\n def test_activation(self):\n network = NeuralNetwork(3, 2, 1, 0.5)\n # Test that the activation function is a sigmoid\n self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))\n\n def test_train(self):\n # Test that weights are updated correctly on training\n network = NeuralNetwork(3, 2, 1, 0.5)\n network.weights_input_to_hidden = test_w_i_h.copy()\n network.weights_hidden_to_output = test_w_h_o.copy()\n \n network.train(inputs, targets)\n self.assertTrue(np.allclose(network.weights_hidden_to_output, \n np.array([[ 0.37275328, -0.03172939]])))\n self.assertTrue(np.allclose(network.weights_input_to_hidden,\n np.array([[ 0.10562014, 0.39775194, -0.29887597],\n [-0.20185996, 0.50074398, 0.19962801]])))\n\n def test_run(self):\n # Test correctness of run method\n network = NeuralNetwork(3, 2, 1, 0.5)\n network.weights_input_to_hidden = test_w_i_h.copy()\n network.weights_hidden_to_output = test_w_h_o.copy()\n\n self.assertTrue(np.allclose(network.run(inputs), 0.09998924))\n\nsuite = unittest.TestLoader().loadTestsFromModule(TestMethods())\nunittest.TextTestRunner().run(suite)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
yingchi/fastai-notes
|
deeplearning1/nbs/try-resnet.ipynb
|
apache-2.0
|
[
"Try Resnet\nParas Setting",
"import sys, os\ncurrent_dir = os.getcwd()\nHOME_DIR = current_dir\nDATA_HOME_DIR = current_dir + '/data/redux/'\nprint(current_dir, DATA_HOME_DIR)\n\nfrom utils import *\n%matplotlib inline\n\nbatch_size = 64\nno_of_epochs = 3\npath = DATA_HOME_DIR\nmodel_path = path+'model'",
"Use Reset",
"import resnet50\nfrom resnet50 import Resnet50\n\n# omit the last dense layer, so that we don't have to do model.pop\n# before we finetune the model to suit our purpose\nrn0 = Resnet50(include_top=False).model\nrn0.output_shape[1:]",
"```py\nfrom utils.py\ndef get_classes(path):\n batches = get_batches(path+'train', shuffle=False, batch_size=1)\n val_batches = get_batches(path+'valid', shuffle=False, batch_size=1)\n test_batches = get_batches(path+'test', shuffle=False, batch_size=1)\n return (val_batches.classes, batches.classes, onehot(val_batches.classes),\n onehot(batches.classes), val_batches.filenames, batches.filenames, \n test_batches.filenames)\n```",
"batches = get_batches(path+'train', shuffle=False, batch_size=batch_size)\nval_batches = get_batches(path+'valid', batch_size=batch_size*2, shuffle=False)\n# labels are the one-hot encoded version of classes\n(val_classes, trn_classes, val_labels, trn_labels, val_filenames, filenames,\ntest_filenames) = get_classes(path)\n\nval_features = rn0.predict_generator(val_batches, val_batches.nb_sample)\n\ntrn_features = rn0.predict_generator(batches, batches.nb_sample)",
"py\ndef save_array(fname, arr):\n c=bcolz.carray(arr, rootdir=fname, mode='w')\n c.flush()",
"save_array(model_path + 'trn_rn0_conv.bc', trn_features)\nsave_array(model_path + 'val_rn0_conv.bc', val_features)\n\ntrn_features = load_array(model_path + 'trn_rn0_conv.bc')\nval_features = load_array(model_path + 'val_rn0_conv.bc')",
"FC net",
"def get_fc_layers(p):\n return [\n BatchNormalization(axis=1, input_shape=rn0.output_shape[1:]),\n Flatten(),\n Dropout(p),\n Dense(1024, activation='relu'),\n BatchNormalization(),\n Dropout(p/2),\n Dense(1024, activation='relu'),\n BatchNormalization(),\n Dropout(p),\n Dense(2, activation='softmax')\n ]\n\nmodel = Sequential(get_fc_layers(.5))\n\nmodel.compile(optimizer=Adam(), loss='categorical_crossentropy', metrics=['accuracy'])\n\nmodel.fit(trn_features, trn_labels, nb_epoch=2,\n batch_size=batch_size, validation_data=(val_features, val_labels))",
"Global average pooling",
"def get_ap_layers(p):\n return [\n GlobalAveragePooling2D(input_shape=rn0.output_shape[1:]),\n Dropout(p),\n Dense(2, activation='softmax')\n ]\n\nmodel = Sequential(get_ap_layers(0.2))\n\nmodel.compile(optimizer=Adam(), loss='categorical_crossentropy', metrics=['accuracy'])\n\nmodel.fit(trn_features, trn_labels, nb_epoch=3,\n batch_size=batch_size, validation_data=(val_features, val_labels))",
"Use Resnet Large",
"rn1 = Resnet50(include_top=False, size=(400, 400)).model\nrn1.output_shape[1:]\n\nbatches = get_batches(path+'train', shuffle=False, batch_size=batch_size,\n target_size=(400, 400))\nval_batches = get_batches(path+'valid', batch_size=batch_size*2, shuffle=False,\n target_size=(400, 400))\n# labels are the one-hot encoded version of classes\n(val_classes, trn_classes, val_labels, trn_labels, val_filenames, filenames,\ntest_filenames) = get_classes(path)\n\nval_features = rn1.predict_generator(val_batches, val_batches.nb_sample)\ntrn_features = rn1.predict_generator(val_batches, val_batches.nb_sample)\n\nsave_array(model_path + 'trn_rn1_conv.bc', trn_features)\nsave_array(model_path + 'val_rn1_conv.bc', val_features)\n\ntrn_features = load_array(model_path + 'trn_rn1_conv.bc')\nval_features = load_array(model_path + 'val_rn1_conv.bc')\n\ndef get_ap_layers(p):\n return [\n GlobalAveragePooling2D(input_shape=rn1.output_shape[1:]),\n Dropout(p),\n Dense(2, activation='softmax')\n ]\n\nmodel.fit(trn_features, trn_labels, nb_epoch=3,\n batch_size=batch_size, validation_data=(val_features, val_labels))"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
karlstroetmann/Formal-Languages
|
ANTLR4-Python/Earley-Parser/Earley-Parser.ipynb
|
gpl-2.0
|
[
"from IPython.core.display import HTML\nwith open('../../style.css', 'r') as file:\n css = file.read()\nHTML(css)",
"Implementing an Earley Parser\nA Grammar for Grammars\nEarley's algorithm has two inputs:\n- a grammar $G$ and\n- a string $s$.\nIt then checks whether the string $s$ can be parsed with the given grammar.\nIn order to input the grammar in a natural way, we first have to develop a parser for grammars.\nAn example grammar that we want to parse is stored in the file simple.g.",
"!type simple.g\n\n!cat simple.g",
"We use <span style=\"font-variant:small-caps;\">Antlr</span> to develop a parser for this Grammar.\nThe pure grammar to parse this type of grammar is stored in\nthe file Pure.g4.",
"!type Pure.g4\n\n!cat Pure.g4",
"The annotated grammar is stored in the file Grammar.g4.",
"!type Grammar.g4\n\n!cat -n Grammar.g4",
"We start by generating both scanner and parser.",
"!antlr4 -Dlanguage=Python3 Grammar.g4\n\nfrom GrammarLexer import GrammarLexer\nfrom GrammarParser import GrammarParser\nimport antlr4",
"The function parse_grammar takes a filename as its argument and returns the grammar that is stored in the given file. The grammar is represented as list of rules. Each rule is represented as a tuple. The example below will clarify this structure.",
"def parse_grammar(filename):\n input_stream = antlr4.FileStream(filename)\n lexer = GrammarLexer(input_stream)\n token_stream = antlr4.CommonTokenStream(lexer)\n parser = GrammarParser(token_stream)\n grammar = parser.start()\n return grammar.g\n\nparse_grammar('simple.g')",
"Earley's Algorithm\nGiven a context-free grammar $G = \\langle V, \\Sigma, R, S \\rangle$ and a string $s = x_1x_2 \\cdots x_n \\in \\Sigma^$ of length $n$, \nan Earley item* is a pair of the form\n$$\\langle A \\rightarrow \\alpha \\bullet \\beta, k \\rangle$$\nsuch that \n- $(A \\rightarrow \\alpha \\beta) \\in R\\quad$ and\n- $k \\in {0,1,\\cdots,n}$. \nThe class EarleyItem represents a single Earley item.\n- mVariable is the variable $A$,\n- mAlpha is $\\alpha$,\n- mBeta is $\\beta$, and\n- mIndex is $k$.\nSince we later have to store objects of class EarleyItem in sets, we have to implement the functions\n- __eq__,\n- __ne__,\n- __hash__.\nIt is easiest to implement __hash__ by first converting the object into a string. Hence we also\nimplement the function __repr__, that converts an EarleyItem into a string.",
"class EarleyItem():\n def __init__(self, variable, alpha, beta, index):\n self.mVariable = variable\n self.mAlpha = alpha\n self.mBeta = beta\n self.mIndex = index\n \n def __eq__(self, other):\n return isinstance(other, EarleyItem) and \\\n self.mVariable == other.mVariable and \\\n self.mAlpha == other.mAlpha and \\\n self.mBeta == other.mBeta and \\\n self.mIndex == other.mIndex\n \n def __ne__(self, other):\n return not self.__eq__(other)\n \n def __hash__(self):\n return hash(self.__repr__())\n \n def __repr__(self):\n alphaStr = ' '.join(self.mAlpha)\n betaStr = ' '.join(self.mBeta)\n return f'<{self.mVariable} → {alphaStr} • {betaStr}, {self.mIndex}>'",
"Given an Earley item self, the function isComplete checks, whether the Earley item self has the form\n$$\\langle A \\rightarrow \\alpha \\bullet, k \\rangle,$$\ni.e. whether the $\\bullet$ is at the end of the grammar rule.",
"def isComplete(self):\n return self.mBeta == ()\n\nEarleyItem.isComplete = isComplete\ndel isComplete",
"The function sameVar(self, C) checks, whether the item following the dot is the same as the variable \ngiven as argument, i.e. sameVar(self, C) returns True if self is an Earley item of the form\n$$\\langle A \\rightarrow \\alpha \\bullet C\\beta, k \\rangle.$$",
"def sameVar(self, C):\n return len(self.mBeta) > 0 and self.mBeta[0] == C\n\nEarleyItem.sameVar = sameVar\ndel sameVar",
"The function scan(self, t) checks, whether the item following the dot matches the token t, \ni.e. scan(self, t) returns True if self is an Earley item of the form\n$$\\langle A \\rightarrow \\alpha \\bullet t\\beta, k \\rangle.$$\nThe argument $t$ can either be the name of a token or a literal.",
"def scan(self, t):\n if len(self.mBeta) > 0:\n return self.mBeta[0] == t or self.mBeta[0] == \"'\" + t + \"'\"\n return False\n\nEarleyItem.scan = scan\ndel scan",
"Given an Earley item, this function returns the name of the variable following the dot. If there is no variable following the dot, the function returns None. The function can distinguish variables from token names because variable names consist only of lower case letters.",
"def nextVar(self):\n if len(self.mBeta) > 0:\n var = self.mBeta[0]\n if var[0] != \"'\" and var.islower():\n return var\n return None\n\nEarleyItem.nextVar = nextVar\ndel nextVar",
"The function moveDot(self) moves the $\\bullet$ in the Earley item self, where self has the form \n$$\\langle A \\rightarrow \\alpha \\bullet \\beta, k \\rangle$$\nover the next variable, token, or literal in $\\beta$. It assumes that $\\beta$ is not empty.",
"def moveDot(self):\n return EarleyItem(self.mVariable, \n self.mAlpha + (self.mBeta[0],), \n self.mBeta[1:], \n self.mIndex)\n\nEarleyItem.moveDot = moveDot\ndel moveDot",
"The class Grammar represents a context free grammar. It stores a list of the rules of the grammar.\nEach grammar rule of the form\n$$ a \\rightarrow \\beta $$\nis stored as the tuple $(a,) + \\beta$. The start symbol is assumed to be the variable on the left hand side of\nthe first rule. To distinguish syntactical variables from tokens, variables contain only lower case letters,\nwhile tokens either contain only upper case letters or they start and end with a single quote character \"'\".",
"class Grammar():\n def __init__(self, Rules):\n self.mRules = Rules ",
"The function startItem returns the Earley item\n$$ \\langle\\hat{S} \\rightarrow \\bullet S, 0\\rangle $$\nwhere $S$ is the start variable of the given grammar and $\\hat{S}$ is a new variable.",
"def startItem(self):\n return EarleyItem('Start', (), (self.startVar(),), 0)\n\nGrammar.startItem = startItem\ndel startItem",
"The function finishItem returns the Earley item\n$$ \\langle\\hat{S} \\rightarrow S \\bullet, 0\\rangle $$\nwhere $S$ is the start variable of the given grammar and $\\hat{S}$ is a new variable.",
"def finishItem(self):\n return EarleyItem('Start', (self.startVar(),), (), 0)\n\nGrammar.finishItem = finishItem\ndel finishItem",
"The function startVar returns the start variable of the grammar. It is assumed that\nthe first rule grammar starts with the start variable of the grammar.",
"def startVar(self):\n return self.mRules[0][0]\n\nGrammar.startVar = startVar\ndel startVar",
"The function toString creates a readable presentation of the grammar rules.",
"def toString(self):\n result = ''\n for head, *body in self.mRules:\n result += f'{head}: {body};\\n'\n return result\n\nGrammar.__str__ = toString\ndel toString",
"The class EarleyParser implements the parsing algorithm of Jay Earley.\nThe class maintains the following member variables:\n- mGrammar is the grammar that is used to parse the given token string.\n- mString is the list of tokens and literals that has to be parsed.\nAs a hack, the first element of this list in None.\n Therefore, mString[i] is the $i^\\textrm{th}$ token.\n- mStateList is a list of sets of Earley items. If $n$ is the length of the given token string\n (excluding the first element None), then $Q_i = \\texttt{mStateList}[i]$. \n The idea is that the set $Q_i$ is the set of those Earley items that the parser could be in \n when it has read the tokens mString[1], $\\cdots$, mString[n]. $Q_0$ is initialized as follows:\n $$ Q_0 = \\bigl{\\langle\\hat{S} \\rightarrow \\bullet S, 0\\rangle\\bigr}. $$\nThe Earley items are interpreted as follows: If we have\n$$ \\langle C \\rightarrow \\alpha \\bullet \\beta, k\\rangle \\in Q_i, $$\nthen we know the following:\n- After having read the tokens mString[:k+1] the parser tries to parse the variable $C$\n in the token string mString[k+1:].\n- After having read the token string mString[k+1:i+1] the parser has already recognized $\\alpha$\n and now needs to recognize $\\beta$ in the token string mString[i+1:] in order to parse the variable $C$.",
"class EarleyParser():\n def __init__(self, grammar, TokenList):\n self.mGrammar = grammar \n self.mString = [None] + TokenList # hack so mString[1] is the first token\n self.mStateList = [set() for i in range(len(TokenList)+1)] \n print('Grammar:\\n')\n print(self.mGrammar)\n print(f'Input: {self.mString}\\n')\n self.mStateList[0] = { self.mGrammar.startItem() }",
"The method parse implements Earley's algorithm. For all states \n$Q_1$, $\\cdots$, $Q_n$ we proceed as follows:\n- We apply the completion operation followed by the prediction operation.\n This is done until no more states are added to $Q_i$. \n(The inner while loop is not necessary if the grammar does not contain $\\varepsilon$-rules.)\n- Finally, the scanning operation is applied to $Q_i$. This operation adds\n items to the set $Q_{i+1}$.\nAfter $Q_i$ has been computed, we proceed to process $Q_{i+1}$.\nParsing is successful iff\n$$ \\langle\\hat{S} \\rightarrow S \\bullet, 0\\rangle \\in Q_n $$",
"def parse(self):\n \"run Earley's algorithm\"\n n = len(self.mString) - 1 # mString[0] = None\n for i in range(0, n+1):\n if i + 1 <= n:\n next_token = self.mString[i+1]\n else:\n next_token = 'EOF'\n print('_' * 80)\n print(f'next token = {next_token}')\n print('_' * 80)\n change = True\n while change:\n change = self.complete(i)\n change = self.predict(i) or change\n self.scan(i)\n # print states\n print(f'\\nQ{i}:')\n Qi = self.mStateList[i]\n for item in Qi: \n print(item)\n if i + 1 <= n:\n print(f'\\nQ{i+1}:')\n Qip1 = self.mStateList[i+1]\n for item in Qip1: \n print(item)\n if self.mGrammar.finishItem() in self.mStateList[-1]:\n print('Parsing successful!')\n else:\n print('Parsing failed!')\n\nEarleyParser.parse = parse\ndel parse",
"The method complete(self, i) applies the completion operation to the state $Q_i$:\nIf we have\n- $\\langle C \\rightarrow \\gamma \\bullet, j\\rangle \\in Q_i$ and\n- $\\langle A \\rightarrow \\beta \\bullet C \\delta, k\\rangle \\in Q_j$,\nthen the parser tried to parse the variable $C$ after having read mString[:j+1]\nand we know that \n$$ C \\Rightarrow^ \\texttt{mString[j+1:i+1]}, $$\ni.e. the parser has recognized $C$ after having read mString[j+1:i+1].\nTherefore the parser should proceed to recognize $\\delta$ in state $Q_i$.\nTherefore we add the Earley item* $\\langle A \\rightarrow \\beta C \\bullet \\delta,k\\rangle$ to the set $Q_i$:\n$$\\langle C \\rightarrow \\gamma \\bullet, j\\rangle \\in Q_i \\wedge\n \\langle A \\rightarrow \\beta \\bullet C \\delta, k\\rangle \\in Q_j \\;\\rightarrow\\;\n Q_i := Q_i \\cup \\bigl{ \\langle A \\rightarrow \\beta C \\bullet \\delta, k\\rangle \\bigr}\n$$",
"def complete(self, i):\n change = False\n added = True\n Qi = self.mStateList[i]\n while added:\n added = False\n newQi = set()\n for item in Qi:\n if item.isComplete():\n C = item.mVariable\n j = item.mIndex\n Qj = self.mStateList[j]\n for newItem in Qj:\n if newItem.sameVar(C):\n moved = newItem.moveDot()\n newQi.add(moved)\n if not (newQi <= Qi):\n change = True\n added = True\n print(\"completion:\")\n for newItem in newQi:\n if newItem not in Qi:\n print(f'{newItem} added to Q{i}')\n self.mStateList[i] |= newQi\n Qi = self.mStateList[i]\n return change\n \nEarleyParser.complete = complete\ndel complete",
"The method self.predict(i) applies the prediction operation to the state $Q_i$: \nIf $\\langle A \\rightarrow \\beta \\bullet C \\delta, k \\rangle \\in Q_j$, then\nthe parser tries to recognize $C\\delta$ after having read mString[:j+1]. To this end\nit has to parse $C$ in the string mString[j+1:].\nTherefore, if $C \\rightarrow \\gamma$ is a rule of our grammar,\nwe add the Earley item $\\langle C \\rightarrow \\bullet \\gamma, j\\rangle$ to the set $Q_j$:\n$$ \\langle A \\rightarrow \\beta \\bullet C \\delta, k\\rangle \\in Q_j \n \\wedge (C \\rightarrow \\gamma) \\in R \n \\;\\rightarrow\\;\n Q_j := Q_j \\cup\\bigl{ \\langle C \\rightarrow \\bullet\\gamma, j\\rangle\\bigr}.\n$$\nAs the right hand side $\\gamma$ might start with a variable, the function uses a fix point iteration\nuntil no more Earley items are added to $Q_j$.",
"def predict(self, i):\n change = False\n added = True\n Qi = self.mStateList[i]\n while added:\n added = False\n newQi = set()\n for item in Qi:\n c = item.nextVar()\n if c != None:\n for rule in self.mGrammar.mRules:\n if c == rule[0]:\n newQi.add(EarleyItem(c, (), rule[1:], i))\n if not (newQi <= Qi):\n change = True\n added = True\n print(\"prediction:\")\n for newItem in newQi:\n if newItem not in Qi:\n print(f'{newItem} added to Q{i}')\n self.mStateList[i] |= newQi\n Qi = self.mStateList[i]\n return change\n\nEarleyParser.predict = predict\ndel predict",
"The function self.scan(i) applies the scanning operation to the state $Q_i$.\nIf $\\langle A \\rightarrow \\beta \\bullet a \\gamma, k\\rangle \\in Q_i$ and $a$ is a token,\nthen the parser tries to recognize the right hand side of the grammar rule\n$$ A \\rightarrow \\beta a \\gamma$$ \nand after having read mString[k+1:i+1] it has already recognized $\\beta$.\nIf we now have mString[i+1] == a, then the parser still has to recognize $\\gamma$ in mString[i+2:].\nTherefore, the Earley object $\\langle A \\rightarrow \\beta a \\bullet \\gamma, k\\rangle$ is added to\nthe set $Q_{i+1}$:\n$$\\langle A \\rightarrow \\beta \\bullet a \\gamma, k\\rangle \\in Q_i \\wedge x_{i+1} = a\n \\;\\rightarrow\\;\n Q_{i+1} := Q_{i+1} \\cup \\bigl{ \\langle A \\rightarrow \\beta a \\bullet \\gamma, k\\rangle \\bigr}\n$$",
"def scan(self, i):\n Qi = self.mStateList[i]\n n = len(self.mString) - 1 # remember mStateList[0] == None\n if i + 1 <= n:\n a = self.mString[i+1]\n for item in Qi:\n if item.scan(a):\n self.mStateList[i+1].add(item.moveDot())\n print('scanning:')\n print(f'{item.moveDot()} added to Q{i+1}')\n\nEarleyParser.scan = scan\ndel scan\n\nimport re",
"The function tokenize transforms the string s into a list of tokens. See below for an example.",
"def tokenize(s):\n '''Transform the string s into a list of tokens. The string s\n is supposed to represent an arithmetic expression.\n '''\n lexSpec = r'''([ \\t]+) | # blanks and tabs\n ([1-9][0-9]*|0) | # number\n ([()]) | # parentheses \n ([-+*/]) | # arithmetical operators\n (.) # unrecognized character\n '''\n tokenList = re.findall(lexSpec, s, re.VERBOSE)\n result = []\n for ws, number, parenthesis, operator, error in tokenList:\n if ws: # skip blanks and tabs\n continue\n elif number:\n result += [ 'NUMBER' ]\n elif parenthesis:\n result += [ parenthesis ]\n elif operator:\n result += [ operator ]\n else:\n result += [ f'ERROR({error})']\n return result\n\ntokenize('1 + 2 * 3')",
"The function test takes two arguments.\n- file is the name of a file containing a grammar,\n- word is a string that should be parsed.\nword is first tokenized. Then the resulting token list is parsed using Earley's algorithm.",
"def test(file, word): \n Rules = parse_grammar(file)\n grammar = Grammar(Rules)\n TokenList = tokenize(word)\n ep = EarleyParser(grammar, TokenList)\n ep.parse()\n\ntest('simple.g', '1 * 2 + 3')",
"The command below cleans the directory.",
"!del GrammarLexer.* GrammarParser.* Grammar.tokens GrammarListener.py Grammar.interp\n!rmdir /S /Q __pycache__\n\n!dir /B\n\n!rm GrammarLexer.* GrammarParser.* Grammar.tokens GrammarListener.py Grammar.interp\n!rm -r __pycache__\n\n!ls"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
kubeflow/code-intelligence
|
Label_Microservice/notebooks/Training_Pipeline.ipynb
|
mit
|
[
"Define Kubeflow Pipelines to train a model\nCreate entry point using fairing\nKubeflow Fairing is a Python package that makes training and deploying machine learning models on Kubeflow easier.\nHere, we use the preprocessor in Kubeflow Fairing to convert a notebook to be a Python script and create an entry point for that script. After preprocessing the notebook, we can call the command in the command line like the following to run\nPython\n$ python repo_mlp.py train",
"import sys\nsys.path.append(\"../../py\")\n\nimport os\nfrom fairing.preprocessors.converted_notebook import ConvertNotebookPreprocessorWithFire\n\npreprocessor = ConvertNotebookPreprocessorWithFire('IssuesLoader', notebook_file='issues_loader.ipynb')\n\nif not preprocessor.input_files:\n preprocessor.input_files = set()\ninput_files = ['../../py/code_intelligence/embeddings.py',\n '../../py/code_intelligence/inference.py',\n '../../py/label_microservice/repo_config.py']\npreprocessor.input_files = set([os.path.normpath(f) for f in input_files])\npreprocessor.preprocess()\n\npreprocessor = ConvertNotebookPreprocessorWithFire('RepoMLP', notebook_file='repo_mlp.ipynb')\n\nif not preprocessor.input_files:\n preprocessor.input_files = set()\ninput_files = ['../../py/label_microservice/mlp.py',\n '../../py/label_microservice/repo_config.py']\npreprocessor.input_files = set([os.path.normpath(f) for f in input_files])\npreprocessor.preprocess()",
"Use Fairing to build docker image",
"import os\nimport sys\nimport fairing\nfrom fairing.builders import append\nfrom fairing.builders import cluster\nfrom fairing.deployers import job\nfrom fairing.preprocessors.converted_notebook import ConvertNotebookPreprocessorWithFire\n\n# Setting up google container repositories (GCR) for storing output containers\n# You can use any docker container registry istead of GCR\nGCP_PROJECT = fairing.cloud.gcp.guess_project_name()\nprint(GCP_PROJECT)\nDOCKER_REGISTRY = 'gcr.io/{}/training'.format(GCP_PROJECT)\nprint(DOCKER_REGISTRY)\nPY_VERSION = \".\".join([str(x) for x in sys.version_info[0:3]])\nBASE_IMAGE = 'python:{}'.format(PY_VERSION)\n# ucan use Dockerfile in this repo to build and use the base_image\nbase_image = 'gcr.io/issue-label-bot-dev/ml-gpu-lite-py3.6'",
"Build Docker image\nWe use builders in Kubeflow Fairing to build docker images. We use ClusterBuilder to builds a docker image in a Kubernetes cluster and AppendBuilder to append a new layer tarball. We also include preprocessor as a parameter to send the processed inputs to docker build.",
"preprocessor = ConvertNotebookPreprocessorWithFire('RepoMLP', notebook_file='repo_mlp.ipynb')\n\nif not preprocessor.input_files:\n preprocessor.input_files = set()\ninput_files = ['../../py/label_microservice/mlp.py',\n '../../py/label_microservice/repo_config.py',\n '../../py/code_intelligence/embeddings.py',\n '../../py/code_intelligence/inference.py',\n 'issues_loader.py']\npreprocessor.input_files = set([os.path.normpath(f) for f in input_files])\npreprocessor.preprocess()\n\ncluster_builder = cluster.cluster.ClusterBuilder(registry=DOCKER_REGISTRY,\n base_image=base_image,\n namespace='chunhsiang',\n preprocessor=preprocessor,\n pod_spec_mutators=[fairing.cloud.gcp.add_gcp_credentials_if_exists],\n context_source=cluster.gcs_context.GCSContextSource())\ncluster_builder.build()\n\nbuilder = append.append.AppendBuilder(registry=DOCKER_REGISTRY,\n base_image=cluster_builder.image_tag,\n preprocessor=preprocessor)\nbuilder.build()",
"Build pipeline\nKubeflow Pipelines builds reusable end-to-end machine learning workflows.\nDefine the pipeline as a Python function. \"@kfp.dsl.pipeline\" is a required decoration including name and description properties.\nWe define two steps for our training pipelines, including scrapping issues and training model, both of which will be executed in our built image from Kubeflow Fairing. Also, we use GPU and add GCP credentials to the pipelines.",
"import kfp\nimport kfp.components as comp\nimport kfp.gcp as gcp\nimport kfp.dsl as dsl\nimport kfp.compiler as compiler\n\n# need to modify it if build a new one\ntarget_image = 'gcr.io/issue-label-bot-dev/training/fairing-job:5350A3D3'\n\n@dsl.pipeline(\n name='Training pipeline',\n description='A pipeline that loads embeddings and trains a model for a github repo.'\n)\ndef train_pipeline(owner, repo):\n scrape_op = dsl.ContainerOp(\n name='scrape issues',\n image=target_image,\n command=['python', 'issues_loader.py', 'save_issue_embeddings', f'--owner={owner}', f'--repo={repo}'],\n ).set_gpu_limit(1).apply(\n gcp.use_gcp_secret('user-gcp-sa'),\n )\n scrape_op.container.working_dir = '/app'\n\n train_op = dsl.ContainerOp(\n name='train',\n image=target_image,\n command=['python', 'repo_mlp.py', 'train', f'--owner={owner}', f'--repo={repo}'],\n ).set_gpu_limit(1).apply(\n gcp.use_gcp_secret('user-gcp-sa'),\n )\n train_op.container.working_dir = '/app'\n train_op.after(scrape_op)",
"Compile the pipeline\nWe compile our pipeline to an intermediate representation, which is a YAML file compressed in a zip file.",
"pipeline_func = train_pipeline\npipeline_filename = pipeline_func.__name__ + '.pipeline.zip'\ncompiler.Compiler().compile(pipeline_func, pipeline_filename)",
"Submit the pipeline for execution\nWe upload our created pipeline, the zip file, and run it. Then, we can see the pipeline and experiments on Kubeflow UI.",
"EXPERIMENT_NAME = 'TrainModel'\n\nclient = kfp.Client()\nexperiment = client.create_experiment(EXPERIMENT_NAME)\n\n#Specify pipeline argument values\narguments = {'owner': '', 'repo': ''}\n\n#Submit a pipeline run\nrun_name = pipeline_func.__name__ + ' run'\nrun_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
tensorflow/docs-l10n
|
site/en-snapshot/lite/performance/post_training_float16_quant.ipynb
|
apache-2.0
|
[
"Copyright 2019 The TensorFlow Authors.",
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"Post-training float16 quantization\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/lite/performance/post_training_float16_quant\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/performance/post_training_float16_quant.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/performance/post_training_float16_quant.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/tensorflow/tensorflow/lite/g3doc/performance/post_training_float16_quant.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>\n\nOverview\nTensorFlow Lite now supports\nconverting weights to 16-bit floating point values during model conversion from TensorFlow to TensorFlow Lite's flat buffer format. This results in a 2x reduction in model size. Some hardware, like GPUs, can compute natively in this reduced precision arithmetic, realizing a speedup over traditional floating point execution. The Tensorflow Lite GPU delegate can be configured to run in this way. However, a model converted to float16 weights can still run on the CPU without additional modification: the float16 weights are upsampled to float32 prior to the first inference. 
This permits a significant reduction in model size in exchange for a minimal impacts to latency and accuracy.\nIn this tutorial, you train an MNIST model from scratch, check its accuracy in TensorFlow, and then convert the model into a Tensorflow Lite flatbuffer\nwith float16 quantization. Finally, check the accuracy of the converted model and compare it to the original float32 model.\nBuild an MNIST model\nSetup",
"import logging\nlogging.getLogger(\"tensorflow\").setLevel(logging.DEBUG)\n\nimport tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\nimport pathlib",
"Train and export the model",
"# Load MNIST dataset\nmnist = keras.datasets.mnist\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n\n# Normalize the input image so that each pixel value is between 0 to 1.\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\n\n# Define the model architecture\nmodel = keras.Sequential([\n keras.layers.InputLayer(input_shape=(28, 28)),\n keras.layers.Reshape(target_shape=(28, 28, 1)),\n keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation=tf.nn.relu),\n keras.layers.MaxPooling2D(pool_size=(2, 2)),\n keras.layers.Flatten(),\n keras.layers.Dense(10)\n])\n\n# Train the digit classification model\nmodel.compile(optimizer='adam',\n loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\nmodel.fit(\n train_images,\n train_labels,\n epochs=1,\n validation_data=(test_images, test_labels)\n)",
"For the example, you trained the model for just a single epoch, so it only trains to ~96% accuracy.\nConvert to a TensorFlow Lite model\nUsing the TensorFlow Lite Converter, you can now convert the trained model into a TensorFlow Lite model.\nNow load the model using the TFLiteConverter:",
"converter = tf.lite.TFLiteConverter.from_keras_model(model)\ntflite_model = converter.convert()",
"Write it out to a .tflite file:",
"tflite_models_dir = pathlib.Path(\"/tmp/mnist_tflite_models/\")\ntflite_models_dir.mkdir(exist_ok=True, parents=True)\n\ntflite_model_file = tflite_models_dir/\"mnist_model.tflite\"\ntflite_model_file.write_bytes(tflite_model)",
"To instead quantize the model to float16 on export, first set the optimizations flag to use default optimizations. Then specify that float16 is the supported type on the target platform:",
"converter.optimizations = [tf.lite.Optimize.DEFAULT]\nconverter.target_spec.supported_types = [tf.float16]",
"Finally, convert the model like usual. Note, by default the converted model will still use float input and outputs for invocation convenience.",
"tflite_fp16_model = converter.convert()\ntflite_model_fp16_file = tflite_models_dir/\"mnist_model_quant_f16.tflite\"\ntflite_model_fp16_file.write_bytes(tflite_fp16_model)",
"Note how the resulting file is approximately 1/2 the size.",
"!ls -lh {tflite_models_dir}",
"Run the TensorFlow Lite models\nRun the TensorFlow Lite model using the Python TensorFlow Lite Interpreter.\nLoad the model into the interpreters",
"interpreter = tf.lite.Interpreter(model_path=str(tflite_model_file))\ninterpreter.allocate_tensors()\n\ninterpreter_fp16 = tf.lite.Interpreter(model_path=str(tflite_model_fp16_file))\ninterpreter_fp16.allocate_tensors()",
"Test the models on one image",
"test_image = np.expand_dims(test_images[0], axis=0).astype(np.float32)\n\ninput_index = interpreter.get_input_details()[0][\"index\"]\noutput_index = interpreter.get_output_details()[0][\"index\"]\n\ninterpreter.set_tensor(input_index, test_image)\ninterpreter.invoke()\npredictions = interpreter.get_tensor(output_index)\n\nimport matplotlib.pylab as plt\n\nplt.imshow(test_images[0])\ntemplate = \"True:{true}, predicted:{predict}\"\n_ = plt.title(template.format(true= str(test_labels[0]),\n predict=str(np.argmax(predictions[0]))))\nplt.grid(False)\n\ntest_image = np.expand_dims(test_images[0], axis=0).astype(np.float32)\n\ninput_index = interpreter_fp16.get_input_details()[0][\"index\"]\noutput_index = interpreter_fp16.get_output_details()[0][\"index\"]\n\ninterpreter_fp16.set_tensor(input_index, test_image)\ninterpreter_fp16.invoke()\npredictions = interpreter_fp16.get_tensor(output_index)\n\nplt.imshow(test_images[0])\ntemplate = \"True:{true}, predicted:{predict}\"\n_ = plt.title(template.format(true= str(test_labels[0]),\n predict=str(np.argmax(predictions[0]))))\nplt.grid(False)",
"Evaluate the models",
"# A helper function to evaluate the TF Lite model using \"test\" dataset.\ndef evaluate_model(interpreter):\n input_index = interpreter.get_input_details()[0][\"index\"]\n output_index = interpreter.get_output_details()[0][\"index\"]\n\n # Run predictions on every image in the \"test\" dataset.\n prediction_digits = []\n for test_image in test_images:\n # Pre-processing: add batch dimension and convert to float32 to match with\n # the model's input data format.\n test_image = np.expand_dims(test_image, axis=0).astype(np.float32)\n interpreter.set_tensor(input_index, test_image)\n\n # Run inference.\n interpreter.invoke()\n\n # Post-processing: remove batch dimension and find the digit with highest\n # probability.\n output = interpreter.tensor(output_index)\n digit = np.argmax(output()[0])\n prediction_digits.append(digit)\n\n # Compare prediction results with ground truth labels to calculate accuracy.\n accurate_count = 0\n for index in range(len(prediction_digits)):\n if prediction_digits[index] == test_labels[index]:\n accurate_count += 1\n accuracy = accurate_count * 1.0 / len(prediction_digits)\n\n return accuracy\n\nprint(evaluate_model(interpreter))",
"Repeat the evaluation on the float16 quantized model to obtain:",
"# NOTE: Colab runs on server CPUs. At the time of writing this, TensorFlow Lite\n# doesn't have super optimized server CPU kernels. For this reason this may be\n# slower than the above float interpreter. But for mobile CPUs, considerable\n# speedup can be observed.\nprint(evaluate_model(interpreter_fp16))",
"In this example, you have quantized a model to float16 with no difference in the accuracy.\nIt's also possible to evaluate the fp16 quantized model on the GPU. To perform all arithmetic with the reduced precision values, be sure to create the TfLiteGPUDelegateOptions struct in your app and set precision_loss_allowed to 1, like this:\n//Prepare GPU delegate.\nconst TfLiteGpuDelegateOptions options = {\n .metadata = NULL,\n .compile_options = {\n .precision_loss_allowed = 1, // FP16\n .preferred_gl_object_type = TFLITE_GL_OBJECT_TYPE_FASTEST,\n .dynamic_batch_enabled = 0, // Not fully functional yet\n },\n};\nDetailed documentation on the TFLite GPU delegate and how to use it in your application can be found here"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
ajgpitch/qutip-notebooks
|
examples/spin-chain-model.ipynb
|
lgpl-3.0
|
[
"QuTiP example: Physical implementation of Spin Chain Qubit model\nAuthor: Anubhav Vardhan (anubhavvardhan@gmail.com)\nNumerical simulation added by Boxi Li (etamin1201@gmail.com)\nFor more information about QuTiP see http://qutip.org",
"%matplotlib inline\n\nfrom qutip.qip.circuit import QubitCircuit\nfrom qutip.qip.operations import gate_sequence_product\nimport numpy as np",
"If your qutip version is lower than 4.4.1 please run the following cell",
"from qutip.qip.models.spinchain import CircularSpinChain\nfrom qutip.qip.models.spinchain import LinearSpinChain",
"Otherwise please run this cell",
"from qutip.qip.device import CircularSpinChain, LinearSpinChain\nfrom qutip.qip.noise import RandomNoise",
"Hamiltonian:\n$\\displaystyle H = - \\frac{1}{2}\\sum_n^N h_n \\sigma_z(n) - \\frac{1}{2} \\sum_n^{N-1} [ J_x^{(n)} \\sigma_x(n) \\sigma_x(n+1) + J_y^{(n)} \\sigma_y(n) \\sigma_y(n+1) +J_z^{(n)} \\sigma_z(n) \\sigma_z(n+1)]$\nThe linear and circular spin chain models employing the nearest neighbor interaction can be implemented using the SpinChain class.\nCircuit Setup",
"N = 3\nqc = QubitCircuit(N)\n\nqc.add_gate(\"CNOT\", targets=[0], controls=[2])",
"The non-adjacent interactions are broken into a series of adjacent ones by the program automatically.",
"U_ideal = gate_sequence_product(qc.propagators())\n\nU_ideal",
"Circular Spin Chain Model Implementation",
"p1 = CircularSpinChain(N, correct_global_phase=True)\n\nU_list = p1.run(qc)\n\nU_physical = gate_sequence_product(U_list)\n\nU_physical.tidyup(atol=1e-5)\n\n(U_ideal - U_physical).norm()",
"The results obtained from the physical implementation agree with the ideal result.",
"p1.qc0.gates",
"The gates are first convert to gates with adjacent interactions moving in the direction with the least number of qubits in between.",
"p1.qc1.gates",
"They are then converted into the basis [ISWAP, RX, RZ]",
"p1.qc2.gates",
"The time for each applied gate:",
"p1.get_full_tlist()",
"The pulse can be plotted as:",
"p1.plot_pulses();",
"Linear Spin Chain Model Implementation",
"p2 = LinearSpinChain(N, correct_global_phase=True)\n\nU_list = p2.run(qc)\n\nU_physical = gate_sequence_product(U_list)\n\nU_physical.tidyup(atol=1e-5)\n\n(U_ideal - U_physical).norm()",
"The results obtained from the physical implementation agree with the ideal result.",
"p2.qc0.gates",
"The gates are first convert to gates with adjacent interactions moving in the direction with the least number of qubits in between.",
"p2.qc1.gates",
"They are then converted into the basis [ISWAP, RX, RZ]",
"p2.qc2.gates",
"The time for each applied gate:",
"p2.get_full_tlist()",
"The pulse can be plotted as:",
"p2.plot_pulses();",
"Numerical simulation\nFrom QuTiP 4.5, we also add the possibility to allow numerical simulation of SpinChain-based quantum computing. One needs only to add an option analytical=False in run_state to use one of the QuTiP solvers to simulate the state evolution instead of direct matrix product. Under numerical simulation, one can go beyond simulation with perfect gate operations. All the noise defined for the class Processor can also be used for SpinChain here.",
"from qutip import basis, fidelity\nN = 1\nplus_state = (basis(2,0) + basis(2,1)).unit()\n\nqc = QubitCircuit(N=N)\nqc.add_gate(\"SNOT\", targets=0)\nprocessor = LinearSpinChain(N=N)\nprocessor.load_circuit(qc)\nend_state = processor.run_state(init_state=basis(2, 0), analytical=False).states[-1]\nfidelity(end_state, plus_state)\n\nprocessor.add_noise(RandomNoise(rand_gen=np.random.normal, dt=0.1, loc=0.1, scale=0.2))\nend_state = processor.run_state(init_state=basis(2, 0), analytical=False).states[-1]\nfidelity(end_state, plus_state)",
"As the control noise is coherent noise, the result of this noise is still a pure state. Therefore, we can visualize it on a Bloch sphere.",
"from qutip.bloch import Bloch\nb = Bloch()\nb.add_states([end_state, plus_state])\nb.make_sphere()",
"Software versions:",
"from qutip.ipynbtools import version_table\nversion_table()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
antoniomezzacapo/qiskit-tutorial
|
community/aqua/optimization/maxcut.ipynb
|
apache-2.0
|
[
"Using Qiskit Aqua for maxcut problems\nThis Qiskit Aqua Optimization notebook demonstrates how to use the VQE quantum algorithm to compute the max cut of a given graph. \nThe problem is defined as follows. Given a graph $G = (V,E)$ with weights $w_{ij}$ on the edges, we are looking for a subset $S \\subseteq V$ such that $\\sum_{(i,j) \\in E : i \\in S, j \\not\\in S} w_{ij}$ is maximized.\nThe graph provided as an input is used first to generate an Ising Hamiltonian, which is then passed as an input to VQE. As a reference, this notebook also computes the max cut using the Exact Eigensolver classical algorithm and the solver embedded in the commercial non-quantum IBM CPLEX product (if it is available in the system and the user has followed the necessary configuration steps in order for Qiskit Aqua to find it). Please refer to the Qiskit Aqua Optimization documentation for installation and configuration details for CPLEX.",
"from qiskit_aqua import Operator, run_algorithm, get_algorithm_instance\nfrom qiskit_aqua.input import get_input_instance\nfrom qiskit_aqua.translators.ising import maxcut\nimport numpy as np",
"Here an Operator instance is created for our Hamiltonian. In this case the paulis are from an Ising Hamiltonian translated from the max cut problem. We load a small sample instance of the maxcut problem.",
"w = maxcut.parse_gset_format('sample.maxcut')\nqubitOp, offset = maxcut.get_maxcut_qubitops(w)\nalgo_input = get_input_instance('EnergyInput')\nalgo_input.qubit_op = qubitOp",
"We also offer a function to generate a random graph as a input.",
"if True:\n np.random.seed(8123179)\n w = maxcut.random_graph(4, edge_prob=0.5, weight_range=10)\n qubitOp, offset = maxcut.get_maxcut_qubitops(w)\n algo_input.qubit_op = qubitOp\nprint(w)",
"Here we test for the presence of algorithms we want to use in this notebook. If Aqua is installed correctly ExactEigensolver and VQE will always be found. CPLEX.Ising is dependent on IBM CPLEX being installed (see introduction above). CPLEX is not required but if installed then this notebook will demonstrate the CPLEX.Ising algorithm , that uses CPLEX, to compute maxcut as well.",
"to_be_tested_algos = ['ExactEigensolver', 'CPLEX.Ising', 'VQE']\noperational_algos = []\nfor algo in to_be_tested_algos:\n try:\n get_algorithm_instance(algo)\n operational_algos.append(algo)\n except:\n print(\"{} is unavailable and will be skipped.\".format(algo))\nprint(operational_algos)",
"We can now use the Operator without regard to how it was created. First we need to prepare the configuration params to invoke the algorithm. Here we will use the ExactEigensolver first to return the smallest eigenvalue. Backend is not required since this is computed classically not using quantum computation. We then add in the qubitOp Operator in dictionary format. Now the complete params can be passed to the algorithm and run. The result is a dictionary.",
"if 'ExactEigensolver' not in operational_algos:\n print(\"ExactEigensolver is not in operational algorithms.\")\nelse:\n algorithm_cfg = {\n 'name': 'ExactEigensolver',\n }\n\n params = {\n 'problem': {'name': 'ising'},\n 'algorithm': algorithm_cfg\n }\n result = run_algorithm(params,algo_input)\n # print('objective function:', maxcut.maxcut_obj(result, offset))\n x = maxcut.sample_most_likely(result['eigvecs'][0])\n print('energy:', result['energy'])\n print('maxcut objective:', result['energy'] + offset)\n print('solution:', maxcut.get_graph_solution(x))\n print('solution objective:', maxcut.maxcut_value(x, w))",
"Note: IBM CPLEX is an optional installation addition for Aqua. If installed then the Aqua CPLEX.Ising algorithm will be able to be used. If not, then solving this problem using this particular algorithm will simply be skipped. \nWe change the configuration parameters to solve it with the CPLEX backend. The CPLEX backend can deal with a particular type of Hamiltonian called Ising Hamiltonian, which consists of only Pauli Z at most second order and often for combinatorial optimization problems that can be formulated as quadratic unconstrained binary optimization problems, such as the max-cut problem.\nNote that for a maxcut problem, since we are computing a bipartition of the graph, every binary vector $x$ and its complement (i.e., the vector $y$ such that $y_j = 1 - x_j$ for all $j$) represent exactly the same solution, and will have the same objective function value. Different solution methods may return solutions that look different, but in fact have the same objective function value.",
"if 'CPLEX.Ising' not in operational_algos:\n print(\"CPLEX.Ising is not in operational algorithms.\")\nelse:\n algorithm_cfg = {\n 'name': 'CPLEX.Ising',\n 'display': 0\n }\n\n params = {\n 'problem': {'name': 'ising'},\n 'algorithm': algorithm_cfg\n }\n\n result = run_algorithm(params, algo_input)\n\n x_dict = result['x_sol']\n print('energy:', result['energy'])\n print('time:', result['eval_time'])\n print('maxcut objective:', result['energy'] + offset)\n x = np.array([x_dict[i] for i in sorted(x_dict.keys())])\n print('solution:', maxcut.get_graph_solution(x))\n print('solution objective:', maxcut.maxcut_value(x, w))",
"Now we want VQE and so change it and add its other configuration parameters. VQE also needs and optimizer and variational form. While we can omit them from the dictionary, such that defaults are used, here we specify them explicitly so we can set their parameters as we desire.",
"if 'VQE' not in operational_algos:\n print(\"VQE is not in operational algorithms.\")\nelse:\n algorithm_cfg = {\n 'name': 'VQE',\n 'operator_mode': 'matrix'\n }\n\n optimizer_cfg = {\n 'name': 'L_BFGS_B',\n 'maxfun': 6000\n }\n\n var_form_cfg = {\n 'name': 'RYRZ',\n 'depth': 3,\n 'entanglement': 'linear'\n }\n\n params = {\n 'problem': {'name': 'ising'},\n 'algorithm': algorithm_cfg,\n 'optimizer': optimizer_cfg,\n 'variational_form': var_form_cfg,\n 'backend': {'name': 'statevector_simulator'}\n }\n\n result = run_algorithm(params,algo_input)\n\n x = maxcut.sample_most_likely(result['eigvecs'][0])\n print('energy:', result['energy'])\n print('time:', result['eval_time'])\n print('maxcut objective:', result['energy'] + offset)\n print('solution:', maxcut.get_graph_solution(x))\n print('solution objective:', maxcut.maxcut_value(x, w))"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
bhargavvader/pycobra
|
docs/notebooks/pycobra.ipynb
|
mit
|
[
"pycobra and scikit-learn\nThis notebook demonstrates pycobras integration with the scikit-learn ecosystem.\nWe will also give an example of pycobra's performance on some real world data-sets.",
"from pycobra.cobra import Cobra\nfrom pycobra.ewa import Ewa\nfrom pycobra.diagnostics import Diagnostics\nfrom pycobra.visualisation import Visualisation\nimport numpy as np\n%matplotlib inline",
"Let's set up a synthetic data-set just to show that the COBRA estimator is scikit-learn compatible.",
"# setting up our random data-set\nrng = np.random.RandomState(1)\n\n# D1 = train machines; D2 = create COBRA; D3 = calibrate epsilon, alpha; D4 = testing\nn_features = 20\nD1, D2, D3, D4 = 200, 200, 200, 200\nD = D1 + D2 + D3 + D4\nX = rng.uniform(-1, 1, D * n_features).reshape(D, n_features)\nY = np.power(X[:,1], 2) + np.power(X[:,3], 3) + np.exp(X[:,10]) \n# Y = np.power(X[:,0], 2) + np.power(X[:,1], 3)\n\n# training data-set\nX_train = X[:D1 + D2]\nX_test = X[D1 + D2 + D3:D1 + D2 + D3 + D4]\nX_eps = X[D1 + D2:D1 + D2 + D3]\n# for testing\nY_train = Y[:D1 + D2]\nY_test = Y[D1 + D2 + D3:D1 + D2 + D3 + D4]\nY_eps = Y[D1 + D2:D1 + D2 + D3]",
"Similar to other scikit-learn estimators, we set up our machine by creating an object and then fitting it. \nSince we are not passing an Epsilon value, we pass data to find an optimal epsilon value while instantiating our object. The optimal epsilon is found through the scikit-learn CVGridSearch. The grid_points parameter decides how many possible epsilon values must be traversed.",
"cobra = Cobra()\n\ncobra.set_epsilon(X_epsilon=X_eps, y_epsilon=Y_eps, grid_points=5)\n\ncobra.epsilon\n\ncobra.fit(X_train, Y_train)",
"We now see if our object can fit into the scikit-learn pipeline and GridSearch - and it can!",
"from sklearn.utils.estimator_checks import check_estimator\ncheck_estimator(Cobra) #passes",
"Exponentially Weighted Average Aggregate\nLet us also demonstrate the EWA predictor. You can read more about it over here in the paper by A. Dalalyan and A. B. Tsybakov.",
"ewa = Ewa()\n\newa.set_beta(X_beta=X_eps, y_beta=Y_eps)",
"If we fit EWA without passing beta, we perform a CV to find the optimal beta.",
"ewa.fit(X_train, Y_train)\n\ncheck_estimator(Ewa) #passes",
"EWA assigns weights to each machine based on it's MSE. We can check the weights of each machine with the plot_machine_weights method.",
"ewa.plot_machine_weights()\n\newa.machine_weight_",
"Like the Cobra estimator, Ewa is also a scikit-learn compatible estimator. It also fits into the Visualisation class, like demonstrated in the notebook. \nPredicting?\nLike the other scikit-learn predictors, we estimate on data by simply using the predict() method.",
"query = X_test[0].reshape(1, -1)\n\ncobra.predict(query)\n\newa.predict(query)",
"Why pycobra?\nThere are scikit-learn estimators which already perform well in basic regression tasks - why use pycobra?\nThe Cobra estimator has the advantage of a theoretical bound on its performance - this means it is supposed to perform at least as well as the estimators used to create it, up to a remainder term which decays to zero. The Ewa estimator also benefits from similar bounds.\npycobra also lets you compare the scikit-learn estimators used in the aggregation - unlike the ensemble methods for regression which scikit-learn has, pycobra's algorithms is actually built on other scikit-learn like estimators. \npycobra for classification\npycobra also implements the classification algorithm as introduced by Mojirsheibani [1999] Combining Classifiers via Discretization, Journal of the American Statistical Association. \nClassifierCobra operates exactly as COBRA in the sense that data points are selected with respect to their closeness to the prediction of the new query point. Then, instead of forming a weighted average as COBRA, ClassifierCobra performs a majority vote to assign a label to the new point.",
"from sklearn import datasets\nfrom sklearn.metrics import accuracy_score\nbc = datasets.load_breast_cancer()\nX = bc.data[:-20]\ny = bc.target[:-20]\nX_test = bc.data[-20:]\ny_test = bc.target[-20:]\n\nfrom pycobra.classifiercobra import ClassifierCobra\ncheck_estimator(ClassifierCobra)\n\ncc = ClassifierCobra()\n\ncc.fit(X, y)\n\ncc.predict(X_test)",
"Let's see how it works in a practical case.",
"cc_diag = Diagnostics(cc, X_test, y_test)\n\ncc_diag.load_errors()\n\ncc_diag.machine_error",
"Quite well!\nReal-world datasets\nWe have demonstrated in the regression notebook how pycobra works on synthetic data-sets. Let's see pycobra in action on some scikit-learn regression datasets.",
"diabetes = datasets.load_diabetes()\n\ndiabetes_X_train = diabetes.data[:-40]\ndiabetes_X_test = diabetes.data[-20:]\n# part of the data to find an appropriate epsilon\ndiabetes_X_eps = diabetes.data[-40:-20]\n\ndiabetes_y_train = diabetes.target[:-40]\ndiabetes_y_test = diabetes.target[-20:]\ndiabetes_y_eps = diabetes.target[-40:-20]",
"We're unaware of what epsilon value to choose for our data-sets so by passing X_eps and y_eps we can get an idea of what might be a good epsilon value.",
"COBRA_diabetes = Cobra()\nCOBRA_diabetes.set_epsilon(X_epsilon=diabetes_X_eps, y_epsilon=diabetes_y_eps, grid_points=50)\nCOBRA_diabetes.fit(diabetes_X_train, diabetes_y_train)",
"Predicting using the COBRA predictor is again similar to using a scikit-learn estimator.",
"COBRA_diabetes.predict(diabetes_X_test)",
"Let's compare our MSEs using the diagnostics class now.",
"cobra_diagnostics = Diagnostics(COBRA_diabetes, diabetes_X_test, diabetes_y_test, load_MSE=True)\n\ncobra_diagnostics.machine_MSE",
"Let us similarily use COBRA on the Boston housing data set.",
"boston = datasets.load_boston()\n\nboston_X_train = boston.data[:-40]\nboston_X_test = boston.data[-20:]\nboston_X_eps = boston.data[-40:-20]\n\nboston_y_train = boston.target[:-40]\nboston_y_test = boston.target[-20:]\nboston_y_eps = boston.target[-40:-20]\n\nCOBRA_boston = Cobra()\nCOBRA_boston.set_epsilon(X_epsilon=boston_X_eps, y_epsilon=boston_y_eps, grid_points=50)\nCOBRA_boston.fit(boston_X_train, boston_y_train)\n\ncobra_diagnostics = Diagnostics(COBRA_boston, boston_X_test, boston_y_test, load_MSE=True)\n\ncobra_diagnostics.machine_MSE"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
laserson/phip-stat
|
notebooks/phip_modeling/phip-kinetic-computations.ipynb
|
apache-2.0
|
[
"%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport scipy as sp\nimport scipy.stats\nimport matplotlib.pyplot as plt\nimport seaborn as sns",
"PhIP-Seq kinetics computations\nReaction summary\nIP reaction (1 mL)\n* IgG\n * MW of IgG = 150 kDa\n * 2 µg IgG = 13.3 pmol = 8.03e12 molecules\n * 13.3 nM in the reaction\n* Phage\n * 100k particles per clone on average\n * Add ~1e10 total particles per mL reaction\n * 5k - 50k of each clone per reaction\n * Equiv to per clone concentration of 0.0083 fM to \n* Protein A/Protein G Beads\n * 40 µL total => 1.2 mg beads => capture 9.6 µg Ab according to manual\n * Should capture all Ab in reaction so will ignore in calculation\n * Kd maybe ~10 nM\nAb in reaction\nKd = [Ab] [L] / [AbL]\nInputs:\nDesired Kd ability to resolve\nTotal Ab and L (e.g., [Ab] + [AbL])\nrequires overwhelming Protein A/G binding sites?\nInput library",
"df = pd.read_csv('/Users/laserson/lasersonlab/larman/libraries/T7-Pep_InputCountsComplete46M.csv', header=None, index_col=0)\n\ncounts = df.values.ravel()\n\nsns.distplot(counts)",
"(min, 10%ile, 50%ile, 90%ile, max)",
"iles = (counts.min(), sp.stats.scoreatpercentile(counts, 10), sp.stats.scoreatpercentile(counts, 50), sp.stats.scoreatpercentile(counts, 90), counts.max())\niles\n\ncov = sum(counts)\ncov",
"And the same values as frequencies",
"tuple([float(val) / cov for val in iles])\n\ncounts.mean(), counts.std()\n\n(18. / cov) * 1e10\n\n(229. / cov) * 1e10\n\n(counts > 0).sum()\n\ncounts.shape\n\ndef equil_conc(total_antibody, total_phage, Kd):\n s = total_antibody + total_phage + Kd\n bound = 0.5 * (s - np.sqrt(s * s - 4 * total_antibody * total_phage))\n equil_antibody = total_antibody - bound\n equil_phage = total_phage - bound\n return (equil_antibody, equil_phage, bound)\n\nequil_conc(13e-15, 8.302889405513118e-17, 1e-9)\n\nnp.logspace?\n\nantibody_concentrations = np.logspace(-15, -3, num=25)\nphage_concentrations = np.logspace(-18, -12, num=13)\n\nantibody_labels = ['{:.1e}'.format(c) for c in antibody_concentrations]\nphage_labels = ['{:.1e}'.format(c) for c in phage_concentrations]\n\nKd = 1e-8\nfrac_antibody_bound = np.zeros((len(antibody_concentrations), len(phage_concentrations)))\nfrac_phage_bound = np.zeros((len(antibody_concentrations), len(phage_concentrations)))\nfor (i, a) in enumerate(antibody_concentrations):\n for (j, p) in enumerate(phage_concentrations):\n bound = equil_conc(a, p, Kd)[2]\n frac_antibody_bound[i, j] = bound / a\n frac_phage_bound[i, j] = bound / p\n\nfig = plt.figure(figsize=(12, 6))\nax = fig.add_subplot(121)\nsns.heatmap(frac_antibody_bound, xticklabels=phage_labels, yticklabels=antibody_labels, square=True, ax=ax)\nax.set_title('Fraction Antibody Bound')\nax.set_ylabel('total antibody clone conc')\nax.set_xlabel('total phage clone conc')\nax = fig.add_subplot(122)\nsns.heatmap(frac_phage_bound, xticklabels=phage_labels, yticklabels=antibody_labels, square=True, ax=ax)\nax.set_title('Fraction Phage Bound')\nax.set_ylabel('total antibody clone conc')\nax.set_xlabel('total phage clone conc')",
"It's most important to ensure we get maximal phage capture, and this seems to be independent of the total phage concentration. Let's instead explore the fraction phage bound as a function of the antibody concentration and Kd",
"antibody_concentrations = np.logspace(-15, -3, num=25)\nKds = np.logspace(-15, -6, num=19)\n\nantibody_labels = ['{:.1e}'.format(c) for c in antibody_concentrations]\nKd_labels = ['{:.1e}'.format(c) for c in Kds]\n\nphage_concentration = 2e-15\nfrac_antibody_bound = np.zeros((len(antibody_concentrations), len(Kds)))\nfrac_phage_bound = np.zeros((len(antibody_concentrations), len(Kds)))\nfor (i, a) in enumerate(antibody_concentrations):\n for (j, Kd) in enumerate(Kds):\n bound = equil_conc(a, phage_concentration, Kd)[2]\n frac_antibody_bound[i, j] = bound / a\n frac_phage_bound[i, j] = bound / phage_concentration\n\nfig = plt.figure(figsize=(9, 9))\n# ax = fig.add_subplot(121)\n# sns.heatmap(frac_antibody_bound, xticklabels=Kd_labels, yticklabels=antibody_labels, square=True, ax=ax)\n# ax.set_title('Fraction Antibody Bound')\n# ax.set_ylabel('total antibody clone conc')\n# ax.set_xlabel('Kd')\nax = fig.add_subplot(111)\nsns.heatmap(frac_phage_bound, xticklabels=Kd_labels, yticklabels=antibody_labels, square=True, ax=ax)\nax.set_title('Fraction Phage Bound')\nax.set_ylabel('total antibody clone conc')\nax.set_xlabel('Kd')"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
GHorace/ma2823_2016
|
lab_notebooks/Lab 6 2016-11-04 Tree-based methods.ipynb
|
mit
|
[
"2016-11-04: Tree-based methods\nIn this lab, we will apply tree-based classification methods to the Endometrium vs. Uterus cancer data. For documentation see: http://scikit-learn.org/0.17/modules/tree.html\nLet us start, as usual, by setting up our environment, loading the data, and setting up our cross-validation.",
"import numpy as np\n%pylab inline\n\n# Load the data\n# TODO\n\n# Normalize the data\nfrom sklearn import preprocessing\nX = preprocessing.normalize(X)\n\n# Set up a stratified 10-fold cross-validation\nfrom sklearn import cross_validation\nfolds = cross_validation.StratifiedKFold(y, 10, shuffle=True)\n\ndef cross_validate(design_matrix, labels, classifier, cv_folds):\n \"\"\" Perform a cross-validation and returns the predictions. \n \n Parameters:\n -----------\n design_matrix: (n_samples, n_features) np.array\n Design matrix for the experiment.\n labels: (n_samples, ) np.array\n Vector of labels.\n classifier: sklearn classifier object\n Classifier instance; must have the following methods:\n - fit(X, y) to train the classifier on the data X, y\n - predict_proba(X) to apply the trained classifier to the data X and return probability estimates \n cv_folds: sklearn cross-validation object\n Cross-validation iterator.\n \n Return:\n -------\n pred: (n_samples, ) np.array\n Vectors of predictions (same order as labels).\n \"\"\"\n pred = np.zeros(labels.shape)\n for tr, te in cv_folds:\n # Restrict data to train/test folds\n Xtr = design_matrix[tr, :]\n ytr = labels[tr]\n Xte = design_matrix[te, :]\n #print Xtr.shape, ytr.shape, Xte.shape\n\n # Fit classifier\n classifier.fit(Xtr, ytr)\n\n # Predict probabilities (of belonging to +1 class) on test data\n yte_pred = classifier.predict_proba(Xte)\n index_of_class_1 = 1 - ytr[0] # 0 if the first sample is positive, 1 otherwise\n pred[te] = yte_pred[:, index_of_class_1]\n return pred",
"1. Decision trees\nQuestion Cross-validate 5 different decision trees, with default parameters.",
"from sklearn import tree\nfrom sklearn import metrics\n# Use: clf = tree.DecisionTreeClassifier()\n\nypred_dt = [] # will hold the 5 arrays of predictions (1 per tree)\nfor tree_index in range(5):\n # TODO",
"Question Compute the mean and standard deviation of the area under the ROC curve of these 5 trees. Plot the ROC curves of these 5 trees.",
"fpr_dt = [] # will hold the 5 arrays of false positive rates (1 per tree)\ntpr_dt = [] # will hold the 5 arrays of true positive rates (1 per tree)\nauc_dt = [] # will hold the 5 areas under the ROC curve (1 per tree)\nfor tree_index in range(5):\n # TODO\n \nfor tree_index in range(4):\n plt.plot(fpr_dt[tree_index], tpr_dt[tree_index], '-', color='orange') \nplt.plot(fpr_dt[-1], tpr_dt[-1], '-', color='orange', \n label='DT (AUC = %0.2f (+/- %0.2f))' % (np.mean(auc_dt), np.std(auc_dt)))\n\nplt.xlabel('False Positive Rate', fontsize=16)\nplt.ylabel('True Positive Rate', fontsize=16)\nplt.title('ROC curves', fontsize=16)\nplt.legend(loc=\"lower right\")",
"Question What parameters of DecisionTreeClassifier can you play with to define trees differently than with the default parameters? Cross-validate these using a grid search, and plot the optimal decision tree on the previous plot. Did you manage to improve performance?",
"from sklearn import grid_search\nparam_grid = # TODO\nclf = grid_search.GridSearchCV(tree.DecisionTreeClassifier(), param_grid, \n scoring='roc_auc')\nypred_dt_opt = cross_validate(X, y, clf, folds)\nfpr_dt_opt, tpr_dt_opt, thresholds = metrics.roc_curve(y, ypred_dt_opt, pos_label=1)\nauc_dt_opt = metrics.auc(fpr_dt_opt, tpr_dt_opt)\n\n# Plot the 5 decision trees from earlier\nfor tree_index in range(4):\n plt.plot(fpr_dt[tree_index], tpr_dt[tree_index], '-', color='blue') \nplt.plot(fpr_dt[-1], tpr_dt[-1], '-', color='blue', \n label='DT (AUC = %0.2f (+/- %0.2f))' % (np.mean(auc_dt), np.std(auc_dt)))\n# Plot the optimized decision tree \nplt.plot(fpr_dt_opt, tpr_dt_opt, color='orange', label='DT optimized (AUC=%0.2f)' % auc)\n\nplt.xlabel('False Positive Rate', fontsize=16)\nplt.ylabel('True Positive Rate', fontsize=16)\nplt.title('ROC curves', fontsize=16)\nplt.legend(loc=\"lower right\")",
"Question How does the performance of decision trees compare to the performance of classifiers we have used previously on this data? Does this match your expectations?\n2. Bagging trees\nWe will resort to ensemble methods to try to improve the performance of single decision trees. Let us start with bagging trees: The different trees are to be built using a bagging sample of the data, that is to say, a sample built by using as many data points, drawn with replacement from the original data.\nNote: Bagging trees and random forests start making sense when using large number of trees (several hundreds). This is computationally more intensive, especially when the number of features is large, as in this lab. For the sake of computational time, I suggested using small numbers of trees, but you might want to repeat this lab for larger number of trees at home.\nQuestion Cross-validate a bagging ensemble of 5 decision trees on the data. Plot the resulting ROC curve, compared to the 5 decision trees you trained earlier.",
"from sklearn import ensemble\n# By default, the base estimator is a decision tree with default parameters\n# TODO: Use clf = ensemble.BaggingClassifier(n_estimators=5) \n",
"Question Use cross_validate_optimize (as defined in the previous lab) to optimize the number of decision trees to use in the bagging method. How many trees did you find to be an optimal choice?",
"def cross_validate_optimize(design_matrix, labels, classifier, cv_folds):\n \"\"\" Perform a cross-validation and returns the predictions. \n \n Parameters:\n -----------\n design_matrix: (n_samples, n_features) np.array\n Design matrix for the experiment.\n labels: (n_samples, ) np.array\n Vector of labels.\n classifier: sklearn GridSearchCV object\n GridSearchCV instance; must have the following methods/attributes:\n - fit(X, y) to train the classifier on the data X, y\n - predict_proba(X) to apply the trained classifier to the data X and return probability estimates \n cv_folds: sklearn cross-validation object\n - best_params_ the best parameter dictionary\n Cross-validation iterator.\n \n Return:\n -------\n pred: (n_samples, ) np.array\n Vector of predictions (same order as labels).\n \"\"\"\n pred = np.zeros(labels.shape)\n for tr, te in cv_folds:\n # Restrict data to train/test folds\n Xtr = design_matrix[tr, :]\n ytr = labels[tr]\n Xte = design_matrix[te, :]\n #print Xtr.shape, ytr.shape, Xte.shape\n\n # Fit classifier\n classifier.fit(Xtr, ytr)\n \n # Print best parameter\n print classifier.best_params_\n\n # Predict probabilities (of belonging to +1 class) on test data\n yte_pred = classifier.predict_proba(Xte)\n index_of_class_1 = 1 - ytr[0] # 0 if the first sample is positive, 1 otherwise\n pred[te] = yte_pred[:, index_of_class_1] \n return pred\n\nparam_grid = {'n_estimators': [5, 15, 25, 50]}\n# TODO",
"Question Plot the ROC curve of the optimized cross-validated bagging tree classifier obtained with cross_validate_optimize, and compare it to the previous ROC curves (non-optimized bagging tree, decision trees). \n3. Random forests\nWe will now use random forests.\nQuestion What is the difference between bagging trees and random forests?\nQuestion Cross-validate a random forest of 5 decision trees on the data. Plot the resulting ROC curve, compared to the 5 decision trees you trained earlier, and the bagging tree made of 5 decision trees.",
"clf = ensemble.RandomForestClassifier(n_estimators=5) \n# TODO",
"Question Use cross_validate_optimize (as defined in the previous lab) to optimize the number of decision trees to use in the random forest. How many trees do you find to be an optimal choice? How does the optimal random forest compare to the optimal bagging trees? How do the training times of the random forest and the bagging trees compare?",
"param_grid = {'n_estimators': [5, 15, 25, 50]}\n# TODO",
"Question How do your tree-based classifiers compare to the linear regression (regularized or not)? Plot ROC curves.",
"from sklearn import linear_model\nparam_grid = {'C':[1e-3, 1e-2, 1e-1, 1., 1e2, 1e3]}\nclf = grid_search.GridSearchCV(linear_model.LogisticRegression(penalty='l1'), \n param_grid, scoring='roc_auc')\nypred_l1 = cross_validate_optimize(X, y, clf, folds)\nfpr_l1, tpr_l1, thresholds_l1 = metrics.roc_curve(y, ypred_l1, pos_label=1)\n\nclf = grid_search.GridSearchCV(linear_model.LogisticRegression(penalty='l2'), \n param_grid, scoring='roc_auc')\nypred_l2 = cross_validate_optimize(X, y, clf, folds)\nfpr_l2, tpr_l2, thresholds_l2 = metrics.roc_curve(y, ypred_l2, pos_label=1)\n\n# TODO\n\nplt.xlabel('False Positive Rate', fontsize=16)\nplt.ylabel('True Positive Rate', fontsize=16)\nplt.title('ROC curves', fontsize=16)\nplt.legend(loc=\"lower right\")",
"Kaggle challenge\nYou can find the documentation for tree-based regression here: \n* What parameters can you change?\n* Cross-validate several different tree-based regressors (trees and tree ensembles) on your data, using the folds you previously set up. How do the different variants of decision trees compare to each other? How do they compare to performance obtained with other algorithms?\n* Submit predictions to the leaderboard for the best of your tree-based models. Do the results on the leaderboard data match your expectations?"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
isamu-isozaki/Machine-Learning
|
Machine learning Nanodegree/student_intervention/student_intervention.ipynb
|
mit
|
[
"Machine Learning Engineer Nanodegree\nSupervised Learning\nProject 2: Building a Student Intervention System\nWelcome to the second project of the Machine Learning Engineer Nanodegree! In this notebook, some template code has already been provided for you, and it will be your job to implement the additional functionality necessary to successfully complete this project. Sections that begin with 'Implementation' in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!\nIn addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a 'Question X' header. Carefully read each question and provide thorough answers in the following text boxes that begin with 'Answer:'. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide. \n\nNote: Code and Markdown cells can be executed using the Shift + Enter keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.\n\nQuestion 1 - Classification vs. Regression\nYour goal for this project is to identify students who might need early intervention before they fail to graduate. Which type of supervised learning problem is this, classification or regression? Why?\nAnswer: \nExploring the Data\nRun the code cell below to load necessary Python libraries and load the student data. Note that the last column from this dataset, 'passed', will be our target label (whether the student graduated or didn't graduate). All other columns are features about each student.",
"# Import libraries\nimport numpy as np\nimport pandas as pd\nfrom time import time\nfrom sklearn.metrics import f1_score\n\n# Read student data\nstudent_data = pd.read_csv(\"student-data.csv\")\nprint \"Student data read successfully!\"",
"Implementation: Data Exploration\nLet's begin by investigating the dataset to determine how many students we have information on, and learn about the graduation rate among these students. In the code cell below, you will need to compute the following:\n- The total number of students, n_students.\n- The total number of features for each student, n_features.\n- The number of those students who passed, n_passed.\n- The number of those students who failed, n_failed.\n- The graduation rate of the class, grad_rate, in percent (%).",
"# TODO: Calculate number of students\nn_students = None\n\n# TODO: Calculate number of features\nn_features = None\n\n# TODO: Calculate passing students\nn_passed = None\n\n# TODO: Calculate failing students\nn_failed = None\n\n# TODO: Calculate graduation rate\ngrad_rate = None\n\n# Print the results\nprint \"Total number of students: {}\".format(n_students)\nprint \"Number of features: {}\".format(n_features)\nprint \"Number of students who passed: {}\".format(n_passed)\nprint \"Number of students who failed: {}\".format(n_failed)\nprint \"Graduation rate of the class: {:.2f}%\".format(grad_rate)",
"Preparing the Data\nIn this section, we will prepare the data for modeling, training and testing.\nIdentify feature and target columns\nIt is often the case that the data you obtain contains non-numeric features. This can be a problem, as most machine learning algorithms expect numeric data to perform computations with.\nRun the code cell below to separate the student data into feature and target columns to see if any features are non-numeric.",
"# Extract feature columns\nfeature_cols = list(student_data.columns[:-1])\n\n# Extract target column 'passed'\ntarget_col = student_data.columns[-1] \n\n# Show the list of columns\nprint \"Feature columns:\\n{}\".format(feature_cols)\nprint \"\\nTarget column: {}\".format(target_col)\n\n# Separate the data into feature data and target data (X_all and y_all, respectively)\nX_all = student_data[feature_cols]\ny_all = student_data[target_col]\n\n# Show the feature information by printing the first five rows\nprint \"\\nFeature values:\"\nprint X_all.head()",
"Preprocess Feature Columns\nAs you can see, there are several non-numeric columns that need to be converted! Many of them are simply yes/no, e.g. internet. These can be reasonably converted into 1/0 (binary) values.\nOther columns, like Mjob and Fjob, have more than two values, and are known as categorical variables. The recommended way to handle such a column is to create as many columns as possible values (e.g. Fjob_teacher, Fjob_other, Fjob_services, etc.), and assign a 1 to one of them and 0 to all others.\nThese generated columns are sometimes called dummy variables, and we will use the pandas.get_dummies() function to perform this transformation. Run the code cell below to perform the preprocessing routine discussed in this section.",
"def preprocess_features(X):\n ''' Preprocesses the student data and converts non-numeric binary variables into\n binary (0/1) variables. Converts categorical variables into dummy variables. '''\n \n # Initialize new output DataFrame\n output = pd.DataFrame(index = X.index)\n\n # Investigate each feature column for the data\n for col, col_data in X.iteritems():\n \n # If data type is non-numeric, replace all yes/no values with 1/0\n if col_data.dtype == object:\n col_data = col_data.replace(['yes', 'no'], [1, 0])\n\n # If data type is categorical, convert to dummy variables\n if col_data.dtype == object:\n # Example: 'school' => 'school_GP' and 'school_MS'\n col_data = pd.get_dummies(col_data, prefix = col) \n \n # Collect the revised columns\n output = output.join(col_data)\n \n return output\n\nX_all = preprocess_features(X_all)\nprint \"Processed feature columns ({} total features):\\n{}\".format(len(X_all.columns), list(X_all.columns))",
"Implementation: Training and Testing Data Split\nSo far, we have converted all categorical features into numeric values. For the next step, we split the data (both features and corresponding labels) into training and test sets. In the following code cell below, you will need to implement the following:\n- Randomly shuffle and split the data (X_all, y_all) into training and testing subsets.\n - Use 300 training points (approximately 75%) and 95 testing points (approximately 25%).\n - Set a random_state for the function(s) you use, if provided.\n - Store the results in X_train, X_test, y_train, and y_test.",
"# TODO: Import any additional functionality you may need here\n\n# TODO: Set the number of training points\nnum_train = None\n\n# Set the number of testing points\nnum_test = X_all.shape[0] - num_train\n\n# TODO: Shuffle and split the dataset into the number of training and testing points above\nX_train = None\nX_test = None\ny_train = None\ny_test = None\n\n# Show the results of the split\nprint \"Training set has {} samples.\".format(X_train.shape[0])\nprint \"Testing set has {} samples.\".format(X_test.shape[0])",
"Training and Evaluating Models\nIn this section, you will choose 3 supervised learning models that are appropriate for this problem and available in scikit-learn. You will first discuss the reasoning behind choosing these three models by considering what you know about the data and each model's strengths and weaknesses. You will then fit the model to varying sizes of training data (100 data points, 200 data points, and 300 data points) and measure the F<sub>1</sub> score. You will need to produce three tables (one for each model) that shows the training set size, training time, prediction time, F<sub>1</sub> score on the training set, and F<sub>1</sub> score on the testing set.\nThe following supervised learning models are currently available in scikit-learn that you may choose from:\n- Gaussian Naive Bayes (GaussianNB)\n- Decision Trees\n- Ensemble Methods (Bagging, AdaBoost, Random Forest, Gradient Boosting)\n- K-Nearest Neighbors (KNeighbors)\n- Stochastic Gradient Descent (SGDC)\n- Support Vector Machines (SVM)\n- Logistic Regression\nQuestion 2 - Model Application\nList three supervised learning models that are appropriate for this problem. For each model chosen\n- Describe one real-world application in industry where the model can be applied. (You may need to do a small bit of research for this — give references!) \n- What are the strengths of the model; when does it perform well? \n- What are the weaknesses of the model; when does it perform poorly?\n- What makes this model a good candidate for the problem, given what you know about the data?\nAnswer: \nSetup\nRun the code cell below to initialize three helper functions which you can use for training and testing the three supervised learning models you've chosen above. 
The functions are as follows:\n- train_classifier - takes as input a classifier and training data and fits the classifier to the data.\n- predict_labels - takes as input a fit classifier, features, and a target labeling and makes predictions using the F<sub>1</sub> score.\n- train_predict - takes as input a classifier, and the training and testing data, and performs train_clasifier and predict_labels.\n - This function will report the F<sub>1</sub> score for both the training and testing data separately.",
"def train_classifier(clf, X_train, y_train):\n ''' Fits a classifier to the training data. '''\n \n # Start the clock, train the classifier, then stop the clock\n start = time()\n clf.fit(X_train, y_train)\n end = time()\n \n # Print the results\n print \"Trained model in {:.4f} seconds\".format(end - start)\n\n \ndef predict_labels(clf, features, target):\n ''' Makes predictions using a fit classifier based on F1 score. '''\n \n # Start the clock, make predictions, then stop the clock\n start = time()\n y_pred = clf.predict(features)\n end = time()\n \n # Print and return results\n print \"Made predictions in {:.4f} seconds.\".format(end - start)\n return f1_score(target.values, y_pred, pos_label='yes')\n\n\ndef train_predict(clf, X_train, y_train, X_test, y_test):\n ''' Train and predict using a classifer based on F1 score. '''\n \n # Indicate the classifier and the training set size\n print \"Training a {} using a training set size of {}. . .\".format(clf.__class__.__name__, len(X_train))\n \n # Train the classifier\n train_classifier(clf, X_train, y_train)\n \n # Print the results of prediction for both training and testing\n print \"F1 score for training set: {:.4f}.\".format(predict_labels(clf, X_train, y_train))\n print \"F1 score for test set: {:.4f}.\".format(predict_labels(clf, X_test, y_test))",
"Implementation: Model Performance Metrics\nWith the predefined functions above, you will now import the three supervised learning models of your choice and run the train_predict function for each one. Remember that you will need to train and predict on each classifier for three different training set sizes: 100, 200, and 300. Hence, you should expect to have 9 different outputs below — 3 for each model using the varying training set sizes. In the following code cell, you will need to implement the following:\n- Import the three supervised learning models you've discussed in the previous section.\n- Initialize the three models and store them in clf_A, clf_B, and clf_C.\n - Use a random_state for each model you use, if provided.\n - Note: Use the default settings for each model — you will tune one specific model in a later section.\n- Create the different training set sizes to be used to train each model.\n - Do not reshuffle and resplit the data! The new training points should be drawn from X_train and y_train.\n- Fit each model with each training set size and make predictions on the test set (9 in total).\nNote: Three tables are provided after the following code cell which can be used to store your results.",
"# TODO: Import the three supervised learning models from sklearn\n# from sklearn import model_A\n# from sklearn import model_B\n# from skearln import model_C\n\n# TODO: Initialize the three models\nclf_A = None\nclf_B = None\nclf_C = None\n\n# TODO: Set up the training set sizes\nX_train_100 = None\ny_train_100 = None\n\nX_train_200 = None\ny_train_200 = None\n\nX_train_300 = None\ny_train_300 = None\n\n# TODO: Execute the 'train_predict' function for each classifier and each training set size\n# train_predict(clf, X_train, y_train, X_test, y_test)",
"Tabular Results\nEdit the cell below to see how a table can be designed in Markdown. You can record your results from above in the tables provided.\n Classifer 1 - ? \n| Training Set Size | Training Time | Prediction Time (test) | F1 Score (train) | F1 Score (test) |\n| :---------------: | :---------------------: | :--------------------: | :--------------: | :-------------: |\n| 100 | | | | |\n| 200 | EXAMPLE | | | |\n| 300 | | | | EXAMPLE |\n Classifer 2 - ? \n| Training Set Size | Training Time | Prediction Time (test) | F1 Score (train) | F1 Score (test) |\n| :---------------: | :---------------------: | :--------------------: | :--------------: | :-------------: |\n| 100 | | | | |\n| 200 | EXAMPLE | | | |\n| 300 | | | | EXAMPLE |\n Classifer 3 - ? \n| Training Set Size | Training Time | Prediction Time (test) | F1 Score (train) | F1 Score (test) |\n| :---------------: | :---------------------: | :--------------------: | :--------------: | :-------------: |\n| 100 | | | | |\n| 200 | | | | |\n| 300 | | | | |\nChoosing the Best Model\nIn this final section, you will choose from the three supervised learning models the best model to use on the student data. You will then perform a grid search optimization for the model over the entire training set (X_train and y_train) by tuning at least one parameter to improve upon the untuned model's F<sub>1</sub> score. \nQuestion 3 - Choosing the Best Model\nBased on the experiments you performed earlier, in one to two paragraphs, explain to the board of supervisors what single model you chose as the best model. Which model is generally the most appropriate based on the available data, limited resources, cost, and performance?\nAnswer: \nQuestion 4 - Model in Layman's Terms\nIn one to two paragraphs, explain to the board of directors in layman's terms how the final model chosen is supposed to work. 
Be sure that you are describing the major qualities of the model, such as how the model is trained and how the model makes a prediction. Avoid using advanced mathematical or technical jargon, such as describing equations or discussing the algorithm implementation.\nAnswer: \nImplementation: Model Tuning\nFine tune the chosen model. Use grid search (GridSearchCV) with at least one important parameter tuned with at least 3 different values. You will need to use the entire training set for this. In the code cell below, you will need to implement the following:\n- Import sklearn.grid_search.gridSearchCV and sklearn.metrics.make_scorer.\n- Create a dictionary of parameters you wish to tune for the chosen model.\n - Example: parameters = {'parameter' : [list of values]}.\n- Initialize the classifier you've chosen and store it in clf.\n- Create the F<sub>1</sub> scoring function using make_scorer and store it in f1_scorer.\n - Set the pos_label parameter to the correct value!\n- Perform grid search on the classifier clf using f1_scorer as the scoring method, and store it in grid_obj.\n- Fit the grid search object to the training data (X_train, y_train), and store it in grid_obj.",
"# TODO: Import 'GridSearchCV' and 'make_scorer'\n\n# TODO: Create the parameters list you wish to tune\nparameters = None\n\n# TODO: Initialize the classifier\nclf = None\n\n# TODO: Make an f1 scoring function using 'make_scorer' \nf1_scorer = None\n\n# TODO: Perform grid search on the classifier using the f1_scorer as the scoring method\ngrid_obj = None\n\n# TODO: Fit the grid search object to the training data and find the optimal parameters\ngrid_obj = None\n\n# Get the estimator\nclf = grid_obj.best_estimator_\n\n# Report the final F1 score for training and testing after parameter tuning\nprint \"Tuned model has a training F1 score of {:.4f}.\".format(predict_labels(clf, X_train, y_train))\nprint \"Tuned model has a testing F1 score of {:.4f}.\".format(predict_labels(clf, X_test, y_test))",
"Question 5 - Final F<sub>1</sub> Score\nWhat is the final model's F<sub>1</sub> score for training and testing? How does that score compare to the untuned model?\nAnswer: \n\nNote: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to\nFile -> Download as -> HTML (.html). Include the finished document along with this notebook as your submission."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
Reproducible-Science-Curriculum/introduction-RR-Jupyter
|
notebooks/Workshop slides - using the notebooks.ipynb
|
cc0-1.0
|
[
"Working with Notebooks\nYou should already have Jupyter notebooks installed or remotely accessible...\nStart the notebook server\nopen the notebook homepage\nand create a new notebook.\nCode Cells\nA new notebook contains a single, executable code cell.\nAdd a line of code and execute it by:\n- clicking the run button, or\n- click in the cell, and press shift-return",
"print('hello world')",
"Adding Narrative - Markdown Cells\nAdd a new cell to the notebook: click the + button on the toolbar\nChange the cell type using the drop down list in the toolbar or by using the ESC-M keyboard shortcut.\nTo \"open\" or select a markdown cell for editing, double click the cell.\nView the rendered markdown by running the cell:\n- hit the play button on the toolbar, or\n- use the SHIFT-RETURN keyboard shortcut.\nSimple Markdown 1\nMarkdown cells use markdown to generate formatted text:\n\ninline styles\nemphasise text\nstrongly emphasise text \n\nSub-headings\nMarkdown can also show inline code styles as well as code blocks:\ndef mycode():\n ''' Here is my non-executable code '''\n pass\nSo what does the actual markdown look like?",
"# Simple Markdown 1\n\nMarkdown cells use markdown to generate formatted text:\n\n- inline styles\n - *emphasise text*\n - __strongly emphasise__ text \n \n## Sub-headings\nMarkdown can also show `inline code` styles as well as code blocks:\n\n````\ndef mycode():\n ''' Here is my non-executable code '''\n pass\n````",
"Simple Markdown 2\nMarkdown can include weblinks, eg to Data Carpentry, as well as links to named elements in the same notebook, or other notebooks.\nMarkdown can embed images:\nJupyter logo\nSo how do we do that?",
"<a name=\"\"></a>\n# Simple Markdown 2\n\nMarkdown can include weblinks, eg to [Data Carpentry](https://datacarpentry.org), as well as links to named elements [in the same notebook](#navlink), or [other notebooks](path/example.ipynb#exampleNavlink).\n\nMarkdown can embed images:\n\n[Jupyter logo](./jupyter-logo.png)",
"Markdown cells can include Latex Expressions\nMathematical expessions can be rendered inline by wrapping a LaTeX expression (no spaces) with a $ either side.\n$e^x=\\sum_{i=0}^\\infty \\frac{1}{i!}x^i$\nis rendered inline: $e^x=\\sum_{i=0}^\\infty \\frac{1}{i!}x^i$\nUse $$ to render in the centre of a new line: $$e^x=\\sum_{i=0}^\\infty \\frac{1}{i!}x^i$$\nNavigating and Selecting Cells\nTo select a cell, click on it. The selected cell will be surrounded by a box with the left hand side highlighted.\nMove the selection focus to the cell above/below using the keyboard up/down arrow keys.\nSelect multiple adjacent cells using SHIFT-UP ARROW or SHIFT-DOWN ARROW\nManaging Cells - Add, Delete\nAdd a new cell to the notebook by:\n - click the + button on the toolbar\n - Insert -> Insert Cell Above or ESC-A\n - Insert -> Insert Cell Below or ESC-B\nDelete a cell by selecting it and:\n - click the scissors button on the toolbar\n - Edit -> Delete cells or ESC-X\nUndelete the last deleted cell:\n- Edit -> Undo Delete cells or ESC-Z\nManaging Cells - Reorder\nReorder cells by:\n- moving them up and down the notebook using the up and down arrows on the toolbar\n- Edit -> Move Cell Up or Edit -> Move Cell Down \n- cutting and pasting them:\n - Edit - >Cut or Edit->Paste Cells Above or Edit->Paste Cells Below\n - on the toolbar, Cut selected cells then Paste selected cells\nYou can also copy selected cells from the toolbar, Edit -> Copy Cells or ESC-C.\nManaging Cells - Merging and Splitting\nSplitting overlong cells: Edit -> Split Cell\nMerging adjacent cells: Edit -> Merge Cell Above or Edit -> Merge Cell Below.\nCell outputs\nIf the last line of code produces an output, the output will be embedded in the notebook below the code cell:",
"a=1\nb=2\n\na+b",
"We Can Run a Cell Multiple Times\nEach time the cell us run, the state of the underlying python process is updated, even if the visual display of other cells in the notebook is not.",
"print(a)\n\n#Run this cell multiple times\na=a+1\na",
"Working code cells harder\nWe can import packages as you might expect:",
"import numpy as np\n\nnp.pi",
"Code cells also act as a commandline prompt - prefix with a !",
"! ls *.ipynb # Linux / Mac;\n#for windows: ! dir *.ipynb",
"Line numbering in code cells can be toggled with ESC-L.\nIPython Cell Magics\n\n%matplotlib inline: enable inline display of matplotlib generated graphics\n%whos: display a list of variables and their values as set in the kernel\n%env: display a list of environment variables and their current values in the host environment.\n\nCode cells can produce rich output too\nCode cell outputs can render tables and charts:",
"import pandas as pd\n\npd.DataFrame({'col1':['x','y'],'col2':[1,2]})\n\n%matplotlib inline\n\nimport matplotlib.pyplot as plt\n\n# Create 1000 evenly-spaced values from 0 to 2 pi\nx = np.linspace(0, 2*np.pi, 1000) \n\n#Plot a sine wave over those values\ny = np.sin(x)\n\nplt.plot(x, y)\n\n#You can prevent the display of object details returned from the plot by:\n## - adding a semi-colon (;) at the end of the final statement",
"Notebooks Can be Support Interactive Widgets that Help You Explore a Dataset\nIf you reate a function that accepts one or more parameters, you may be able to use it as the basis of an automatically generated application.\nFor example, suppose we have a function that will plot a sine wave over the range 0..2 pi for a specified frequency, passed into the function as a parameter:",
"#If no frequency value is specified, use the default setting: f=1\n\ndef sinplot(f=1):\n #Define a range of x values\n x = np.linspace(0, 2*np.pi, 1000) \n\n #Plot a sine wave with the specified frequency over that range\n y = np.sin(f*x)\n\n #Plot the chart\n plt.plot(x, y)\n \nsinplot(f=3)",
"Using ipywidgets interact()\nPass the name of your function, and the default values of the parameters, to the ipywidgets interact() function to automatically create interactive widgets to control the parameter values.",
"from ipywidgets import interact\n\ninteract(sinplot, f=5)",
"You can also specify the range of values Applied to an interact() slider:\ninteract(sinplot, f=[0,20])\nOr the range of values and the step size:\ninteract(sinplot, f=[0,20,5])\nChecking Reproducibility\nClear the output of a selected cell: Cell -> Current Output -> Clear\nClear the output of all cells in the notebook: Cell -> All Output -> Clear\nNote the the state of the underlying kernel will not be affected - only the rendered display in the notebook.\nRun multiple cells:\n\nCells -> Run All Above\nCells -> Run All Below\nCells -> Run All\n\nRun cells from scratch (i.e. from a fresh kernel), Kernel -> Restart and Clear Output and then run the cells you want.\nTo run all the cells in the notebook from scratch: Kernel -> Restart and Run All\nTroubleshooting\nTips and tricks for when it goes wrong...\nGetting Help\nFind help files for the notebooks from the Help menu.\nDisplay keyboard shortcuts using Help -> Keyboard Shortcuts or ESC-H.\nCode cells support autocomplete: start typing and then TAB to see what options are available...\nAccess documentation for a function - add a ? and run the cell:",
"pd.DataFrame?",
"Saving, Checkpointing and Reverting the Notebook\nThe notebook wil autosave every few minutes.\nYou can also create a checkpoint using the floppy/save icon on the toolbar or File -> Save and Checkpoint.\nYou can revert the notebook to a saved checkpoint using File -> Revert to Saved Checkpoint.\nPermanently Running Cells\nCode cells that are running (or queued for running) display an asterisk in the cell In [] indicator.\nTo stop execution of a running cell (and prevent queued cells from executing):\n- press the stop button on the toolbar\n- Kernel -> Interrupt\nIf the notebook is still hanging, you may need to restart the kernel: Kernel -> Restart\nAnd finally...\nThis slide deck was produced from a notebook styled as a slideeck:\n\nView -> Cell Toolbar -> Slideshow, and then either:\njupyter nbconvert slideTest.ipynb --to slides --post serve, or\njupyter nbconvert slideTest.ipynb --to slides && python -m SimpleHTTPServer 8000 #then go to localhost:8000 in browser"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
cosmolejo/Fisica-Experimental-3
|
Minimos_Cuadrados/.ipynb_checkpoints/Minimos_Cuadrados-checkpoint.ipynb
|
gpl-3.0
|
[
"Minimos Cuadrados\nPor: Alejandro Mesa Gómez, C.C. : 1017228006\nA-Mínimos cuadrados\n1- Grafique los datos (x,y) de la tabla.\n|X | Y |\n|------|------|\n|0.0 | 1.95 |\n|0.5 | 2.21 |\n|1.0 | 3.07 |\n|1.5 | 3.90 |\n|2.0 | 4.43 |\n|2.5 | 5.20 |\n|3.0 | 4.02 |\n|3.5 | 5.38 |\n|4.0 | 6.59 |\n|4.5 | 5.86 |\n|5.0 | 6.57|\n|5.5 | 6.36 |\n|6.0 | 6.67 |",
"########################################################\n## Librerias para el trabajo\n########################################################\nimport matplotlib.pyplot as plt\nimport numpy as np\n%matplotlib inline\n\n\ndata1= np.loadtxt('datos.csv',delimiter=',') #datos para regresion lineal\nX1=data1[:,0]\nY1=data1[:,1]\nprint \nprint 'grafica preliminar de los puntos: '\nfig=plt.figure()\nax=fig.add_subplot(111)\nax.plot(X1,Y1,'o')\nax.set_xlim(xmin=0.0, xmax=8)\nax.set_ylim(ymin=0.0, ymax=8)\nplt.show()",
"2- Aplique paso a paso el método de mínimos cuadrados de tal forma que le permita obtener la mejor curva lineal de ajuste de los datos anteriores, y determine las incertezas asociadas a los parámetros. Determine los coeficientes de correlación $ \\chi ^{2} $ y $ R ^{2} $. Reporte correctamente los parámetros con su incertidumbre y concluya sobre la conveniencia de la regresión lineal a partir de las correlaciones obtenidas.\na) $$a_{0}= \\frac{(\\sum x_{i}^{2})\\sum y_{i} - (\\sum x_{i})(\\sum x_{i}y_{i})}{n\\sum x_{i}^{2} - (\\sum x_{i})^{2}}$$",
"#n:\nn=len(X1)\n\n#suma xi cuadrado:\nsuma_xi2=0\nfor i in xrange(0,n):\n suma_xi2+=(X1[i]*X1[i])\n \n#suma yi cuadrado:\nsuma_yi2=0\nfor i in xrange(0,n):\n suma_yi2+=(Y1[i]*Y1[i])\n \n#suma xi simple:\nsuma_xi=0\nfor i in xrange(0,n):\n suma_xi+=(X1[i])\n \n#suma yi simple:\nsuma_yi=0\nfor i in xrange(0,n):\n suma_yi+=(Y1[i])\n \n#suma xi*yi:\nsuma_xiyi=0\nfor i in xrange(0,n):\n suma_xiyi+=(X1[i]*Y1[i])\n \n\na0=((suma_xi2*suma_yi)-(suma_xi*suma_xiyi))/(n*suma_xi2-(suma_xi*suma_xi))\nprint 'a0 = %.1f'%a0",
"b) $$a_{1}= \\frac{n\\sum x_{i}y_{i} - (\\sum x_{i})(\\sum y_{i})}{n\\sum x_{i}^{2} - (\\sum x_{i})^{2}}$$",
"a1=((n*suma_xiyi)-(suma_xi*suma_yi))/(n*suma_xi2-(suma_xi*suma_xi))\nprint 'a1 = %.1f'%a1",
"c) $$y= a_{0}+a_{1}x$$",
"x=np.linspace(X1[0],X1[-1],n)\ny=(a0 +a1*x)\n\nprint \nprint 'grafica de los puntos con ajuste: '\nfig=plt.figure()\nax=fig.add_subplot(111)\nax.plot(X1,Y1,'ro')\nax.plot(x,y,'b-')\nax.set_xlim(xmin=0.0, xmax=8)\nax.set_ylim(ymin=0.0, ymax=8)\nplt.show()",
"$$S_{y} = \\sqrt{\\frac{1}{n-2}\\sum_{i=1}^{n}(y_{i}-a_{0}-a_{1}x_{i})^{2}}$$ \n$$S_{my} = \\frac{S_{y}}{n^{1/2}}$$\n$$S_{ma0}^{2}= \\frac{S_{my}^{2}\\sum x_{i}^{2}}{n\\sum x_{i}^{2} - (\\sum x_{i})^{2}}$$\n$$ S_{ma1}^{2}= \\frac{n S_{my}^{2}}{n\\sum x_{i}^{2} - (\\sum x_{i})^{2}}$$",
"#desviacion estandar\n\nSy=0\nfor i in xrange(0,len(y)):\n Sy+= (y[i]-a0-a1*x[i])**2\n #print Sy\nSy*=(1/(len(y)-2.))\n#print Sy\nSy=Sy**(1/2)\nprint 'Sy %.1f'%Sy\n\n#error en y\nraiz= np.sqrt(n)\nSmy=Sy/(raiz)\nprint 'Smy %.1f'%Smy\n\n#error en a0\nS2_ma0=(Smy*Smy*suma_xi2)/(n*suma_xi2-(suma_xi*suma_xi))\nprint 'S2_ma0 %f'%S2_ma0\n\n#error en a0\nS2_ma1=(Smy*Smy*n)/(n*suma_xi2-(suma_xi*suma_xi))\nprint 'S2_ma0 %f'%S2_ma1\n ",
"$$a_{0}\\pm S_{ma0} $$ $$ a_{1}\\pm S_{ma1} $$",
"print 'a0 ± sma0: %f ± %f'%(a0,np.sqrt(S2_ma0))\nprint 'a1 ± sma1: %f ± %f'%(a1,np.sqrt(S2_ma1))",
"3- Grafique todas las posibles curvas de la regresión lineal teniendo en cuenta el error determinado para los parámetros. Concluya al respecto.",
"err_a0= np.sqrt(S2_ma0)\nerr_a1= np.sqrt(S2_ma1)\n\ny=(a0 +a1*x)\ny1=((a0+err_a0) +(a1+err_a1)*x)\ny2=((a0-err_a0) +(a1-err_a1)*x)\ny3=((a0+err_a0) +(a1-err_a1)*x)\ny4=((a0-err_a0) +(a1+err_a1)*x)\nprint \nprint 'grafica de los puntos con ajustes y errores: '\nprint 'es facil observar que todas las curvas posibles variando los errores, están muy cerca de la curva \"perfecta\", lo cual implica que el ajuste es bastante bueno y los datos tienen muy bajo error'\nfig=plt.figure()\nax=fig.add_subplot(111)\nax.plot(X1,Y1,'ro')\nax.plot(x,y,'b-')\nax.plot(x,y1,'-*')\nax.plot(x,y2,'-*')\nax.plot(x,y3,'--')\nax.plot(x,y4,'--')\nax.set_xlim(xmin=0.0, xmax=8)\nax.set_ylim(ymin=0.0, ymax=8)\nplt.show()",
"$ \\chi ^{2} $ : $$ \\chi ^{2} = \\sum_{i}^{n} \\frac{(Y_{observada}-Y_{teorica})^{2}}{Y_{teorica}}$$y $ R ^{2} $",
"#chi2\n\nchi2=0\n\nfor i in xrange(0,n):\n chi2=((y[i]-Y1[i])**2)/Y1[i]\n \nprint 'chi^2 = ',chi2\n\n# r2\n\nb=a1\nbprima=a1=((n*suma_xiyi)-(suma_xi*suma_yi))/(n*suma_yi2-(suma_yi*suma_yi))\n\nr2=b*bprima\nprint 'r^2 = ',r2",
"B- Ajuste de Curva.\nTabla de datos:\n| X | Y |\n|------|------|\n|1.0| 2.1|\n|2.0| 4.3|\n|3.0| 6.0|\n|4.0| 7.8|\n1 - Encontrar la función que mejor se ajuste a los datos.\nPruebe con las siguientes regresiones: LINEAL “y=ax + “b, y CUADRADA “$y=ax^{2}$”. Redefina la función cuadrada de forma que quede lineal y pueda usar todo lo que ya aplicó sobre regresión lineal."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
ianozsvald/ipython_memory_usage
|
src/ipython_memory_usage/examples/example_usage_np_pd.ipynb
|
bsd-2-clause
|
[
"Short demo of using ipython_memory_usage to diagnose numpy and Pandas RAM usage\nAuthor Ian uses this tool in his Higher Performance Python training (https://ianozsvald.com/training/) and it is mentioned in his High Performance Python (2nd ed, O'Reilly) technical book.\nWe can use it to understand how much RAM we're currently using and which of several alternate ways to solve a problem in complex tools might be the most RAM efficient solutions.\n\ntotal RAM usage is the current RAM usage at the end of that cell's execution\nused shows the difference between the last total RAM usage and this one\npeaked shows any during-execution peak above the resulting total RAM usage (i.e. hidden RAM usage that might catch you out)",
"import ipython_memory_usage\nhelp(ipython_memory_usage) # or ipython_memory_usage?\n\n%ipython_memory_usage_start",
"Importing packages uses some RAM",
"import numpy as np # note that importing a package will increase total RAM usage a little\n\nimport pandas as pd # note that importing Pandas uses more RAM than importing numpy\n\nimport string",
"Making a large array uses a predictable amount of RAM",
"# if we make a big array - 100M items * 8 byte floats, this cell\n# uses circa 800MB (often 760 MiB - note mibi-bytes as used in the underlying memory_profiler tool)\n# The total RAM usage grows by roughly this amount\narr = np.ones(100_000_000) \n\n# deleting arr reduces RAM usage by roughly the expected amount and\n# total RAM usage should drop back down\ndel arr\n\n# if we make it again, RAM usage goes up again\narr = np.ones(100_000_000) \n\ndel arr",
"Making a big random array takes RAM + time",
"# creating random items takes some time, after \"used ... RAM\" note \"3s\" or so for several seconds\narr = np.random.normal(size=100_000_000)\nprint(arr[:5], arr.dtype)",
"Intermediate calculations can cost additional temporary RAM\nNOTE this section may work different if you're on Windows (if so - please report back to Ian by raising a bug and noting the difference.\nOn some platforms, e.g. Linux as used here, temporary intermediates can be reused in-place reducing the overall memory allocation: https://docs.scipy.org/doc/numpy-1.13.0/release.html#highlights",
"pass\n\n# arr*2 and arr*3 both have to be stored somewhere before the division can occur\n# so two more circa 762MiB arrays are made temporarily, this is reported\n# as \"peaked 762MiB above current\"\n# before they can be discard. arr_result references the final result\n# so overall we add 762MiB to the process\n# we only add 762MiB, not 762MiB*2, as on Linux we can intelligently reuse\n# one of the temporaries (else we'd peak at 762*2 MiB)\n\n# we report \"used 762...MiB\" as the final arr_result adds this to the process\n# so overall we're now _at_ 1.6GB but we actually peaked at 1.6+0.7 == 2.3GB \n# whilst this cell executed\n# if your code crashes with an out of memory exception, it could be caused\n# by a situation like this\narr_result = (arr * 2) / (arr * 3)\n\ndel arr\n\ndel arr_result",
"Pandas DataFrames can be costly on RAM\nExample with deleting columns\nProps to Jamie Brunning for this example",
"pass\n\narr_several_cols = np.random.normal(size=(100_000_000, 4))\n\narr_several_cols.shape\n\nf\"Cost per column {int(arr_several_cols.data.nbytes / arr_several_cols.shape[1]):,} bytes\"\n\n# The DataFrame in this case is a thin wrapper over the numpy array\n# and costs little extra RAM\ndf = pd.DataFrame(arr_several_cols, columns=list(string.ascii_lowercase)[:arr_several_cols.shape[1]])\ndf.info()\n\n# use Jupyter's xdel to remove all references of our expensive array, just in case\n# (but not in this case) it is also referred to in an Out[] history item\n%xdel arr_several_cols\n\ndf.info()\n\n# using del is surprisingly expensive\n# total RAM usage goes up by circa 1.5GB-2GB (>2x the cost of 1 column) \n# DOES ANYONE KNOW WHAT'S HAPPENING BEHIND THE SCENES HERE?\n# THE NEXT 2 CELLS SHOW IT ISN'T BEING QUICKLY GARBAGE COLLECTED\n# note also that using del seems to take more seconds than using df.drop (a few cells below)\n# possibly internally there's now (somehow) a 4-column original array _and_ a\n# 3 column resulting array (in the BlockManager?) costing 7-columns (i.e. circa 800MB*7 == circa 5.6GB)\ndel df['a']\n\n# we get no benefit by forcing a collection\nimport gc\ngc.collect()\n\ndf.info()\n\npass\n\n# using drop with inplace=False (the default) returns a copied DataFrame, if you don't use\n# this then maybe you end up with multiple DataFrames consuming RAM in a confusing fashion\n# e.g. 
you might have done `df2 = df.drop...` and then you've got the unmodified original\n# plus the modified df2 in the local namespace\n# We see total RAM usage drop by circa 800MB, the cost of 1 column, plus a lot more...\n# which is a mystery to me!\n# maybe the usage of drop forces a flush on any internal caching in pandas?\ndf = df.drop(columns=['b'])\n\ndf.info()\n\n# dropping in-place is probably more sensible, we recover another circa 800MB\ndf.drop(columns=['c'], inplace=True)\n\ndf.info()\n\npass\n\n# now we get back to where we were before we made the DataFrame and the array\ndf.drop(columns=['d'], inplace=True)",
"Diagnostics\n%xdel my_df will delete all references of my_df from the namespace including those in the Out[] history buffer, this does more cleaning than just using del my_df.\n%reset will reset all variables and imported modules, it is like starting a new kernel.",
"# %whos shows what's in the local namespace\n%whos\n\n# we can use %xdel to safely remove all references including those that might be (but not in this case)\n# in the Out[] history buffer\n%xdel df\n\n%ipython_memory_usage_stop"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
idwaker/git_python_session
|
session3.ipynb
|
unlicense
|
[
"Session 3",
"range(0, 9)\n\nfor i in range(0, 9):\n print(i)\n\nlist(range(0, 9))\n\nint('1')\n\nstr(1)\n\nint('a')\n\nlist('abcsd')\n\ntuple('kathmandu')\n\nlist(('255', '255', '255', '0'))\n\ntuple([255, 255, 254, 0])\n\ntuple({'abc': 123})\n\ndict(('name', 'hari'))\n\nlist({'abc': 123})\n\nfor item in {'a': 1}:\n print(item)\n\ndict([('name', 'hari')])\n\nlst = []\nfor i in range(0, 50):\n if i % 5 == 0:\n lst.append(i ** 2)\n\nlst\n\n# list comprehension\n[i**2 for i in range(0, 50) if i % 5 == 0]\n\nlst = [i**2 if i % 2 == 0 else i for i in range(0, 9)]\n\nlst\n\ni = 1\ni**2 if i % 2 == 0 else i\n\ni = 2\ni**2 if i % 2 == 0 else i\n\n# set comprehension\n{i for i in range(0, 19) if i % 2 == 0 or i % 3 == 0}\n\n# dictionary comprehension\n{i:i**2 for i in range(0, 19) if i % 2 == 0 or i % 3 == 0}\n\nfp = open('untitled.txt', 'r')\n\nfp\n\nopen('notafile.txt', 'r')\n\ndir(fp)\n\nfp.read()\n\nfp.read()\n\nfp.seek(0)\n\nfp.read()\n\nfp.seek(0)\n\nfp.readline()\n\nline = fp.readline()\n\nline\n\nline = line.strip()\n\nline\n\nline = line.strip(',')\n\nline\n\nitems = line.split(',')\n\nitems\n\nimport csv\n\nfp.seek(0)\n\nheaders = 'euname,modified,linked_country,iso3,iso2,grc,isonum,country,imperitive\\n'\n\nheaders = headers.strip().split(',')\n\nheaders\n\nreader = csv.DictReader(fp)\n\nreader\n\ndir(reader)\n\nreader.fieldnames\n\ncountries = list(reader)\n\ncountries\n\nafg = None\nfor row in countries:\n if row['iso3'] == 'AFG':\n afg = row\n\nafg",
"final output should be\n[('AFG', 'Afganistan'), ('NPL', 'Nepal') ...]",
"csvfile = open('untitled.txt', 'r')\n\ncsvfile\n\ncsvreader = csv.DictReader(csvfile)\n\ncountry_list = []\nfor country in csvreader:\n if country['iso3'] and country['country']:\n country_tup = (country['iso3'], country['country'])\n country_list.append(country_tup)\n\ncountry_list\n\nfp.close()\n\ncsvfile.close()\n\nwith open('newfile.csv', 'w') as csvfile:\n fieldnames = ['FirstName', 'LastName', 'Age']\n csvwriter = csv.DictWriter(csvfile, fieldnames=fieldnames)\n csvwriter.writeheader()\n csvwriter.writerow({'FirstName': 'J', 'LastName': 'k', 'Age': 20})\n csvwriter.writerow({'LastName': 'M', 'FirstName': 'n', 'Age': 24})\n csvwriter.writerows([\n {'LastName': 'A', 'FirstName': '', 'Age': 24},\n {'LastName': 'B', 'FirstName': 'L', 'Age': 25},\n {'LastName': 'B', 'FirstName': 'H', 'Age': 27}\n ])",
"Classes and Objects\n\n Dogs\n |\n -------------------\n | |\n Domestic Wild\n ------------------\n | |\n Semi Wild Feral ...\n\n Transport\n |\n-----------------------------------\n| | |\n\nLand Water Air\n |\n-------------- ...........\n| |\nBuses Train ...",
"class Animal:\n age = None\n\nclass Dog(Animal):\n pass\n\ngerman_shepherd = Dog()\n\ngerman_shepherd\n\ngerman_shepherd = Dog()\n\ngerman_shepherd\n\ngerman_shepherd.age = 22\n\ngerman_shepherd.age\n\nclass Dog(Animal):\n age = 18\n\ngerman_shepherd = Dog()\n\ngerman_shepherd.age\n\ngerman_shepherd.age = 21\n\ngerman_shepherd.age\n\ntibetan_husky = Dog()\n\ntibetan_husky.age\n\nclass Animal:\n age = None\n \n def is_alive(self):\n return True\n \n def sound(self):\n return 'Cry'\n\nclass Cat(Animal):\n \n def sound(self):\n return 'Meow'\n\ncat = Cat()\n\ncat.is_alive()\n\ncat.age\n\ncat.sound()",
"CaseStudy: Library Management",
"class Patron:\n firstname = None\n lastname = None\n address = None\n books = []\n \n def __repr__(self):\n return '{} {}'.format(self.firstname, self.lastname)\n \n def has_book(self, bookid):\n return bookid in self.books\n\nclass Student(Patron):\n faculty = None\n year = None\n \n def __init__(self, firstname, lastname, faculty, year):\n self.firstname = firstname\n self.lastname = lastname\n self.faculty = faculty\n self.year = year\n\nclass Staff(Patron):\n department = None\n\nclass Book:\n bookid = None\n isbn = None\n name = None\n \n def __init__(self, isbn, name):\n self.isbn = isbn\n self.name = name\n self.bookid = 'MYLIB' + ':' + self.isbn\n \n def __repr__(self):\n return self.bookid\n\nclass Teacher(Patron):\n faculty = None\n \n def __init__(self, firstname, lastname, faculty):\n self.firstname = firstname\n self.lastname = lastname\n self.faculty = faculty\n\nenglish = Book(isbn='abc-8236482374', name='Learning Engligh in 21 days.')\n\nmath = Book(isbn='mth-283482347', name='Quick Calculus.')\n\nnepali = Book(isbn='npl-293492834', name='बकमपबसिकमपवबसिकमप.')\n\nclass Shelve:\n books = []\n lended_books = []\n \n def is_book_available(self, bookid):\n return bookid in self.books\n \n def is_book_lendable(self, bookid):\n return bookid not in self.lended_books\n\nmy_library = Shelve()\n\n# Shelve.is_book_available(my_library, 'abc-123')\n\nmy_library.is_book_available('abc-123')\n\nmy_library.books = [english, math, nepali]\n\nmy_library.books\n\nram \n\nram = Student('Ram', 'Thapa', 'Biology', '2nd')\n\nram\n\nram.has_book('ajsdajsd')"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
solowPy/solowPy
|
examples/1 Getting started.ipynb
|
mit
|
[
"<div align='center' ><img src='https://raw.githubusercontent.com/davidrpugh/numerical-methods/master/images/sgpe-logo.jpg' width=\"1200\" height=\"100\"></div>\n<div align='right'><img src='https://raw.githubusercontent.com/davidrpugh/numerical-methods/master/images/SIRElogolweb.jpg' width=\"1200\" height=\"100\"></div>",
"import numpy as np\nimport sympy as sym\nimport solowpy",
"1 Creating an instance of the solow.Model class\nIn this notebook I will walk you through the creation of an instance of the solow.Model class. To create an instance of the solow.Model we must define two primitives: an aggregate production function and a dictionary of model parameter values.\n1.1 Defining the production function $F$:\nAt each point in time the economy in a Solow growth model has some amounts of capital, $K$, labor, $L$, and knowledge (or technology), $A$, that can be combined to produce output, $Y$, according to some function, $F$:\n$$ Y(t) = F(K(t), A(t)L(t)) \\tag{1.1.1} $$\nwhere $t$ denotes time. Note that $A$ and $L$ are assumed to enter multiplicatively. Typically $A(t)L(t)$ denotes \"effective labor\", and technology that enters in this fashion is known as labor-augmenting or \"Harrod neutral.\"\nA key assumption of the model is that the function $F$ exhibits constant returns to scale in capital and labor inputs. Specifically,\n$$ F(cK(t), cA(t)L(t)) = cF(K(t), A(t)L(t)) = cY(t) \\tag {1.1.2} $$\nfor any $c \\ge 0$. For reference, the above information is contained in the docstring of the solow.Model.output attribute.",
"solow.Model.output?",
"Examples:\nA common functional form for aggregate production in a Solow model that satisies the above assumptions is the Cobb-Douglas production function\n\\begin{equation}\n \\lim_{\\rho \\rightarrow 0} Y(t) = K(t)^{\\alpha}(A(t)L(t))^{1-\\alpha}. \\tag{1.1.3}\n\\end{equation}\nThe Cobb-Douglas production function is actually a special case of a more general class of production functions called constant elasticity of substitution (CES) production functions.\n\\begin{equation}\n Y(t) = \\bigg[\\alpha K(t)^{\\rho} + (1-\\alpha) (A(t)L(t))^{\\rho}\\bigg]^{\\frac{1}{\\rho}} \\tag{1.1.4}\n\\end{equation}\nwhere $0 < \\alpha < 1$ and $-\\infty < \\rho < 1$. The parameter $\\rho = \\frac{\\sigma - 1}{\\sigma}$ where $\\sigma$ is the elasticity of substitution between factors of production. Taking the limit of equation 1.2 as the elasticity of subsitution goes to unity (i.e., $\\sigma=1 \\implies \\rho=0$) recovers the Cobb-Douglas functional form.",
"# define model variables\nA, K, L = sym.symbols('A, K, L')\n\n# define production parameters\nalpha, sigma = sym.symbols('alpha, sigma')\n\n# define a production function\ncobb_douglas_output = K**alpha * (A * L)**(1 - alpha)\n\nrho = (sigma - 1) / sigma\nces_output = (alpha * K**rho + (1 - alpha) * (A * L)**rho)**(1 / rho)",
"1.2 Defining model parameters\nA generic Solow growth model has several parameters that need to be specified. To see which parameters are required, we can check the docstring of the solow.Model.params attribute.",
"solow.Model.params?",
"In addition to the standard parameters $g, n, s, \\delta$, one will also need to specify any required parameters for the production function. In order to make sure that parameter values are consistent with the models assumptions some basic validation of the solow.Model.params attribute is done when ever the attribute is set.",
"# these parameters look fishy...why?\ndefault_params = {'A0': 1.0, 'L0': 1.0, 'g': 0.0, 'n': -0.03, 's': 0.15,\n 'delta': 0.01, 'alpha': 0.33}\n\n# ...raises an AttributeError\nmodel = solowpy.Model(output=cobb_douglas_output, params=default_params)",
"Examples:\nHere are some examples of how one successfully creates an instance of the solow.Model class...",
"cobb_douglas_params = {'A0': 1.0, 'L0': 1.0, 'g': 0.02, 'n': 0.03, 's': 0.15,\n 'delta': 0.05, 'alpha': 0.33}\n\ncobb_douglas_model = solow.Model(output=cobb_douglas_output,\n params=cobb_douglas_params)\n\nces_params = {'A0': 1.0, 'L0': 1.0, 'g': 0.02, 'n': 0.03, 's': 0.15,\n 'delta': 0.05, 'alpha': 0.33, 'sigma': 0.95}\n\nces_model = solowpy.Model(output=ces_output, params=ces_params)",
"1.3 Other attributes of the solow.Model class\nThe intensive form of the production function\nThe assumption of constant returns to scale allows us to work with the intensive form of the aggregate production function, $F$. Defining $c=1/AL$ one can write\n$$ F\\bigg(\\frac{K}{AL}, 1\\bigg) = \\frac{1}{AL}F(A, K, L) \\tag{1.3.1} $$\nDefining $k=K/AL$ and $y=Y/AL$ to be capital per unit effective labor and output per unit effective labor, respectively, the intensive form of the production function can be written as\n$$ y = f(k). \\tag{1.3.2}$$\nAdditional assumptions are that $f$ satisfies $f(0)=0$, is concave (i.e., $f'(k) > 0, f''(k) < 0$), and satisfies the Inada conditions: $\\lim_{k \\rightarrow 0} = \\infty$ and $\\lim_{k \\rightarrow \\infty} = 0$. The <cite data-cite=\"inada1964\">(Inada, 1964)</cite> conditions are sufficient (but not necessary!) to ensure that the time path of capital per effective worker does not explode. Much of the above information is actually taken straight from the docstring for the solow.Model.intensive_output attribute.",
"solowpy.Model.intensive_output?\n\nces_model.intensive_output",
"One can numerically evaluate the intensive output for various values of capital stock (per unit effective labor) as follows...",
"ces_model.evaluate_intensive_output(np.linspace(1.0, 10.0, 25))",
"The marginal product of capital\nThe marginal product of capital is defined as follows:\n$$ \\frac{\\partial F(K, AL)}{\\partial K} \\equiv f'(k) \\tag{1.3.3}$$\nwhere $k=K/AL$ is capital stock (per unit effective labor).",
"solowpy.Model.marginal_product_capital?\n\nces_model.marginal_product_capital",
"One can numerically evaluate the marginal product of capital for various values of capital stock (per unit effective labor) as follows...",
"ces_model.evaluate_mpk(np.linspace(1.0, 10.0, 25))",
"Equation of motion for capital (per unit effective labor)\nBecause the economy is growing over time due to technological progress, $g$, and population growth, $n$, it makes sense to focus on the capital stock per unit effective labor, $k$, rather than aggregate physical capital, $K$. Since, by definition, $k=K/AL$, we can apply the chain rule to the time derative of $k$.\n\\begin{align}\n\\dot{k}(t) =& \\frac{\\dot{K}(t)}{A(t)L(t)} - \\frac{K(t)}{[A(t)L(t)]^2}\\bigg[\\dot{A}(t)L(t) + \\dot{L}(t)A(t)\\bigg] \\\n=& \\frac{\\dot{K}(t)}{A(t)L(t)} - \\bigg(\\frac{\\dot{A}(t)}{A(t)} + \\frac{\\dot{L}(t)}{L(t)}\\bigg)\\frac{K(t)}{A(t)L(t)} \\tag{1.3.4}\n\\end{align}\nBy definition, $k=K/AL$, and by assumption $\\dot{A}/A$ and $\\dot{L}/L$ are $g$ and $n$ respectively. Aggregate capital stock evolves according to\n$$ \\dot{K}(t) = sF(K(t), A(t)L(t)) - \\delta K(t). \\tag{1.3.5}$$\nSubstituting these facts into the above equation yields the equation of\nmotion for capital stock (per unit effective labor).\n\\begin{align}\n\\dot{k}(t) =& \\frac{sF(K(t), A(t)L(t)) - \\delta K(t)}{A(t)L(t)} - (g + n)k(t) \\\n=& \\frac{sY(t)}{A(t)L(t)} - (g + n + \\delta)k(t) \\\n=& sf(k(t)) - (g + n + \\delta)k(t) \\tag{1.3.6}\n\\end{align}\nThe above information is available for reference in the docstring for the solow.Model.k_dot attribute.",
"solowpy.Model.k_dot?\n\nces_model.k_dot",
"One can numerically evaluate the equation of motion for capital (per unit effective labor) for various values of capital stock (per unit effective labor) as follows...",
"ces_model.evaluate_k_dot(np.linspace(1.0, 10.0, 25))",
"1.4 Sub-classing the solow.Model class\nSeveral commonly used functional forms for aggregate production, including both the Cobb-Douglas and Constant Elasticity of Substitution (CES) production functions, have been sub-classed from solow.Model. For these functional forms, one only needs to specify a valid dictionary of model parameters.",
"solowpy.cobb_douglas?\n\ncobb_douglas_model = solowpy.CobbDouglasModel(params=cobb_douglas_params)\n\nsolowpy.ces?\n\nces_model = solowpy.CESModel(params=ces_params)",
"Now that you understand the basics, we can move on to finding the steady state of the Solow growth model."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
dracolytch/ml-agents
|
python/Basics.ipynb
|
apache-2.0
|
[
"Unity ML Agents\nEnvironment Basics\nThis notebook contains a walkthrough of the basic functions of the Python API for Unity ML Agents. For instructions on building a Unity environment, see here.\n1. Load dependencies",
"import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom unityagents import UnityEnvironment\n\n%matplotlib inline",
"2. Set environment parameters\nBe sure to set env_name to the name of the Unity environment file you want to launch.",
"env_name = \"3DBall\" # Name of the Unity environment binary to launch\ntrain_mode = True # Whether to run the environment in training or inference mode",
"3. Start the environment\nUnityEnvironment launches and begins communication with the environment when instantiated.\nEnvironments contain brains which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python.",
"env = UnityEnvironment(file_name=env_name)\n\n# Examine environment parameters\nprint(str(env))\n\n# Set the default brain to work with\ndefault_brain = env.brain_names[0]\nbrain = env.brains[default_brain]",
"4. Examine the observation and state spaces\nWe can reset the environment to be provided with an initial set of observations and states for all the agents within the environment. In ML-Agents, states refer to a vector of variables corresponding to relevant aspects of the environment for an agent. Likewise, observations refer to a set of relevant pixel-wise visuals for an agent.",
"# Reset the environment\nenv_info = env.reset(train_mode=train_mode)[default_brain]\n\n# Examine the state space for the default brain\nprint(\"Agent state looks like: \\n{}\".format(env_info.states[0]))\n\n# Examine the observation space for the default brain\nfor observation in env_info.observations:\n print(\"Agent observations look like:\")\n if observation.shape[3] == 3:\n plt.imshow(observation[0,:,:,:])\n else:\n plt.imshow(observation[0,:,:,0])",
"5. Take random actions in the environment\nOnce we restart an environment, we can step the environment forward and provide actions to all of the agents within the environment. Here we simply choose random actions based on the action_space_type of the default brain.",
"for episode in range(10):\n env_info = env.reset(train_mode=train_mode)[default_brain]\n done = False\n episode_rewards = 0\n while not done:\n if brain.action_space_type == 'continuous':\n env_info = env.step(np.random.randn(len(env_info.agents), \n brain.action_space_size))[default_brain]\n else:\n env_info = env.step(np.random.randint(0, brain.action_space_size, \n size=(len(env_info.agents))))[default_brain]\n episode_rewards += env_info.rewards[0]\n done = env_info.local_done[0]\n print(\"Total reward this episode: {}\".format(episode_rewards))",
"6. Close the environment when finished\nWhen we are finished using an environment, we can close it with the function below.",
"env.close()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
quantumlib/Stim
|
doc/getting_started.ipynb
|
apache-2.0
|
[
"In this tutorial you'll:\n\nLearn what Stim is.\nInstall the Stim python package.\nCreate a simple circuit, and sample from it.\nAdd detector annotations to a circuit, and sample them.\nGenerate example error correction circuits.\nCombine Stim with PyMatching to correct the errors in a circuit.\nEstimate the threshold of an error correcting code.\n\nPrereqs:\nThis tutorial assumes you can read and write Python code, and have a working Python 3 environment.\nThis tutorial assumes you are comfortable with quantum stabilizer circuits (circuits with Clifford operations and Pauli measurements). For example, it assumes you know that stabilizer circuits can represent protocols like error correction and that it's possible to simulate them cheaply.\nThis tutorial assumes you are a little familiar with quantum error correcting codes. For example, it assumes you know that the surface code is a well known code and that a common task is to estimate the threshold of a code.\n1. What is Stim?\nStim is an open source tool for high performance analysis and simulation of quantum stabilizer circuits, intended to help with research into quantum error correcting codes.\n\nSee also: the paper describing Stim published in Quantum.\nSee also: a 15 minute lightning talk presenting Stim at QPL2021.\n2. Install the Stim python package.\nThe first thing to do is to install and import stim.\nThanks to the python ecosystem, this is easy!\nStim is available as a pypi package, and can be installed using pip install stim and then imported with import stim.\nJust like any other python package.\nIt may take a few minutes to install Stim the first time you do it, because Stim is a C++ python extension compiled locally for maximum performance.\nC++ has legendarily awful compile times.",
"!pip install stim\n\nimport stim",
"3. Create a simple circuit, and sample from it.\nIn Stim, circuits are instances of the stim.Circuit class. You create a new empty circuit with stim.Circuit(), and add operations to it by calling append_operation(name_of_gate, list_of_targets).\nYou can find the name of the gate you want from the stim gates reference. Most of them are what you'd expect, like \"H\" for the Hadamard gate. Targets are usually just a number indicating a qubit. There's a qubit 0, a qubit 1, etc.\nThe first circuit you'll make is a circuit that prepares a Bell pair and then measures it:",
"circuit = stim.Circuit()\n\n# First, the circuit will initialize a Bell pair.\ncircuit.append_operation(\"H\", [0])\ncircuit.append_operation(\"CNOT\", [0, 1])\n\n# Then, the circuit will measure both qubits of the Bell pair in the Z basis.\ncircuit.append_operation(\"M\", [0, 1])\n\n# Let's see the circuit's representation using stim's circuit language:\nprint(repr(circuit))",
"You can sample from the circuit using the circuit.compile_sampler() method to get a sampler object, and then calling sample on that object. For large circuits (thousands of qubits, millions of operations), it may take a few seconds to create the sampler (because it involves performing a stabilizer tableau simulation of the circuit to get a reference sample). Once the reference sample is acquired, the sampler is returned and samples can be acquired in bulk very cheaply.\nTry taking 10 shots from the circuit:",
"sampler = circuit.compile_sampler()\nprint(sampler.sample(shots=10))",
"Notice how there are ten rows (because you took ten shots) with two results per row (because there were two measurements in the circuit).\nAlso notice how the results are random from row to row, but always agree within each row.\nThat makes sense; that's what's supposed to happen when you repeatedly prepare and measure the |00> + |11> state.\n4. Add detector annotations to a circuit, and sample them.\nStim circuits can include error correction annotations.\nIn particular, you can annotate that certain sets of measurements can be used to detect errors.\nFor example, in the circuit you created above, the two measurement results should always be equal.\nYou can tell Stim you care about that by adding a DETECTOR annotation to the circuit.\nThe DETECTOR annotation will take two targets: the two measurements whose parity you are asserting should be consistent from run to run. You point at the measurements by using the stim.target_rec method (short for \"target measurement record\"). The most recent measurement is stim.target_rec(-1) (also known as rec[-1] in stim's circuit language), and the second most recent measurement is stim.target_rec(-2):",
"# Indicate the two previous measurements are supposed to consistently agree.\ncircuit.append_operation(\"DETECTOR\", [stim.target_rec(-1), stim.target_rec(-2)])\nprint(repr(circuit))",
"A slightly subtle point about detectors is that they only assert the parity is consistent.\nA detector doesn't say what the parity should be.\nYou annotate that a pair of measurements is always different in the same way that you annotate that a pair of measurements is always the same; it's the consistency that's key.\nAnyways, now that you've annotated the circuit with a detector, you can sample from the circuit's detectors instead of sampling from its measurements.\nYou do that by creating a detector sampler, using the compile_detector_sampler method, and then calling sample on it.",
"sampler = circuit.compile_detector_sampler()\nprint(sampler.sample(shots=5))",
"There are 5 rows in the results, because you took 5 shots.\nThere's one entry per row, because you put one detector in the circuit.\nNotice how the results are always 0.\nThe detector is never producing a detection event.\nThat's because there's no noise in the circuit; nothing to disturb the peace and quiet of a perfectly working machine.\nWell... time to fix that!\nYou should adjust the circuit so that it does have noise.\nStim has a variety of error channels to pick from, like single qubit depolarization (DEPOLARIZE1) and phase damping (Z_ERROR), but in this context you should use X_ERROR.\nThe X_ERROR noise channel independently applies a Pauli X error, with a given probability, to each of its targets.\nOne place where Stim is lacking, as of this writing, is the ability to make random access changes to circuits (e.g. replacing individual instructions or inserting instructions in the center of a circuit). To work around this, you can just redefine the circuit from scratch with the noise inserted using Stim's domain specific language for circuits:",
"circuit = stim.Circuit(\"\"\"\n H 0\n CX 0 1\n X_ERROR(0.2) 0 1\n M 0 1\n DETECTOR rec[-1] rec[-2]\n\"\"\")",
"Now that you've put noise before the measurements, try sampling some more detector shots and see what happens:",
"sampler = circuit.compile_detector_sampler()\nprint(sampler.sample(shots=10))",
"It's no longer all zeroes (...unless you got pretty lucky).\nThere are 1s appearing amongst the 0s.\nThe detection fraction of the circuit is how often detectors fire on average.\nGiven that an X error is being applied to each qubit with 20% probability, and the detector will fire when one of the qubits is hit (but not both), the detection fraction should be $0.8 \\cdot 0.2 \\cdot 2 = 0.32$.\nYou can estimate the detection fraction by just taking a lot of shots, and dividing by the number of shots and the number of detectors:",
"import numpy as np\nprint(np.sum(sampler.sample(shots=10**6)) / 10**6)",
"As you can see, the directly estimated value is close to the expected value $0.32$.\n5. Generate example error correction circuits.\nNow it's time for you to work with a real error correcting circuit.\nWell... a classical error correcting circuit.\nThe repetition code.\nYou could generate a repetition code circuit for yourself, but for the purposes of this tutorial it's easiest to use the example one included with Stim.\nYou can do this by calling stim.Circuit.generated with an argument of \"repetition_code:memory\".\n(You can find other valid arguments in the method's doc string, or just by passing in a bad one and looking at the exception message that comes out.)\nStim takes a few different parameters when generating circuits.\nYou have to decide how many times the stabilizers of the code are measured by specifying rounds, you have to decide the code distance by specifying distance, and you can specify what kind of noise to include using a few optional parameters.\nTo start with, you should just set before_round_data_depolarization=0.03 which will insert a DEPOLARIZE1(0.03) operation targeting every data qubit at the start of each round of measuring the stabilizers of the code.\nThis is a \"phenomenological noise model\":",
"circuit = stim.Circuit.generated(\n \"repetition_code:memory\",\n rounds=100,\n distance=9,\n before_round_data_depolarization=0.03)\nprint(repr(circuit))",
"The circuits generated by Stim include a lot of \"nice to have\" features.\nOf course it annotates the measurements to compare (as DETECTORs), but the DETECTOR instructions also include optional space and time coordinates (which have no effect but could be useful when debugging or drawing the circuit).\nThe circuit also has TICK annotations indicating divisions between layers of gates in the circuit, a REPEAT block to avoid restating the same ten operations a hundred times, and an OBSERVABLE_INCLUDE instruction identifying the measurement corresponding to the logical value preserved by the circuit.\n(Normally observables are the parity of many measurements, but the repetition code is very simple so it's just one measurement.)\n(The 0 in OBSERVABLE_INCLUDE(0) is identifying which observable value the measurement is being included in.)\nWith the circuit in hand, you can try sampling from it.\nTry sampling the measurements once, and printing out the results split up just right so that time advances from line to line:",
"sampler = circuit.compile_sampler()\none_sample = sampler.sample(shots=1)[0]\nfor k in range(0, len(one_sample), 9):\n timeslice = one_sample[k:k+9]\n print(\"\".join(\"_1\"[e] for e in timeslice))",
"See how the 1s seem to come in pairs of streaks?\nThat's because once a data qubit is flipped it stays flipped, and the measurements to its left and right permanently change parity.\nIf you sample the circuit's detectors, instead of its measurements, the streaks are replaced by spackle.\nYou get much sparser data:",
"detector_sampler = circuit.compile_detector_sampler()\none_sample = detector_sampler.sample(shots=1)[0]\nfor k in range(0, len(one_sample), 9):\n timeslice = one_sample[k:k+9]\n print(\"\".join(\"_1\"[e] for e in timeslice))",
"Notice how the 1s tend to come in pairs, except near the sides.\nNote that the logical observable is annotated to be a measurement of the leftmost data qubit.\nThe number of 1s that match to the left boundary tells you how many times that data qubit was flipped.\nIf you just had a syndrome decoder, you could use it to solve the matching problem and figure out if the leftmost data qubit was flipped and odd number of times or not...\n6. Combine Stim with PyMatching to correct the errors in a circuit.\nStim has a key feature that makes it easier to use a decoder: converting a circuit into a detector error model.\nA detector error model is just a list of all the independent error mechanisms in a circuit, as well as their symptoms (which detectors they set off) and frame changes (which logical observables they flip).\nYou can get the detector error mode for a circuit by calling circuit.detector_error_model():",
"print(repr(circuit.detector_error_model()))",
"This format is easier for decoders to consume than a raw circuit, because everything is explained in terms of observable symptoms and hidden symptoms, which is how decoders usually conceptualize the problem space.\nTo use this error model with a decoder, you need a decoder that can consume this format.\nUnfortunately, there aren't a lot of those.\nYou will probably need to write custom glue code to configure a decoder using this format.\nYou probably don't want this tutorial to get sidetracked into the details of writing glue code, so how about just borrowing some open source Apache 2 licensed code from https://github.com/strilanc/honeycomb_threshold that glues Stim to PyMatching?\nHonestly, unless you're really interested in the details of glueing two pieces of software together, you should probably just scroll past this next block of code.\nIt's better than having to work from the raw circuit, but it's still a pain point.\nOf course, feel free to use parts of this code for glueing to your own decoder if needed.",
"!pip install pymatching\n\n##########################################################\n#################### BEGIN GLUE CODE #####################\n##########################################################\n\nimport math\nimport networkx as nx\nimport pymatching\nfrom typing import Callable, List\n\n\ndef predict_observable_errors_using_pymatching(circuit: stim.Circuit,\n det_samples: np.ndarray,\n ) -> np.ndarray:\n \"\"\"Turn detection events into predicted observable errors.\"\"\"\n error_model = circuit.detector_error_model(decompose_errors=True)\n matching_graph = detector_error_model_to_pymatching_graph(error_model)\n\n num_shots = det_samples.shape[0]\n num_obs = circuit.num_observables\n num_dets = circuit.num_detectors\n assert det_samples.shape[1] == num_dets\n\n predictions = np.zeros(shape=(num_shots, num_obs), dtype=np.bool8)\n for k in range(num_shots):\n expanded_det = np.resize(det_samples[k], num_dets + 1)\n expanded_det[-1] = 0\n predictions[k] = matching_graph.decode(expanded_det)\n return predictions\n\n\ndef detector_error_model_to_pymatching_graph(model: stim.DetectorErrorModel) -> pymatching.Matching:\n \"\"\"Convert a stim error model into a pymatching graph.\"\"\"\n g = detector_error_model_to_nx_graph(model)\n num_detectors = model.num_detectors\n num_observables = model.num_observables\n\n # Add spandrels to the graph to ensure pymatching will accept it.\n # - Make sure there's only one connected component.\n # - Make sure no detector nodes are skipped.\n # - Make sure no observable nodes are skipped.\n for k in range(num_detectors):\n g.add_node(k)\n g.add_node(num_detectors + 1)\n for k in range(num_detectors + 1):\n g.add_edge(k, num_detectors + 1, weight=9999999999)\n g.add_edge(num_detectors, num_detectors + 1, weight=9999999999, qubit_id=list(range(num_observables)))\n\n return pymatching.Matching(g)\n\n\ndef detector_error_model_to_nx_graph(model: stim.DetectorErrorModel) -> nx.Graph:\n \"\"\"Convert a stim error model into a 
NetworkX graph.\"\"\"\n\n g = nx.Graph()\n boundary_node = model.num_detectors\n g.add_node(boundary_node, is_boundary=True, coords=[-1, -1, -1])\n\n def handle_error(p: float, dets: List[int], frame_changes: List[int]):\n if p == 0:\n return\n if len(dets) == 0:\n # No symptoms for this error.\n # Code probably has distance 1.\n # Accept it and keep going, though of course decoding will probably perform terribly.\n return\n if len(dets) == 1:\n dets = [dets[0], boundary_node]\n if len(dets) > 2:\n raise NotImplementedError(\n f\"Error with more than 2 symptoms can't become an edge or boundary edge: {dets!r}.\")\n if g.has_edge(*dets):\n edge_data = g.get_edge_data(*dets)\n old_p = edge_data[\"error_probability\"]\n old_frame_changes = edge_data[\"qubit_id\"]\n # If frame changes differ, the code has distance 2; just keep whichever was first.\n if set(old_frame_changes) == set(frame_changes):\n p = p * (1 - old_p) + old_p * (1 - p)\n g.remove_edge(*dets)\n g.add_edge(*dets, weight=math.log((1 - p) / p), qubit_id=frame_changes, error_probability=p)\n\n def handle_detector_coords(detector: int, coords: np.ndarray):\n g.add_node(detector, coords=coords)\n\n eval_model(model, handle_error, handle_detector_coords)\n\n return g\n\n\ndef eval_model(\n model: stim.DetectorErrorModel,\n handle_error: Callable[[float, List[int], List[int]], None],\n handle_detector_coords: Callable[[int, np.ndarray], None]):\n \"\"\"Interprets the error model instructions, taking care of loops and shifts.\n\n Makes callbacks as error mechanisms are declared, and also when detector\n coordinate data is declared.\n \"\"\"\n det_offset = 0\n coords_offset = np.zeros(100, dtype=np.float64)\n\n def _helper(m: stim.DetectorErrorModel, reps: int):\n nonlocal det_offset\n nonlocal coords_offset\n for _ in range(reps):\n for instruction in m:\n if isinstance(instruction, stim.DemRepeatBlock):\n _helper(instruction.body_copy(), instruction.repeat_count)\n elif isinstance(instruction, 
stim.DemInstruction):\n if instruction.type == \"error\":\n dets: List[int] = []\n frames: List[int] = []\n t: stim.DemTarget\n p = instruction.args_copy()[0]\n for t in instruction.targets_copy():\n if t.is_relative_detector_id():\n dets.append(t.val + det_offset)\n elif t.is_logical_observable_id():\n frames.append(t.val)\n elif t.is_separator():\n # Treat each component of a decomposed error as an independent error.\n # (Ideally we could configure some sort of correlated analysis; oh well.)\n handle_error(p, dets, frames)\n frames = []\n dets = []\n # Handle last component.\n handle_error(p, dets, frames)\n elif instruction.type == \"shift_detectors\":\n det_offset += instruction.targets_copy()[0]\n a = np.array(instruction.args_copy())\n coords_offset[:len(a)] += a\n elif instruction.type == \"detector\":\n a = np.array(instruction.args_copy())\n for t in instruction.targets_copy():\n handle_detector_coords(t.val + det_offset, a + coords_offset[:len(a)])\n elif instruction.type == \"logical_observable\":\n pass\n else:\n raise NotImplementedError()\n else:\n raise NotImplementedError()\n _helper(model, 1)\n\n##########################################################\n##################### END GLUE CODE ######################\n##########################################################",
"A notable detail about this glue code is that it calls circuit.detector_error_model(decompose_errors=True) on the circuit you give it, instead of just circuit.detector_error_model().\nSpecifying decompose_errors=True tells Stim it has to output a graph-like error model, where errors are decomposed into pieces with exactly 2 detection events (edges) or exactly 1 detection event (boundary edges).\nThis won't work on all circuits; for example it won't work on a color code circuit.\nBut it does work on repetition codes and surface codes.\nWith your glue code in hand, you can make the decoder attempt to predict whether or not the logical observable have been flipped, given the observed detection events.\nTo do this, you need to sample the circuit's detectors with append_observables=True specified. This will sample whether or not each declared observable was flipped in addition to the usual detector samples, and put the observable flipped data after the end of the detector data.\nYou can then split the shots into the detector part and the observable part, feed the detector part to the decoder, and check whether its prediction of whether the observables were flipped matches the held back ground truth of whether the observables were flipped.\nYou can write a method for doing this to an arbitrary circuit:",
"def count_logical_errors(circuit: stim.Circuit, num_shots: int) -> int:\n shots = circuit.compile_detector_sampler().sample(num_shots, append_observables=True)\n\n detector_parts = shots[:, :circuit.num_detectors]\n actual_observable_parts = shots[:, circuit.num_detectors:]\n predicted_observable_parts = predict_observable_errors_using_pymatching(circuit, detector_parts)\n\n num_errors = 0\n for actual, predicted in zip(actual_observable_parts, predicted_observable_parts):\n if not np.array_equal(actual, predicted):\n num_errors += 1\n return num_errors\n",
"And then try it on your repetition code circuit:",
"circuit = stim.Circuit.generated(\"repetition_code:memory\", rounds=100, distance=9, before_round_data_depolarization=0.03)\nnum_shots = 1000\nnum_logical_errors = count_logical_errors(circuit, num_shots)\nprint(\"logical_error_rate at 3%:\", num_logical_errors / num_shots)",
"Contrast this smooth sailing with what you get from doing no error correction, and just assuming that the logical observable is correct:",
"num_naive_logical_errors = np.sum(circuit.compile_detector_sampler().sample(shots=num_shots, append_observables=True)[:, -1])\nprint(\"naive logical error rate at 3%:\", num_naive_logical_errors / num_shots)",
"Wow, the correction is working a whole lot better than the naive approach.\nActually, with the physical error rate so low, the correction is almost working too well.\nThere's a decent chance you didn't see any logical errors at all!\nIt's not very interesting for things to always work.\nYou should try bumping the physical error rate up from 3% to 13%:",
"circuit = stim.Circuit.generated(\n \"repetition_code:memory\",\n rounds=100,\n distance=9,\n before_round_data_depolarization=0.13)\nnum_shots = 1000\nnum_logical_errors = count_logical_errors(circuit, num_shots)\nprint(\"logical_error_rate at 13%:\", num_logical_errors / num_shots)",
"Ah. The existence of failure. Perfect.\n7. Estimate the threshold of an error correcting code.\nEstimating the threshold of an error correcting code really just comes down to trying a bunch of physical error rates and code distances.\nYou plot out the logical error rate vs physical error rate curve for each distance, and see where the curves cross.\nThat's where the physical error rate gets bad enough that increasing the distance starts to make the logical error rate worse, instead of better.\nThat's the threshold physical error rate.\nDecoding so many cases might take a minute to run, but once it's done you can find the threshold just by looking for where the lines touch.",
"import matplotlib.pyplot as plt\n\nnum_shots = 1000\nfor d in [3, 5, 7]:\n xs = []\n ys = []\n for noise in [0.1, 0.2, 0.3, 0.4, 0.5]:\n circuit = stim.Circuit.generated(\n \"repetition_code:memory\",\n rounds=d * 3,\n distance=d,\n before_round_data_depolarization=noise)\n xs.append(noise)\n ys.append(count_logical_errors(circuit, num_shots) / num_shots)\n plt.plot(xs, ys, label=\"d=\" + str(d))\nplt.semilogy()\nplt.xlabel(\"physical error rate\")\nplt.ylabel(\"logical error rate\")\nplt.legend()\nplt.show()",
"From the results here you can see that the repetition code has an amazingly high threshold! Somewhere around 30%-40%. Well... it's not quite so amazing when you remember that you're using a phenomenological noise model (instead of a circuit level noise model) and also that you're inserting depolarizing errors instead of bit flip errors (the repetition code is immune to Z errors, and when a depolarizing error occurs it's a Z error one third of the time).\nStill, you can see that it's not so hard to try a few different cases and look for where lines cross.\nNow that you know the basic workflow, you should do another threshold estimate.\nA proper quantum threshold estimate, using a quantum error correcting code instead of the classical repetition code.\nOh, and using circuit level noise instead of phenomenological noise!\nYou might think that these changes would make things substantially harder, but actually the workflow is completely identical!\nOnly the stim.Circuits being used are different.\nNow, if you had to generate the circuit for yourself then there would be a lot of work to do writing code to make the circuit.\nBut you can lean on Stim's example circuits some more, because Stim can make simple surface code circuits:",
"print(stim.Circuit.generated(\n \"surface_code:unrotated_memory_z\",\n rounds=100,\n distance=3,\n after_clifford_depolarization=0.001,\n after_reset_flip_probability=0.001,\n before_measure_flip_probability=0.001,\n before_round_data_depolarization=0.001))",
"You're specifying several more error parameters now, in order to get circuit level noise instead of phenomenological noise.\nBecause of that, and because this is a quantum code instead of a classical code, the threshold is going to be noticeably lower.\nYou're also going to start noticing some performance issues.\nStim isn't struggling to sample from these circuits, but PyMatching uses an algorithm that doesn't have ideal asymptotic scaling for these scenarios.\nDecoding is starting to become a serious bottleneck.\nAnyways, the following code might take a couple minutes to run, but once it spits out a plot you can once again spot the threshold by looking for where the lines touch:",
"num_shots = 1000\nfor d in [3, 5, 7]:\n xs = []\n ys = []\n for noise in [0.0025, 0.0050, 0.0075, 0.0100]:\n circuit = stim.Circuit.generated(\n \"surface_code:unrotated_memory_z\",\n rounds=d * 3,\n distance=d,\n after_clifford_depolarization=noise,\n after_reset_flip_probability=noise,\n before_measure_flip_probability=noise,\n before_round_data_depolarization=noise)\n xs.append(noise)\n ys.append(count_logical_errors(circuit, num_shots) / num_shots)\n plt.plot(xs, ys, label=\"d=\" + str(d))\nplt.semilogy()\nplt.xlabel(\"physical error rate\")\nplt.ylabel(\"logical error rate\")\nplt.legend()\nplt.show()",
"You can see from the plot that the threshold of the surface code is somewhere between 0.5% and 1%.\nAt least, that's the threshold for this specific type of circuit level noise and when using a minimum weight perfect matching decoder (PyMatching).\nEnd of Tutorial\nCongratulations for making it this far! Historically, estimating the threshold of a quantum error correcting code would have taken weeks or months (because you had to write the simulator and the matching graph generation and the decoder).\nBy leveraging open source tools, you just did it in a single sitting.\nNicely done!\nIf you ever run into any problems using Stim, or find yourself confused or misled about how to do something with Stim, please open an issue at Stim's Github repository or ask a question tagged Stim on the quantum computing stack exchange.\nThat kind of feedback is very helpful and appreciated.\n\nIf you're looking for an example of a research paper that used Stim to estimate the threshold of a new quantum code, see \"A Fault-Tolerant Honeycomb Memory\" or the video showing the creation of the initial estimate that eventually led to that paper."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
lukasmerten/CRPropa3
|
doc/pages/example_notebooks/trajectories/trajectories.v4.ipynb
|
gpl-3.0
|
[
"3D trajectories in a turbulent field\nThe following simulation tracks a single UHE nucleus and its secondary nucleons/nuclei through a turbulent magnetic field.\nFirst we create a random realization of a turbulent field with a Kolmogorov power spectrum on 60-800 kpc lengthscales and an RMS field strength of 8 nG.\nThe field is stored on a $256^3$ grid with 30 kpc grid spacing, and thus has an extent of $(256 \\cdot 30 \\rm{kpc})^3$.\nThe field is by default periodically repeated in space to cover an arbitrary volume.\nThe chosen grid size consumes only very little memory. For practical purposes a larger grid is advised in order to represent more variations of turbulent modes, provide a larger turbulent range, or a higher resolution.",
"from crpropa import *\n\nrandomSeed = 42\nturbSpectrum = SimpleTurbulenceSpectrum(Brms=8*nG, lMin = 60*kpc, lMax=800*kpc, sIndex=5./3.)\ngridprops = GridProperties(Vector3d(0), 256, 30*kpc)\nBField = SimpleGridTurbulence(turbSpectrum, gridprops, randomSeed)\n\n# print some properties of our field\nprint('Lc = {:.1f} kpc'.format(BField.getCorrelationLength() / kpc)) # correlation length\nprint('sqrt(<B^2>) = {:.1f} nG'.format(BField.getBrms() / nG)) # RMS\nprint('<|B|> = {:.1f} nG'.format(BField.getMeanFieldStrength() / nG)) # mean\nprint('B(10 Mpc, 0, 0) =', BField.getField(Vector3d(10,0,0) * Mpc) / nG, 'nG')",
"Saving and loading fields\nIn addition to creating random turbulent fields, we can also load and save custom magnetic field grids.\nAs input and output we currently support binary files in single precision and ASCII files.",
"# save the field\n# format: (Bx, By, Bz)(x, y, z) with z changing the quickest.\n#dumpGrid(BField.getGrid(), 'myfield.dat') # binary, single precision\n#dumpGridToTxt(Bfield.getGrid(), 'myfield.txt') # ASCII\n# load your own field\n#vgrid=Grid3f(gridprops)\n#loadGrid(vgrid, 'myfield.dat')\n#loadGridFromTxt(vgrid, 'myfield.txt')",
"Running the simulation\nNow that we have our magnetic field ready we can fire up our simulation and hope that something visually interesting is going to happen.",
"sim = ModuleList()\nsim.add(PropagationCK(BField))\nsim.add(PhotoPionProduction(CMB()))\nsim.add(PhotoPionProduction(IRB_Kneiske04()))\nsim.add(PhotoDisintegration(CMB()))\nsim.add(PhotoDisintegration(IRB_Kneiske04()))\nsim.add(ElectronPairProduction(CMB()))\nsim.add(ElectronPairProduction(IRB_Kneiske04()))\nsim.add(NuclearDecay())\nsim.add(MaximumTrajectoryLength(25 * Mpc))\noutput = TextOutput('trajectory.txt', Output.Trajectory3D) \nsim.add(output)\n\nx = Vector3d(0,0,0) # position\np = Vector3d(1,1,0) # direction\nc = Candidate(nucleusId(16, 8), 100 * EeV, x, p)\n\nsim.run(c, True)",
"(Optional) Plotting\nWe plot the trajectory of our oxygen-16 nucleus. To distinguish between secondary nuclei the following colors are used: protons are blue, alpha particles are green, everthing heavier is red.",
"%matplotlib inline\nfrom pylab import *\nfrom mpl_toolkits.mplot3d import axes3d\n\noutput.close()\ndata = genfromtxt('trajectory.txt', names=True)\n\n# trajectory points\nx, y, z = data['X'], data['Y'], data['Z']\n\n# translate particle ID to charge number\nZ = [chargeNumber(int(Id)) for Id in data['ID'].astype(int)]\n\n# translate the charge number to color and size\n# --> protons are blue, Helium is green, everthing else is red\ncolorDict = {0:'k', 1:'b', 2:'g', 3:'r', 4:'r', 5:'r', 6:'r', 7:'r', 8:'r'}\nsizeDict = {0:4, 1:4, 2:8, 3:10, 4:10, 5:10, 6:10, 7:10, 8:10}\ncolors = [colorDict[z] for z in Z]\nsizes = [sizeDict[z] for z in Z]\n\nfig = plt.figure(figsize=(12, 5))#plt.figaspect(0.5))\nax = fig.gca(projection='3d')# , aspect='equal'\n\nax.scatter(x,y,z+6, 'o', s=sizes, color=colors)\n\nax.set_xlabel('x / Mpc', fontsize=18)\nax.set_ylabel('y / Mpc', fontsize=18)\nax.set_zlabel('z / Mpc', fontsize=18)\nax.set_xlim((-1, 16))\nax.set_ylim((-1, 16))\nax.set_zlim((-1, 16))\nax.xaxis.set_ticks((0, 5, 10, 15))\nax.yaxis.set_ticks((0, 5, 10, 15))\nax.zaxis.set_ticks((0, 5, 10, 15))\n\nshow()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
dhuppenkothen/BayesPSD
|
docs/Demo.ipynb
|
bsd-2-clause
|
[
"How To Search for QPOs with BayesPSD\nThis notebook is a demonstration for how to use the code in this package\nto search for quasi-periodic oscillations (QPOs) in X-ray data of bursts.\nThis code requires\n* python 2.7 or later (not really tested with python 3)\n* numpy\n* scipy\n* matplotlib\nRecommended\n* emcee (https://github.com/dfm/emcee)\n* acor (https://github.com/dfm/acor)\n* statsmodels (https://github.com/statsmodels/statsmodels; only for using crazy minimization algorithms that don't return the inverse covariance)\n* seaborn for making pretty plots (http://stanford.edu/~mwaskom/software/seaborn/)\nBasics\nThe module contains both the code to do Bayesian inference on bursty time series, as well \nas some basic class definitions that are useful for time series analysis in general, and for\nFermi/GBM data in particular.\nLet's start with a simple time series in a data file. This is actually a magnetar bursts from \na source called SGR J1550-5418, but that's not important right now. \nI've made things easy for you here: the data are individual photon events and energies only from \nthe part of the observation where the burst was observed. We'll have a look at more complicated \ndata and how to automate the steps outlined below later.\nFor now, let's import some code and load the time series.",
"%matplotlib inline\nimport matplotlib.pyplot as plt\n\n## this is just to make plots prettier\n## comment out if you don't have seaborn\nimport seaborn as sns \nsns.set()\n########################################\n\nimport numpy as np\n",
"NOTE: You need to have the directory where you saved the BayesPSD code in your PYTHONPATH variable for the following to work! If you haven't set your variable externally, you can do it in the following way:",
"import sys\nsys.path.append(\"/Users/danielahuppenkothen/work/repositories/BayesPSD\")",
"Be aware that you need to replace the directory structure with your own, and that you need to add the path to the directory in which the BayesPSD folder is located. In my case, that's in my repositories folder in my work directory on my home folder, but it will be different for you!\nAlso, when importing below, bayespsd needs to be written exactly as the name of the folder (it's case sensitive!). \nNow we can import functions and classes from that package:",
"from BayesPSD import Lightcurve, PowerSpectrum",
"The data are saved in a simple text file:",
"## the directory where we've stored the data\ndatadir = \"../data/\" \n\ndata = np.loadtxt(datadir+\"090122283_+071.87300_eventfile.dat\")\nprint(\"Data shape : \" + str(data.shape))",
"The result is a numpy-array with 130001 rows and 2 columns. Each row is a photon, the first column contains the photon arrival times, the second column the energies.\nFor now, let's not care about the energies (don't worry, we'll get back to that later!).\nLet's make a light curve and plot it:",
"timestep = 0.001 ##the time resolution for the light curve\nlc = Lightcurve(data[:,0], timestep=timestep)\n\nplt.figure()\nplt.plot(lc.time, lc.counts)",
"There you go, it's a burst. The class Lightcurve has several useful attributes. Have a look at the code if you're interested. It works both with time tagges arrival times (in which case it takes a value timestep for the output time resolution) or with counts, in which case you should use the keyword times for time bins and counts for the counts per bin.\nThe next step is to make a periodogram. This is easy if we already have a Lightcurve object.\nNote that we'll use loglog for the figure rather than plot, because the spectrum is better plotted on a log-log scale.",
"ps = PowerSpectrum(lc, norm=\"leahy\")\n\nplt.loglog(ps.freq[1:], ps.ps[1:], lw=2, linestyle=\"steps-mid\")",
"The PowerSpectrum class comes with three normalizations built-in: leahy for the Leahy normalization, rms for a rms-normalized periodogram and variance for a periodogram that is normalized to the total variance.\nFor most purposes here, the Leahy normalization (default) is probably what you want, but the other options are there if you want them.\nML/MAP Fitting\nSo far, we haven't actually done any analysis. Let's first talk about models.\nAll parametric models are saved in the model parametricmodels.py.",
"from BayesPSD import pl, bpl, const, qpo",
"We can now, for example, fit one of these models to the data.\nThis kind of fitting is implemented in the class PerMaxLike, which, despite its name\nactually usually does maximum-a-posteriori estimates rather than maximum likelihood estimates.",
"from BayesPSD import PerMaxLike\n\npsfit = PerMaxLike(ps, obs=True, fitmethod=\"bfgs\")",
"PerMaxLike takes a PowerSpectrum object. The variable obs controls whether output plots and logs are produced (set True if you want those!) and fitmethod sets one of the optimization algorithms specified in scipy.optimize.\nMy recommendation is to set to bfgs unless you have a good reason not to.\nIn order to actually fit a model to the periodogram, we'll need to specify what model, and will also have to set starting guesses for the parameters. Note that the number of parameters. Look at the function definitions for details about the different models. In the case below, we'll use a simple power law model, which takes the power law index, the log(amplitude) and log(background).\nThe actual fitting is implemented in method mlest. For details on all parameters this method takes, see the documentation. Below are the most important ones. Again, we can set whether the code should produce plots and be verbose, and we can set whether the periodogram we use is the simple Fourier transform of a light curve (m=1) or whether it is the average of several periodograms (or frequency bins). \nIf map is True, the code will produce the maximum-a-posteriori estimate, otherwise it'll return a Maximum Likelihood estimate.",
"## starting parameters\npars = [2,10,np.log(2.0)]\n\nfitparams = psfit.mlest(pl, pars, obs=True, m=1, map=True)",
"The fitting routine returns a dictionary with lots of interesting and useful information. Let's have a look at the dictionary keys:",
"print(fitparams.keys())",
"Here are the most interesting ones:",
"print(\"The best-fit parameters are \" + str(fitparams[\"popt\"]))\nprint(\"The covariance matrix for the parameters is: \" + str(fitparams[\"cov\"]))\nprint(\"The MAP estimate (or Maximum Likelihood estimate) is %.3f\"%fitparams[\"result\"])\nprint(\"The deviance (-2*log(maximum likelihood)) is %.3f\"%fitparams[\"deviance\"])\nprint(\"The Akaike Information Criterion is %.3f.\"%fitparams[\"aic\"])\nprint(\"The Bayesian Information Criterion is %.3f.\"%fitparams[\"bic\"])",
"A side note: likelihoods, priors and posteriors are implemented in Posterior and its subclasses. \nCurrently (fairly uninformative) priors are hard-coded. If you need different priors, my suggestion is to fork the repository and implement them in a separate branch. Or you could subclass Posterior to make your own.\nBayesian QPO Detection\nSo now comes the fun bit: we can do the actual QPO search! Most of that is implemented in the class Bayes. Its constructor takes a PowerSpectrum object and various parameters:",
"from BayesPSD import Bayes\n\nbb = Bayes(ps, namestr='demo', plot=True, m=1)",
"The variable namestr allows you do set a string identifier for all output plots and text files. This is especially useful if you run many bursts and need to save each in its own separate file.\nThe variable plot controls whether the code produces output plots for diagnostic purposes. This is usually a useful feature to leave on. \nFinally, the periodogram we just produced above is a simple Fourier transform of a single light curve. However, in some applications, you might want to average periodograms or frequency bins, in which case the statistical distributions used in the likelihood need to change. Set m to the number of periodograms or frequency bins averaged.\nThere will be two steps: First, we need to make some statement about what kind of model to use for the broadband noise (that annoying lower-law-shaped component in the plot above). In the second step we'll use that broadband noise model to infer the presence of a QPO.\nNote that unless you care in detail whether you are using the parsimonious model, you can skip step (1) and just use a more complex model. We'll demonstrate the functionality here anyway.",
"## the two noise models\nmodel1 = pl\nmodel2 = bpl\n\n## input parameters for both\npar1 = [1.,4.,np.log(2.0)]\npar2 = [1.,3.,2.,3., np.log(2.0)]\n\n## parameters for the MCMC run\nnchain = 200 ## number of emcee walkers\nniter = 200 ## number of iterations per chain\nnsim = 100 ## number of simulations to run\n\npsfit, fakeper, summary = bb.choose_noise_model(model1, par1,\n model2, par2, \n fitmethod=\"bfgs\",\n nchain = nchain, niter = niter,\n nsim = nsim, writefile=True)\n",
"Running this code will print a lot of diagnostics on the screen (and also to a log file): the results of the MAP fitting for both models, mean, standard deviations and quantiles for the three parameters and more.\nIt also returns a bunch of objects useful for further analysis:\n* psfit contains a PerMaxLike object. In its attributes plfit and bplfit it contains summary dictionaries for the power law and bent power law models, respectively, with results from the initial MAP fit on the data, as we've created when we did the fitting by hand above\n* fakeper contains the list of all fake periodograms created during the simulation stage\n* summary is a dictionary with the results printed at the very end of the analysis.\nYou should also have a couple of plots, all starting with whatever string you gave it for variable namestr above (in my case \"demo\"):\n* demo_ps_fit.png shows the periodogram and the MAP fits for the two broadband noise models\n* demo_scatter.png shows a triangle plot of the posterior distributions of the parameters for model 1. This can quite useful for diagnosing how well behaved the posterior distribution is, and whether there are correlations between parameters\n* demo_quantiles.png shows the 80% quantiles for all parameters and all Markov chains/walkers. This plot is more useful for Metropolis-Hastings than it is for emcee.\n* demo_rhat.png plots $\\hat{R}$ for all parameters in model 1. $\\hat{R}$ is another quantity for diagnosing convergence. It compares the variance within Markov chains to the variance between Markov chains. In general, well-mixed chains will have $\\hat{R} \\approx 1$. If it's much larger than 1.2, I'd start worrying a bit (and perhaps either increase the number of chains (in emcee) or the number of samples (in both MH and emcee). 
\n* demo_lrt.png shows a histogram of the posterior distribution of the likelihood ratios constructed from model 1, together with the observed value.\nIn the very end, the code will print to the screen a bunch of posterior predictive p-values; the most important one here is the one for the likelihood ratio. What we've done in this part of the analysis is essentially this:\n1. compute MAP estimates for both models chosen to compare for the data\n2. compute the likelihood ratio of both models for the data\n3. pick parameter sets from the posterior PDF for model 1 via MCMC\n4. from nsim randomly picked parameter sets, simulate a periodogram for each\n5. fit all nsim fake periodograms with both models, compute the likelihood ratio for each\n6. build a posterior distribution for the likelihood ratio of both models under the null hypothesis that model 1 is actually representative of the data\n7. compute how many samples in the distribution lie above the value computed for the data and divide by the total number of samples for the p-value.\nThis number essentially tells you what likelihood ratio would be expected if the data were generated from model 1. \nIf the observed likelihood ratio is an outlier with respect to the derived distribution, this could be taken as an indicator that the data are unlikely to be generated. However, it is explicitly not an indicator that the data were instead generated from model 2. Strictly speaking, this conclusion is not supported by the test we've done, although we will use it in a way that might seem to imply it. \nThere could be other reasons for why the likelihood ratio we have observed is an outlier with respect to the distributions derived from the posterior sample: we could have simply picked two models that don't particularly describe the data very well, so that neither is an appropriate model for the data. 
Or perhaps our statistical distributions aren't quite what we expected them to be (which can be the case for dead time), which can also have an effect. \nIn conclusion, be wary of any results you derive from the analysis above. Model comparison is always tricky, but for QPO detection, having a model that is a reasonable approximation of the broadband shape of the periodogram is good enough. So you can rightfully skip this step and just use the more complex model (usually a bent power law) and get away with it.\nNow that we've got that out of the way, let's do the actual QPO fitting. There's a method for that, too!",
"noise_model = bpl\npar = [1,3,2,3,np.log(2.0)]\nnchain = 200\nniter = 1000\nnsim = 1000\n\nresults = bb.find_periodicity(noise_model, par, \n nchain=nchain, niter=niter, nsim=nsim)",
"Much like the choose_noise_model method above, this code prints a bunch of useful information to the screen (and a log file) and makes some figures. \nMost importantly, it prints a bunch of blocks that start with \"The posterior p-value for the maximum residual power [...]\". Each of these blocks details the results for the binned periodogram at the specified frequency. \nThe rationale behind this is that QPOs can be narrow or broad: a single QPO might be spread out over several frequency bins, or it might be so coherent it is mostly concentrated in one bin (although even then, sampling will usually cause it to be spread over two adjacent bins). So in searching for QPOs in various frequency bins, we do not risk missing signals that may be broader than the native frequency resolution. \nOf course, the fact that we've searched more than one periodgram needs to be taken into account when computing the number of trials: while strictly speaking, the binned periodograms are not statistically independent of each other, the conservative assumption to make is that they are, and multiply the number of trials by 7 (the number of frequency bins searched).\nNote that after that initial statement, which includes the frequency bin width as well as the posterior p-value (uncorrected for the number of trials in general, including the frequency binning, but it does take into account the fact that we searched many frequencies per periodogram), it also prints $T_{R,j} = \\max_j{2I_j/S_j}$, where $I_j$ is the power at frequency $j$ and $S_j$ is the model power spectrum at frequency $j$. \nFinally, it also prints the upper limit (actually, strictly speaking we show the sensitivity) the fractional rms amplitude that we could have detected at a few useful frequencies. Note that this sensitivity depends on frequency, because the broadband noise depends on frequency and changes the sensitivity at various frequencies. 
\nAgain, this method produces two different plots:\n* demo_scatter.png, which is the same plot as produced during the model comparison step, but for whatever model was used for finding QPOs\n* demo_maxpow.png shows the posterior distribution for four different frequency bins. Actually, what it shows are the data at the native time resolution as well as for three different Wiener filter smoothed periodograms (which I thought might be useful at some point, but don't really use in practice). \nUsually, when searching for QPOs, I will do the search with nsim=1000 and derive p-values up to $10^{-3}$ to conserve computation. Then I will pick all light curves that have a p-value (corrected for number of trials) of $<10^{-2}$ in at least two or more frequency bins, and re-run those with however many simulations are required to be confident the detection is real.\nFor example, if I had 250 bursts, each of which I decided to run once (for the full light curve), and decided that I'll report any signal that has a posterior predictive p-value, corrected for the number of trials, of $10^{-2}$, then I would have to re-run those bursts that have candidate signals with at least $\\frac{10^{-2}}{250 \\times 7 \\times 100}$ simulations, $250$ for the number of bursts, $7$ for the number of frequency bins, and then another factor of $100$ to make sure I have enough simulations to actually trace out the tail of the distribution sufficiently. This can be a very large number, in which case memory can become an issue. If it does, move to a bigger computer, or let me know!\nRunning Several Bursts at Once\nThe code above will work well for the occasional burst, but for large data sets, you'd want to automate it. Of course, you could do that yourself, but for various useful corner cases, there's some code set up that will make things simple.\nEspecially for Fermi/GBM data, the following code will be useful. \nBelow will be two cases:\n1. 
Data that comes out of my Fermi/GBM processing pipeline\n2. Data that is stored in an ascii text file (the file format either needs to match the one below, or you'll need to change the code to something that matches your file name structure). \nAt the heart of this is a set of classes called Burst and GBMBurst that define useful methods for bursts (and can easily be extended). In particular, class Burst has a method that runs the Bayesian QPO search automatically.\nWhat you're going to need for the following analysis are\n1. the data files with the TTE data; this must contain in the first column photon arrival times, in the second (optionally) photon energies or channels\n2. a file that has at least three columns:\n * the identifier for the observations (e.g. the Fermi/GBM trigger ID)\n * the burst start times in the same format as the TTE photon arrival times (seconds since trigger works pretty well)\n * the burst duration in seconds \nUsing the Burst class to simplify things\nFor a single light curve, like the one used above, we can make a GBMBurst object as done below. When creating this object, it will automatically create a light curve and periodogram up to whatever Nyquist frequency is specified. It is also possible to specify the normalization of the periodogram, the fluence of the burst and the peak energy for various purposes.",
"from BayesPSD import GBMBurst\n\n## load data\ndata = np.loadtxt('../data/090122283_+071.87300_eventfile.dat')\ntimes = data[:,0] ## photon arrival times\nevents = data[:,1] ## photon energies\n\n## we can get ObsID and start time from the file name in this case\n## for others, we might need to read it from a file\n\n#split filename into components and get ObsID and start time\nfsplit = \"090122283_+071.87300_eventfile.dat\".split(\"_\")\nprint(fsplit)\nbid = fsplit[0] ##ObsID\nbst = np.float(fsplit[1]) ## burst start time\n\n## let's pretend we know the burst duration\nblen = 0.1\n\n## we're going to search between 8 and 200 keV\nenergies = [8.0, 200.0]\n\n## We want to search up to 4096 Hz:\nfnyquist = 4096.\n\n## How much of the burst duration do we want to \n## add on either side of the burst?\naddfrac = 0.2\n\n## create GBMBurst object; note the confusing syntax:\n## - energies contains energy ranges to use\n## - events actually contains the list of photon energies\nburst = GBMBurst(bid=bid, bstart=bst, blength=blen,\n energies=energies, photons=times, events=events, \n instrument=\"gbm\", fnyquist=fnyquist, addfrac=addfrac)\n",
"Note that for convenience, the code adds 20% of the burst duration on either side of the light curve, to make sure the light curve goes back to the background on either side (anything can cause funny effects in the Fourier transform!). \nWe can now look at the light curve:",
"lc = burst.lc\n\nplt.figure()\nplt.plot(lc.time, lc.counts)",
"Or the periodogram:",
"ps = burst.ps\n\nplt.figure()\nplt.loglog(ps.freq[1:], ps.ps[1:], lw=2, linestyle=\"steps-mid\")",
"Or we can run the entire Bayesian QPO search:",
"namestr = \"%s_%.3f_test\"%(bid, bst) ## a string identifier for output files\nnchain = 500 ## number of emcee walkers\nniter = 100 ## number of iterations in the Markov chain\nnsim = 100 ## number of simulations\n\nm = 1 ## periodogram not averaged\nfitmethod = \"bfgs\" ## use BFGS for optimization\n\nburst.bayesian_analysis(namestr = namestr,\n nchain = nchain,\n niter = niter,\n nsim = nsim,\n m = 1, fitmethod = fitmethod)\n\n",
"And then it goes through the entire analysis above.\nRunning Many Bursts\nI've provided you with two files:\n* sgr1550_burstdata.dat has some burst IDs, start times and durations as specified above\n* 090122037a_tte_combined.dat contains the photon arrival times and energies needed to run the bursts in the file above.\nThe following defines some (simple) code that will read in sgr1550_burstdata.dat.",
"def read_burstdata(filename, datadir=\"./\"):\n \"\"\"\n Run Bayesian QPO search on all bursts in file filename. \n \n Parameters\n ----------\n filename: string\n Name of a file with minimal burst data. Needs to have columns:\n 1. ObsID\n 2. MET trigger time\n 3. Seconds since trigger\n 4. Burst duration in seconds\n Note that this is the way my file is currently set up. You can change \n this by changing the indices of the columns read out below.\n \n datadir: string\n Directory where the data (including the file in filename) is located.\n \"\"\"\n \n ## read in data\n ## type needs to be string, otherwise code fails on ObsID column, \n ## which doesn't purely consist of numbers\n data = np.loadtxt(datadir+filename, dtype=np.string_)\n \n ## ObsIDs are in first column, need to remain string\n obsids = data[:,0]\n \n ## trigger time is in second column, should be float\n trigtime = data[:,1].astype(\"float64\")\n \n ## start time in seconds since trigger is in third column,\n ## should be float\n bstart = data[:,2].astype(\"float64\")\n \n ## burst duration in seconds is in fourth column,\n ## should be float\n blength = data[:,3].astype(\"float64\")\n \n \n return obsids, trigtime, bstart, blength\n ",
"Now run the function above on the example file:",
"## now run the function above on the example file\nobsids, trigtime, bstart, blength = read_burstdata(\"sgr1550_burstdata.dat\", datadir=\"../data/\")",
"Next, we'll need to load the data, snip out the parts of the light curve that have bursts and make a GBMBurst object, so that we can then easily run the QPO search. \nWe'll loop over the ObsIDs first, because if there is more than one burst in a single observation, it makes no sense reading in the data several times!\nThen we'll find all bursts that have this ObsIDs, and for those bursts, we make GBMBurst objects with the data.",
"## empty list to store all burst objects in\nallbursts = []\n\n## which energy range do we want to run over?\nenergies = [8.,200.]\n\n## what's the Nyquist frequency supposed to be?\n## This depends on the time resolution of your instrument\n## and the frequencies where you expect QPOs to appear\nfnyquist = 4096.\n\n## get the unique set of ObsIDs\nobsid_set = np.unique(obsids)\n\n## loop over all ObsIDs\nfor o in obsid_set:\n ## this filename structure should reflect what your data files look like\n ## mine all look like ObsID_tte_combined.dat\n ## and contain TTE data (seconds since trigger) and photon energies\n datafile = datadir+\"%s_tte_combined.dat\"%o\n data = np.loadtxt(datafile)\n times = data[:,0]\n events = data[:,1]\n \n ## find all bursts in this observation\n bst = bstart[obsids == o]\n blen = blength[obsids == o]\n ttrig = trigtime[obsids == o]\n print(len(bst))\n \n ## loop over all bursts\n for s,l in zip(bst, blen):\n burst = GBMBurst(bid=o, bstart=s, blength=l,\n energies=energies, photons=times, events=events, \n instrument=\"gbm\", fnyquist=fnyquist)\n\n allbursts.append(burst)\n",
"Now that we have all bursts in a list, running the Bayesian QPO search is easy:",
"\nnchain = 500 ## number of chains\nniter = 200 ## number of iterations per chain\nnsim = 100 ## number of simulations, small to make it run fast for demo purposes\nfitmethod = \"bfgs\" ## scipy.optimize minimization algorithm\n\n## we'll just run 1 burst for brevity; delete [:1] if you want to run \n## on all bursts\nfor burst in allbursts[:1]:\n ## identifier for the output\n namestr = \"%s_%.3f\"%(burst.bid, burst.blen)\n \n ## run Bayesian analysis\n burst.bayesian_analysis(namestr = namestr,\n nchain = nchain,\n niter = niter,\n nsim = nsim,\n m = 1, fitmethod = fitmethod)\n\n\n",
"And we're done with the basic QPO search for burst light curves!\nFor convenience, I've made a script that takes the code below and allows you to run it directly from the command line:\n`shell> python run_bursts.py -f \"sgr1550_burstdata.dat\" -d \"../data/\"`"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
google/applied-machine-learning-intensive
|
content/06_other_models/04_knn/colab.ipynb
|
apache-2.0
|
[
"<a href=\"https://colab.research.google.com/github/google/applied-machine-learning-intensive/blob/master/content/06_other_models/04_knn/colab.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>\nCopyright 2020 Google LLC.",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"K-Nearest-Neighbors (KNN)\nThe k-nearest neighbors (KNN) algorithm is a simple concept: define some distance metric between the items in your dataset, and find the K closest items. You can then use those items to predict some property of a test item. This prediction is achieved by having them somehow \"vote\" on it.\nKNN for Classification\nIn this example we will use KNN to predict whether or not a person will be diagnosed with diabetes. The dataset is the Pima Indians Diabetes Database.\nUpload your kaggle.json file and run the code below.",
"! chmod 600 kaggle.json && (ls ~/.kaggle 2>/dev/null || mkdir ~/.kaggle) && mv kaggle.json ~/.kaggle/ && echo 'Done'\n! kaggle datasets download uciml/pima-indians-diabetes-database\n! ls",
"Unzip the dataset.",
"! unzip pima-indians-diabetes-database.zip",
"And then load the dataset into a DataFrame.",
"import pandas as pd \n\ndiabetes = pd.read_csv('diabetes.csv')\ndiabetes.sample(n=10)",
"Take a quick look at the data to see how many rows and columns we are dealing with.",
"diabetes.describe()",
"Our features are:\n- Pregnancies\n- Glucose\n- BloodPressure\n- SkinThickness\n- Insulin\n- BMI\n- DiabetesPedigreeFunction\n- Age\nOur target is Outcome, which is currently encoded with a 1 for a positive diagnosis and 0 for a negative diagnosis.",
"print(diabetes.groupby('Outcome').size())",
"Notice there are several zeros in the feature columns (check the min values). These are likely cases where the data simply wasn't collected or stored properly. (For example, a blood pressure of 0 does not make sense.) We need to clean these up or they will have an incorrect effect on the outcome of our KNN.",
"import numpy as np\nno_zero = ['Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'BMI']\n\nfor column in no_zero:\n diabetes[column] = diabetes[column].replace(0, np.NaN)\n mean = int(diabetes[column].mean(skipna=True))\n diabetes[column] = diabetes[column].replace(np.NaN, mean)\n\ndiabetes.describe()",
"We create training and testing sets (20% for testing), remembering to separate 'Outcome' as our target value.",
"from sklearn.model_selection import train_test_split\n\nX = diabetes.iloc[:,0:8]\ny = diabetes.iloc[:,8]\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)",
"Now we scale our features using StandardScaler.",
"from sklearn.preprocessing import StandardScaler\n\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test= sc_X.transform(X_test)",
"Finally, we use the scikit-learn KNN model.",
"from sklearn.neighbors import KNeighborsClassifier\n\nn_neighbors = 14\n\nKNN = KNeighborsClassifier(n_neighbors=n_neighbors, p=2, metric='euclidean')\nKNN.fit(X_train, y_train)\n\ny_pred = KNN.predict(X_test)",
"We now evaluate our model in terms of the confusion matrix, F1 score, and accuracy.",
"from sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import accuracy_score\n\ncm = confusion_matrix(y_test,y_pred)\nf1 = f1_score(y_test,y_pred)\naccuracy = accuracy_score(y_test,y_pred)\n\n\nprint('The confusion matrix is', cm)\nprint('The F1 score is', f1)\nprint('The accuracy score is', accuracy)",
"K-Nearest-Neighbors for Regression\nWe can also use KNN for regression. In this example we'll actually build the model from scratch in order to demonstrate its simplicity.\nFor our model we'll use MovieLens data. MovieLens data is available in relation to the following paper:\ntext\nF. Maxwell Harper and Joseph A. Konstan. 2015.\nThe MovieLens Datasets: History and Context.\nACM Transactions on Interactive Intelligent Systems (TiiS) 5, 4: 19:1–19:19.\nhttps://doi.org/10.1145/2827872",
"! wget http://files.grouplens.org/datasets/movielens/ml-latest-small.zip\n! unzip ml-latest-small.zip",
"We'll use KNN to guess the rating of a movie by looking at the 10 movies that are closest to it in terms of genres and popularity.\nTo start, let's load up every rating in the dataset into a Pandas DataFrame:",
"import pandas as pd\n\nratings = pd.read_csv('./ml-latest-small/ratings.csv')\nratings.sample(n=10)",
"Now we'll group everything by movieId and compute the mean rating for the movie.",
"import numpy as np\n\nmean_ratings = ratings[['movieId', 'rating']].groupby('movieId').agg({'rating': ['sum', 'mean']})\nmean_ratings.columns = ['rating_count', 'mean_rating']\nmean_ratings.sample(n=10)",
"There is likely a fair amount of variance in the sum of ratings, so we'll normalize that column.",
"mean_ratings['rating_count'] = (\n (mean_ratings['rating_count'] - mean_ratings['rating_count'].min()) / \n (mean_ratings['rating_count'].max() - mean_ratings['rating_count'].min()))\n\nmean_ratings['rating_count'].describe()",
"Now let's get the genre information from the movies.csv file. In the genres column, we see the list of genres for each movie separated by a '|'. Note that a movie may have more than one genre. \nFirst we read the file into a DataFrame.",
"movies = pd.read_csv('./ml-latest-small/movies.csv')\nmovies.sample(n=10)\n\nmovies.describe()",
"Now we split the genres column on the '|' and create a new DataFrame called movies_split.",
"movies_split = movies.genres.str.split('|', expand=True)\nmovies_split.head()",
"We now create a list of all the unique genres that appear in this DataFrame and remove values that indicate that a genre wasn't specified.",
"genres = list(pd.unique(movies_split.values.ravel()))\ngenres.remove(None)\ngenres.remove('(no genres listed)')\ngenres = sorted(genres)\ngenres",
"In the movies DataFrame, we want to recode the values of the genres column to be a list of 20 0s and 1s that correspond to the values in list (in the order that they appear in list). For example, if a movie has genres 'Adventure and Children', then we would like the element in the genres column to be: \\\n[1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]",
"genre_to_id = {v:i for i, v in enumerate(genres)}\ngenre_to_id",
"The function defined below iterates through a list of genres and compares the values to the elements of genres_list. It then returns an appropriate array of 0s and 1s as described above.",
"# Create the array of 0s and 1s based on genres.\ndef encode_genres(l):\n encoding = np.zeros(len(genres)).astype(int)\n for genre in l:\n if genre in genre_to_id:\n encoding[genre_to_id[genre]] = 1\n return encoding\n\n# Test that f works as expected on an example list.\nencode_genres(['Adventure', 'Children'])",
"We split the genres column of the movies DataFrame to be a list. We do this in preparation for applying the function, encode_genres.",
"movies['genres'] = movies.genres.str.split('|')\nmovies.sample(n=10)",
"We apply the function encode_genres to the genres column to change the elements to arrays of 0s and 1s representing the genres. We also set the index to be the movieId.",
"movies['genres'] = movies.genres.apply(encode_genres)\nmovies = movies.set_index('movieId')\nmovies.sample(n=10)",
"Now we can add the mean rating and the count of ratings to the movies. Let's first make sure that every index is accounted for.",
"np.setdiff1d(movies.index.to_numpy(), mean_ratings.index.to_numpy())\n\nnp.setdiff1d(mean_ratings.index.to_numpy(), movies.index.to_numpy())",
"It looks like we are missing some IDs from the ratings, so we need to be sure to do an inner join. We don't want to include movies with no ratings or ratings with no movies.",
"movies = movies.join(mean_ratings, how='inner')\nmovies.sample(n=10)",
"Now let's define a function that computes the \"distance\" between two movies based on how similar their genres are and how similar their popularity is. To make sure it works, we'll compute the distance between movie IDs 2 and 2728:",
"from scipy import spatial\n\ndef distance(a, b):\n genre_distance = spatial.distance.euclidean(a['genres'], b['genres'])\n popularity_distance = abs(a['rating_count'] - b['rating_count'])\n return genre_distance + popularity_distance\n \ndistance(movies[movies.index == 2].iloc[0], movies[movies.index == 2728].iloc[0])",
"Remember, the higher the distance, the less similar the movies are. Let's check what movies 2 and 2728 actually are, and then let's confirm they're not all that similar:",
"print(movies[movies.index == 2].iloc[0])\nprint(movies[movies.index == 2728].iloc[0])",
"Now we just need a little code to compute the distance between some given test movie (Toy Story, in this example) and all of the movies in our dataset.\nWe'll find the 10 nearest neighbors utilizing a heapq to keep our memory usage low. Note that heapq pops the smallest values first, so we need to take the negative of the distance in order to remove the largest neighbors first.",
"import heapq\n\ndef k_nearest_neighbors(movie_id, K):\n distances = []\n central_movie = movies[movies.index == movie_id].iloc[0]\n for mid, movie in movies.iterrows():\n if (mid != movie_id):\n dist = distance(central_movie, movie)\n if len(distances) < K:\n heapq.heappush(distances, (-dist, mid))\n else:\n _ = heapq.heappushpop(distances, (-dist, mid))\n return [x[1] for x in distances]\n\navg_rating = 0.0\nfor id in k_nearest_neighbors(1, 10):\n neighbor = movies[movies.index == id].iloc[0]\n print(neighbor['title'], neighbor['mean_rating'])\n avg_rating += neighbor['mean_rating']\n\nprint(\"\\nPredicted Rating: \", avg_rating/10)",
"How does this compare to Toy Story's actual average rating?",
"movies[movies.index == 1].iloc[0]['mean_rating']",
"Exercise: KNeighborsRegressor\nEarlier in the lab, we built a KNN regressor from scratch. Scikit-learn offers the KNeighborsRegressor, which can perform the regression for us.\nIn this exercise we'll again use the MovieLens dataset to predict rating. Instead of writing your own regressor, use the KNeighborsRegressor. You'll need to load the data, prepare it for the regressor, and then build and train your model.\nInstead of building one model, build one hundred. Try using a neighbor count from 1 to 101. Train your model using a new neighbor count each time. Keep some holdout data for testing, and calculate the root mean squared error for each neighbor count on the holdout data. Plot the RMSE data vs. the neighbor count to try to determine the optimal number of neighbors to consider for this dataset.\nExplain your work. Use as many code and text blocks as you need.\nStudent Solution",
"# Your code goes here.",
""
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
epequeno/apl-jun2015
|
data types.ipynb
|
unlicense
|
[
"Data types\nToday we'll be taking a look at the standard data types that come with python and some examples of how we can use these data types to represent real world information. This list isn't comprehensive, there are many more types available but this is the majority of what you'll see day to day.\nWhy?\nData types are the building blocks of applications. They are the basic elements we can combine to form more complex structures.\nIntegers\nIntegers are whole numbers. They can be either positive or negative. Not much to say here, they do what you'd expect!",
"1\n\n-5\n\nprint 2 + 10 # addition\nprint 5 - 3 # subtraction\nprint 6 * 4 # multiplication\nprint 10 / 5 # division\nprint 2**4 # exponents",
"There is something I should mention here. python 2 can trip people up when trying to do something like the following:",
"2 / 3",
"This is because python defaults to returning an integer (rounded down) when you ask to divide two integers. Read more about this here: http://python-history.blogspot.com/2009/03/problem-with-integer-division.html\nTo get the answer you'd expect you'll need at least one of the numbers to be a float:",
"2 / 3.0",
"Floating point numbers\nFloating point numbers can be a bit tricky. Let's take a look at some examples:",
"1.5\n\ntype(1.5)",
"So you might think that floats are simply numbers that have decimal parts but...",
"0.1 + 0.2",
"The python docs discuss this behavior: (https://docs.python.org/2/tutorial/floatingpoint.html#representation-error):\n\nNote that this is in the very nature of binary floating-point: this is not a bug in Python, and it is not a bug in your code either. You’ll see the same kind of thing in all languages that support your hardware’s floating-point arithmetic (although some languages may not display the difference by default, or in all output modes).\n\nFor more info:\n\nhttp://en.wikipedia.org/wiki/IEEE_floating_point\nhttp://cr.yp.to/2005-590/goldberg.pdf\n\nIn short: if you are doing work which requires numerical precision you'll want to use the decimal library. Note that we have to pass the Decimal class a string as an argument.",
"from decimal import Decimal\nDecimal('0.1') + Decimal('0.2')",
"Strings\nNow we get to the fun stuff. I say we're getting into the fun stuff because there aren't a lot of methods for numerical types but the rest of the types we'll discuss have plenty of methods available to them which can be very useful.\nStrings are text, generally. A string is any collection of symbols surrounded by quotes:",
"'Hello python learners'",
"Strings can use single, double, and triple quotes:",
"print 'Hello'\nprint \"there\"\nprint '''python'''\nprint \"\"\"learners!\"\"\"",
"It's useful that we can use all types of quotes as it allows us to have strings with quote's inside them.",
"print \"This string contains single quotes but that's ok since it's surrounded by double quotes\"\nprint 'This string is surrounded by single quotes. The cow says: \"mooo\"'\nprint '''This string want's to mix both \"types\" of quotes and that's ok since we surrounded it with triple quotes! '''\n\n\"We can \" + \"concatenate strings \" + \"together using the + operator\"\n\nfirst = \"Sometimes it's better \"\nmiddle = \"to assign parts of a long string \"\nlast = \"to variables then concatenate the strings by the variable names\"\nsentence = first + middle + last\nprint sentence\n\n(\"And sometimes \"\n\"we can split a string \"\n\"on seperate lines and they will be \"\n\"put together since they are surrounded by parentheses!\")",
"One of the most common built-in functions you'll use is len(), as you might imagine it returns the length of the argument you pass it:",
"name = \"Eve\"\nlen(name)",
"These methods on strings allow us to modify and ask questions about a string.",
"for i in dir('Hello'):\n if not i.startswith('_'):\n print i",
"Here are some examples of what we can do with these methods:",
"word = \"hello\"\nprint \"capitalize:\", word.capitalize() # capitalize the first letter of the string\nprint \"count:\", word.count('l') # count how many times the string we pass as an argument appear in 'word'\nprint \"endswith:\", word.endswith('o') # T/F if it ends with the string we pass as an argument\nprint \"index:\", word.index('o') # Returns index of the string we pass as an argument (remember indexes start at 0)\nprint \"isalpha:\", word.isalpha() # methods that start with 'is' give us a clue that the method returns True or False\nprint \"upper:\", word.upper() # changes all letters of the string to uppercase\nword_two = \"HeLlO\"\nprint \"swapcase:\", word_two.swapcase() # for every letter in the string, swap between upper and lower case\nname = \"guido van rossum\"\nprint \"title:\", name.title() # Assumes the string is a name and will change the first letter of each word to uppercase\nsentence = \"The quick brown fox\"\nprint \"split:\", sentence.split() # Splits the string into individual words grouped into a list.",
"Lists\nSo far we've talked about data types that exist as singular objects. Now we can move on to data types that act as collections of items. The first we'll discuss is lists.\nA list is an ordered series of things. A list can contain objects of any type, including other lists! We use square brackets [] around a comma seperated series of objects to define a list.",
"a = [1, 2, 3]\nprint a\nprint len(a)\n\nb = [1, 'one', 1.0]\nprint b\nprint len(b)\n\nc = [[1, 2, 3], ['one', 'two', 'three'], [1.0, 2.0, 3.0]]\nprint c\nprint len(c)",
"Just like strings there are methods available to us to work with lists",
"for i in dir([]):\n if not i.startswith('_'):\n print i",
"Let's take a look at how these work. We'll start off with a list of two names, alice and bob. From there we'll use each of the methods to modify the 'names' list.",
"names = ['alice', 'bob']\nnames",
"append() will add the argument to the end of the list",
"names.append('eve') \nnames",
"We'll append again to show off the next method",
"names.append('bob') \nnames",
"count() tells us how many times the argument occurs in the list",
"print \"The word 'bob' is seen:\", names.count('bob') ",
"append() only adds a single item at a time, if we want to extend our original list by several items we can use the extend() method and pass in a list of things to add to the end.",
"names.extend(['bill', 'sally']) \nnames",
"We can find the position of an item using index(), remember lists start counting at 0",
"print \"'sally' is at index:\", names.index('sally') ",
"We can use insert() to put an item at a specific position in the list",
"names.insert(2, 'mike')\nnames",
"pop() can be used for a couple of things, if we simply need to remove the last item from the list we can call it by itself",
"names.pop()\nnames",
"But, we can also keep that last item in another variable:",
"last_person = names.pop()\nprint names\nprint last_person",
"remove() will remove the 1st occurance of the argument we give it. Notice that alice and mike are now next to each other and the last bob is still in the list",
"names.remove('bob')\nnames",
"reverse() does pretty much what you'd expect it to",
"names.reverse()\nnames",
"As does sort()",
"names.sort()\nnames",
"Interlude: Index notation\nBefore we move on to our discussion of tuples I'd like to discuss a common way to select items from objects. If we know the index of an item we can select it like this:",
"print names\n\nprint names[0]",
"But this will work for other types as well",
"'alice'[3]",
"Tuples\nTuples are a bit like lists but have some very important differences. First let's take a look at how they are similar:\n\nordered\nseries of things seperated by commas\ncan be of any length\ncan be a mix of any type of things\n\nIt'll be easier to show thier differences through example. First, let's look at a typical tuple:",
"a = (1, 2, 3)\nprint a\nprint type(a)",
"We normally use parentheses to define a tuple but really any object followed by a comma becomes a tuple. Either way, python will add the parentheses for us anyway:",
"a = \"example\",\nprint type(a)\nprint a",
"Probably the most important difference between a list and a tuple has to do with 'immutability.' Let's take a look at an example:",
"names = ['alice', 'bob']\npeople = ('alice', 'bob')\nprint names\nprint people",
"So far, not much difference. But lets say that we wanted to get rid of bob and replace him with eve.",
"names[1] = 'eve'\nnames\n\npeople[1] = 'eve'",
"Uh-oh python has told us that the tuple does not allow us to 'mute' an item in the tuple the way we can with a list. In other words lists are mutable, tuples are immutable.\nLet's see what methods we have available to us for tuples:",
"for i in dir(()):\n if not i.startswith('_'):\n print i",
"As a result of the immutability of tuples we don't have many built in methods.\nDictionaries\nSo the data types we've seen so far are great for collections of things but there are times where we have pieces of information that are related in some way and we'd like to keep track of those relationships.\nLet's start off with an example:",
"eng_to_spn = {'one': 'uno', \n 'two': 'dos', \n 'three': 'tres'}\neng_to_spn",
"Here we have a relationship between pairs of strings, each pair is seperated by a ':' The object to the left of the ':' is called the key and that thing we will use to select a relationship from the dictionary. The object to the right of the ':' is the value. \nSo we have a relationship that can be described as english numbers : spanish numbers\nThe whole collection of these pairs is the dictionary. We represent dictionaries in python with curly brackets {}\nLet's try picking out some data from the dictionary:",
"eng_to_spn['one']",
"Good, when I use a key to select from the dictionary I get the value associated with that key as a response. Let's try another way:",
"eng_to_spn[0]",
"An important thing to note about dictionaries is that while they are similar to lists and tuples they are unordered.\nThis is where the similarity to a real-world dictionary breaks down. We call this data type a dictionary because it keeps track of the relationship between one thing (a word) and some other thing (it's definition). In the real world dictionaries are ordered alphabetically but in python the dictionary data type has no order.\nAnd because of that we can't select the first item using the 0 index like we could with a list or tuple.\nAs a matter of fact reading from a dict will produce an arbitrary order:",
"for key in eng_to_spn:\n print key",
"Dictionaries are mutable like a list so we can change the relationship of a pair like this:",
"eng_to_spn['one'] = 1\neng_to_spn['two'] = 2\neng_to_spn['three'] = 3\neng_to_spn",
"We can also add pairs:",
"eng_to_spn['four'] = 4\neng_to_spn",
"But for now let's keep the dictionary as a set of english words mapped to thier spanish translations:",
"eng_to_spn = {'one': 'uno', \n 'two': 'dos', \n 'three': 'tres'}\neng_to_spn",
"Let's take a look at the methods available to us for dictionaries:",
"for i in dir({}):\n if not i.startswith('_'):\n print i",
"copy() will return a \"shallow copy\" of the dictionary. I won't get into detail here but if you'd like more information see: http://stackoverflow.com/a/3975388",
"eng_to_spn2 = eng_to_spn.copy()\neng_to_spn2",
"fromkeys() will take the keys from one dict and make a new dict with the same keys but with the keys that we specify",
"eng_to_spn3 = eng_to_spn.fromkeys(eng_to_spn, 'english')\neng_to_spn3",
"get() will pull the value from a dictionary:",
"eng_to_spn.get('one')",
"What's useful about the get() method is that we can specify a default value in the case that what we are asking for doesn't exist yet in the dictionary. This can avoid errors:",
"eng_to_spn['four']\n\nprint eng_to_spn.get('four', None)",
"We can also ask if a key exists using has_key():",
"eng_to_spn.has_key('four')",
"Although in this example we could have gotten the same result by doing the following:",
"'four' in eng_to_spn",
"Even though there are methods available to us (the ones we can see with dir()) there may be built-in tools of the language that may be a better choice. \nWe can get the pairs as a list of tuples using the items() method:",
"eng_to_spn.items()",
"iteritems() gives an item that we can call .next() on. This is valuable in the case that don't want to load the entire dictionary into memory but still want to iterate through the items.",
"items = eng_to_spn.iteritems()\nprint items.next()\nprint items.next()",
"We can do the same with the keys using iterkeys():",
"keys = eng_to_spn.iterkeys()\nprint keys.next()\nprint keys.next()",
"We can remove a key and return the value using pop()",
"three = eng_to_spn.pop('three')\nprint three\neng_to_spn",
"popitem() will remove a associaton and return it as a tuple but you don't get to pick which item you'd like to pop out!",
"anything = eng_to_spn.popitem()\nprint anything\neng_to_spn",
"setdefault() works a bit like get() but will set the value for us if it doesn't exist in the dictionary:",
"eng_to_spn.setdefault('four', 'quatro')\neng_to_spn",
"update() allows us to add values from another dictionary:",
"new_numbers = {'five': 'cinco', 'six': 'seis'}\neng_to_spn.update(new_numbers)\neng_to_spn",
"We can see all the values from a dictionary using values()",
"eng_to_spn.values()",
"These next methods, viewitems(), viewkeys() and viewvalues() each return a dictionary view object. The python docs discuss thier purpose: https://docs.python.org/2/library/stdtypes.html#dictionary-view-objects\n\nThe objects returned by dict.viewkeys(), dict.viewvalues() and dict.viewitems() are view objects. They provide a dynamic view on the dictionary’s entries, which means that when the dictionary changes, the view reflects these changes.\nDictionary views can be iterated over to yield their respective data, and support membership tests:",
"eng_to_spn.viewitems()\n\neng_to_spn.viewkeys()\n\neng_to_spn.viewvalues()",
"We skipped over clear() but here's a good time to see what it does, clear the dictionary out!",
"eng_to_spn.clear()\neng_to_spn"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
ONCdb/ONCdb
|
notebooks/generate_ONCdb.ipynb
|
mit
|
[
"%matplotlib inline\nimport ONCdb\nimport os\nfrom ONCdb import make_onc as mo\nfrom astrodbkit import astrodb, astrocat\nDIR_PATH = os.path.dirname(os.path.realpath(ONCdb.__file__))",
"Make the ONCdb\nHere are step-by-step instructions on how to generate the ONCdb from VizieR catalogs.\nStep 1: Initialize the database and ingest the raw data",
"# Initialize a database\nonc = astrocat.Catalog()\n\n# Ingest a VizieR catalog by supplying a path, catalog name, and column name of a unique identifier \nonc.ingest_data(DIR_PATH+'/raw_data/viz_acs.tsv', 'ACS', 'ONCacs', count=10)\n\n# The raw dataset is stored as an attribute\nprint(onc.ACS)\n\n# Add another one! (This is a test file with a fake match of the ACS catalog)\nonc.ingest_data(DIR_PATH+'/raw_data/viz_wfpc2.tsv', 'WFPC2', 'ONCpc2', count=10)\nprint(onc.WFPC2)",
"Step 2: Cross-match the sources",
"# Now let's group the sources by some critical distance in arcseconds \n# and assign IDs for our new custom database sources\nonc.group_sources()\n\n# Summary of what we've done\nonc.info\n\n# Take a look again\nonc.catalog\n\n# # Now let's correct the WFPC2 sources for some systematic offset\n# onc.correct_offsets('WFPC2', truth='ACS')\n\n# # And now the corrected data\n# print('Corrected and original WFPC2 sources:')\n# print(onc.catalog[onc.catalog['cat_name']=='WFPC2'][['oncID','ra_corr','dec_corr','_RAJ2000','_DEJ2000']])\n\nonc.info",
"Step 3: Generate the SQL database",
"# Generate the ONCdb\nmo.generate_ONCdb(onc)\n\n# Check that it worked\ndb = astrodb.Database(DIR_PATH+'/orion.sql')\n\ndb.query(\"SELECT * FROM browse\", fmt='table')",
"Hooray!"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
stijnvanhoey/hydropy
|
hydropy_tutorial.ipynb
|
bsd-2-clause
|
[
"%matplotlib inline\n\nfrom __future__ import print_function\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n# import seaborn as sns\n# sns.set_style('whitegrid')\n\nfrom IPython.display import HTML\n\nflowdata = pd.read_pickle(\"./data/FlowData\")\nraindata = pd.read_pickle(\"./data/RainData\")",
"Hydropy-package\nGo to the hydropy website for more information on the package and the code on Github.",
"#Loading the hydropy package\nimport hydropy as hp",
"We have a Dataframe with river discharge at different locations in the Maarkebeek basin (Belgium):",
"HTML('<iframe src=http://biomath.ugent.be/~stvhoey/maarkebeek_data/ width=700 height=350></iframe>')",
"Data downloaded from http://www.waterinfo.be/, made available by the Flemish Environmental Agency (VMM).",
"flowdata.head()\n\nflowdata.tail()\n\nprint(len(flowdata), 'records', 'from', flowdata.index[0], 'till', flowdata.index[-1])",
"Converting the dataframe to a hydropy time series datatype, provides extra functionalities:",
"myflowserie = hp.HydroAnalysis(flowdata)",
"Select the summer of 2009:",
"myflowserie.get_year('2009').get_season('summer').plot(figsize=(12,6))",
"Select only the recession periods of the discharges (catchment drying) in June 2011:",
"myflowserie.get_year('2011').get_month(\"Jun\").get_recess().plot(figsize=(12,6))",
"Peak values above the 90th percentile for the station LS06_347 in July 2010:",
"fig, ax = plt.subplots(figsize=(13, 6))\nmyflowserie['LS06_347'].get_year('2010').get_month(\"Jul\").get_highpeaks(150, above_percentile=0.9).plot(style='o', ax=ax)\nmyflowserie['LS06_347'].get_year('2010').get_month(\"Jul\").plot(ax=ax)",
"Select storms and make plot",
"raindata.columns\n\nstorms = myflowserie.derive_storms(raindata['P05_019'], 'LS06_347',\n number_of_storms=3, drywindow=50,\n makeplot=True)\n\nstorms = myflowserie.derive_storms(raindata['P06_014'], 'LS06_347',\n number_of_storms=3, drywindow=96,\n makeplot=True)",
"Season averages (Pandas!)",
"myflowserie.data.groupby('season').mean()",
"Goals\n\nUse the power of Python Pandas\nProvide domain specific (hydrological) functionalities\nProvide intuitive interface for hydrological time series (main focus on flow series)\nCombine different earlier written loose functionalities in package\nIndependent, but useful in global Phd-developed framework: enables the user to quickly look at different properties of model behaviour\n\nWhere?\n\nCode : https://github.com/stijnvanhoey/hydropy --> Fork and contribute\nWebsite : https://stijnvanhoey.github.io/hydropy/\n\nHow to start?\n\nFork the github repo\n\nGet the code on your computer\ngit clone https://github.com/yourname/hydropy\n\n\n\nRun the python setup script (install as development package):\npython setup.py develop\n4. Improve implementation, add functionalities,...\n\n\nMake a new branch\n\nMake improvements on this branch\npush the branch towards the repo and perform a pull request\n\n\n\nFunctionalities extended",
"import hydropy as hp\nflowdata = pd.read_pickle(\"./data/FlowData\")\nraindata = pd.read_pickle(\"./data/RainData\")\nmyflowserie = hp.HydroAnalysis(flowdata)",
"Forwarding Pandas functionalities",
"# Data inspection\nmyflowserie.summary() #head(), tail(),\n\n# Resampling frequencies\ntemp1 = myflowserie.frequency_resample('7D', 'mean') # 7 day means\ntemp1.head()\n\ntemp2 = myflowserie.frequency_resample(\"M\", \"max\") # Monthly maxima\ntemp2.head()\n\ntemp3 = myflowserie.frequency_resample(\"A\", 'sum') # Yearly sums\ntemp3.head(6)\n\n#slicing of the dataframes\nmyflowserie['L06_347']['2009'].plot()",
"Easy period selection",
"# get_month, get_year, get_season, get_date_range\nmyflowserie.get_date_range(\"01/01/2010\",\"03/05/2010\").plot(figsize=(13, 6))\n\n# or combine different statements:\nmyflowserie.get_year('2010').get_month(6).plot(figsize=(13, 6))",
"For the seasons some options are available: Meteorologic (first of the month) or astrologic (21st of the month)",
"myflowserie.current_season_dates()\n\nmyflowserie.info_season_dates('north', 'astro')",
"'Hydrological' selections",
"# Peaks (high or low)\nmyflowserie['LS06_348'].get_year('2012').get_highpeaks(60, above_percentile=0.8).data.dropna().head()\n\n# Recessions and climbing periods get_recess, get_climbing\nmyflowserie.get_year(\"2012\").get_month(\"april\").get_climbing().plot(figsize=(13, 6))\n\n# above/below certain percentile values\nmyflowserie[\"LS06_347\"].get_above_percentile(0.6).get_year('2011').get_season('summer').plot()",
"Furthermore:\n\nget_storms_per_year (in combination with rain-data)\nget_above_baseflow (in combination with baseflow-data) => next on the list\n..."
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
maxentile/msm-learn
|
notebooks/CV issue.ipynb
|
mit
|
[
"Problem: Given a collection of trajectories from a stochastic process, and some alternative discretizations, we would like to perform model selection using cross-validated GMRQ. What happens if our train / test trajectories are in separate metastable regions?",
"# construct and simulate toy example: diffusive dynamics in a double-well potential\n\nimport numpy as np\nimport numpy.random as npr\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\noffset = np.array([3,0])\n\ndef q(x):\n ''' unnormalized probability '''\n return np.exp(-np.sum((x-offset)**2)) + np.exp(-np.sum((x+offset)**2))\n\ndef simulate_diffusion(x_0,q,step_size=0.01,max_steps=10000):\n ''' starting from x_0, simulate RW-MH '''\n traj = np.zeros((max_steps+1,len(x_0)))\n traj[0] = x_0\n old_q = q(x_0)\n \n for i in range(max_steps):\n \n prop = traj[i]+npr.randn(len(x_0))*step_size\n new_q = q(prop)\n\n if new_q/old_q>npr.rand():\n traj[i+1] = prop\n old_q = new_q\n else:\n traj[i+1] = traj[i]\n \n return traj\n\n# collect some trajectories\n\nnpr.seed(0) # for repeatability\n\ntrajs = []\nrun_ids = []\nfor i,offset_ in enumerate([-offset,offset]): # analogous to 3 RUNs on Folding@Home\n for _ in range(10): # for each RUN, collect 10 clones\n trajs.append(simulate_diffusion(np.zeros(2)+offset_,q,max_steps=10000,step_size=0.1))\n run_ids.append(i)\n\nlen(trajs)\n\n# plot trajectories\n\nr = 6\n\ndef plot_trajectories(trajs,alpha=1.0):\n from matplotlib.pyplot import cm\n cmap = cm.get_cmap('Spectral')\n N = len(trajs)\n for i,traj in enumerate(trajs):\n c = cmap(float(i)/(N-1))\n plt.plot(traj[:,0],traj[:,1],color=c,alpha=alpha)\n \n plt.xlabel(r'$x$')\n plt.ylabel(r'$y$')\n plt.title('Trajectories')\n\n plt.xlim(-r,r)\n plt.ylim(-r,r)\nplot_trajectories(trajs)",
"Two candidate discretizations",
"n_bins=50",
"Discretization_fast: finely resolving a fast DOF",
"offsets = np.linspace(-r,r,n_bins)\n\nplot_trajectories(trajs,alpha=0.3)\n\nfor offset in offsets:\n plt.hlines(offset,-r,r,colors='grey')\n \n\nplt.xlim(-r,r)\nplt.ylim(-r,r)\n\nplt.xlabel(r'$x$')\nplt.ylabel(r'$y$')\nplt.title('Trajectories + discretization_fast')",
"Discretization_slow: finely resolving a slow DOF",
"offsets = np.linspace(-r,r,n_bins)\n\nplot_trajectories(trajs,alpha=0.3)\n\nfor offset in offsets:\n plt.vlines(offset,-r,r,colors='grey')\n \n\nplt.xlim(-r,r)\nplt.ylim(-r,r)\n\nplt.xlabel(r'$x$')\nplt.ylabel(r'$y$')\nplt.title('Trajectories + discretization_slow')",
"Extract discrete trajectories",
"def axis_aligned_discretization(trajs,offsets,dim=0):\n dtrajs = []\n \n for traj in trajs:\n ax = traj[:,dim]\n \n bins = np.zeros((len(offsets)+1))\n bins[0] = -np.inf\n bins[1:] = offsets\n dtraj = np.digitize(ax,bins)\n dtrajs.append(dtraj)\n \n return dtrajs\n\ndtrajs_fast = axis_aligned_discretization(trajs,offsets,dim=1)\ndtrajs_slow = axis_aligned_discretization(trajs,offsets,dim=0)\n\nfrom msmbuilder.msm import MarkovStateModel\n\nm = 6 # how to choose m beforehand?\n\nmsm = MarkovStateModel(n_timescales=m)\nmsm.fit(dtrajs_fast)\nmsm.score_\n\nmsm = MarkovStateModel(n_timescales=m)\nmsm.fit(dtrajs_slow)\nmsm.score_",
"Cross-validation",
"def two_fold_cv(dtrajs,msm):\n train_scores = []\n test_scores = []\n \n split = len(dtrajs)/2\n \n A = dtrajs[:split]\n B = dtrajs[split:]\n \n msm.fit(A)\n train_scores.append(msm.score_)\n try:\n test_scores.append(msm.score(B))\n except:\n test_scores.append(np.nan)\n \n msm.fit(B)\n train_scores.append(msm.score_)\n try:\n test_scores.append(msm.score(A))\n except:\n test_scores.append(np.nan)\n \n return train_scores,test_scores\n\nlen(dtrajs_fast),len(dtrajs_slow)\n\ntrain_scores_fast, test_scores_fast = two_fold_cv(dtrajs_fast,msm)\ntrain_scores_slow, test_scores_slow = two_fold_cv(dtrajs_slow,msm)\n\ntrain_scores_fast, test_scores_fast\n\ntrain_scores_slow, test_scores_slow\n\nnp.mean(train_scores_fast), np.mean(test_scores_fast)\n\nnp.mean(train_scores_slow), np.mean(test_scores_slow)\n\ndef leave_one_out_gmrq(dtrajs,msm):\n \n train_scores = []\n test_scores = []\n \n for i,test in enumerate(dtrajs):\n train = dtrajs[:i]+dtrajs[i+1:]\n msm.fit(train)\n train_scores.append(msm.score_)\n try:\n test_scores.append(msm.score(test))\n except:\n test_scores.append(np.nan)\n \n return train_scores,test_scores\n\ntrain_scores_fast, test_scores_fast = leave_one_out_gmrq(dtrajs_fast,msm)\ntrain_scores_slow, test_scores_slow = leave_one_out_gmrq(dtrajs_slow,msm)\n\nnp.mean(train_scores_fast), np.mean(test_scores_fast)\n\nnp.mean(train_scores_slow), np.mean(test_scores_slow)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
arcyfelix/Courses
|
18-05-28-Complete-Guide-to-Tensorflow-for-Deep-Learning-with-Python/02-Tensorflow-Basics/06-Regression-Exercise.ipynb
|
apache-2.0
|
[
"Regression Exercise\nCalifornia Housing Data\nThis data set contains information about all the block groups in California from the 1990 Census. In this sample a block group on average includes 1425.5 individuals living in a geographically compact area. \nThe task is to approximate the median house value of each block from the values of the rest of the variables. \nIt has been obtained from the LIACC repository. The original page where the data set can be found is: http://www.liaad.up.pt/~ltorgo/Regression/DataSets.html.\nThe Features:\n\nhousingMedianAge: continuous. \ntotalRooms: continuous. \ntotalBedrooms: continuous. \npopulation: continuous. \nhouseholds: continuous. \nmedianIncome: continuous. \nmedianHouseValue: continuous. \n\nThe Data\n Import the cal_housing_clean.csv file with pandas. Separate it into a training (70%) and testing set (30%).",
"import pandas as pd\n\ndf = pd.read_csv('./data/cal_housing_clean.csv')\n\ndf.head()\n\ndf.describe().T\n\ny = df['medianHouseValue']\nx = df.drop('medianHouseValue', axis = 1)\n\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, Y_train, Y_test = train_test_split(x, y, \n test_size = 0.3, \n random_state = 7) \n\nX_train.head()\n\nY_train.head()",
"Scale the Feature Data\n Use sklearn preprocessing to create a MinMaxScaler for the feature data. Fit this scaler only to the training data. Then use it to transform X_test and X_train. Then use the scaled X_test and X_train along with pd.Dataframe to re-create two dataframes of scaled data.",
"from sklearn.preprocessing import MinMaxScaler\n\nscaler = MinMaxScaler()\n\nscaler.fit(X_train)\n\n# Keeping Pandas DataFrame format after re-scaling\nX_train = pd.DataFrame(data = scaler.transform(X_train), \n columns = X_train.columns,\n index = X_train.index)\n\nX_test = pd.DataFrame(data = scaler.transform(X_test), \n columns = X_test.columns,\n index = X_test.index)\n\nX_train.head()",
"Create Feature Columns\n Create the necessary tf.feature_column objects for the estimator. They should all be treated as continuous numeric_columns.",
"df.columns\n\nimport tensorflow as tf\n\nage = tf.feature_column.numeric_column('housingMedianAge')\nrooms = tf.feature_column.numeric_column('totalRooms')\nbedrooms = tf.feature_column.numeric_column('totalBedrooms')\npop = tf.feature_column.numeric_column('population')\nhouseholds = tf.feature_column.numeric_column('households')\nincome = tf.feature_column.numeric_column('medianIncome')\n\nfeature_columns = [age, rooms, bedrooms, pop, households, income]",
"Create the input function for the estimator object. (play around with batch_size and num_epochs)",
"input_feature_func = tf.estimator.inputs.pandas_input_fn(x = X_train,\n y = Y_train, \n batch_size = 10,\n num_epochs = 1000,\n shuffle = True)",
"Create the estimator model. Use a DNNRegressor. Play around with the hidden units!",
"dnn_model = tf.estimator.DNNRegressor(hidden_units = [6, 5, 5], feature_columns = feature_columns)",
"Train the model for ~1,000 steps. (Later come back to this and train it for more and check for improvement)",
"dnn_model.train(input_fn = input_feature_func, steps = 5000)",
"Create a prediction input function and then use the .predict method of your estimator model to create a list of predictions on your test data.",
"prediction_input_func = tf.estimator.inputs.pandas_input_fn(x = X_test,\n batch_size = 10,\n num_epochs = 1,\n shuffle = False)\n\nprediction_generator = dnn_model.predict(prediction_input_func)\n\nprecitions = list(prediction_generator)",
"Calculate the RMSE. You should be able to get around 100,000 RMSE (remember that this is in the same units as the label.) Do this manually or use sklearn.metrics",
"final_pred = []\n\nfor pred in precitions:\n final_pred.append(pred['predictions'])\n\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error\n\n# RMSE = sqrt(MSE) = MSE ** 0.5\nmean_squared_error(Y_test, final_pred) ** 0.5",
"Great Job!"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
VVard0g/ThreatHunter-Playbook
|
docs/notebooks/windows/02_execution/WIN-190410151110.ipynb
|
mit
|
[
"Basic PowerShell Execution\nMetadata\n| Metadata | Value |\n|:------------------|:---|\n| collaborators | ['@Cyb3rWard0g', '@Cyb3rPandaH'] |\n| creation date | 2019/04/10 |\n| modification date | 2020/09/20 |\n| playbook related | [] |\nHypothesis\nAdversaries might be leveraging PowerShell to execute code within my environment\nTechnical Context\nNone\nOffensive Tradecraft\nAdversaries can use PowerShell to perform a number of actions, including discovery of information and execution of code.\nTherefore, it is important to understand the basic artifacts left when PowerShell is used in your environment.\nSecurity Datasets\n| Metadata | Value |\n|:----------|:----------|\n| docs | https://securitydatasets.com/notebooks/atomic/windows/execution/SDWIN-190518182022.html |\n| link | https://raw.githubusercontent.com/OTRF/Security-Datasets/master/datasets/atomic/windows/execution/host/empire_launcher_vbs.zip |\nAnalytics\nInitialize Analytics Engine",
"from openhunt.mordorutils import *\nspark = get_spark()",
"Download & Process Security Dataset",
"sd_file = \"https://raw.githubusercontent.com/OTRF/Security-Datasets/master/datasets/atomic/windows/execution/host/empire_launcher_vbs.zip\"\nregisterMordorSQLTable(spark, sd_file, \"sdTable\")",
"Analytic I\nWithin the classic PowerShell log, event ID 400 indicates when a new PowerShell host process has started. You can filter on powershell.exe as a host application if you want to or leave it without a filter to capture every single PowerShell host\n| Data source | Event Provider | Relationship | Event |\n|:------------|:---------------|--------------|-------|\n| Powershell | Windows PowerShell | Application host started | 400 |\n| Powershell | Microsoft-Windows-PowerShell/Operational | User started Application host | 4103 |",
"df = spark.sql(\n'''\nSELECT `@timestamp`, Hostname, Channel\nFROM sdTable\nWHERE (Channel = \"Microsoft-Windows-PowerShell/Operational\" OR Channel = \"Windows PowerShell\")\n AND (EventID = 400 OR EventID = 4103)\n'''\n)\ndf.show(10,False)",
"Analytic II\nLooking for non-interactive powershell session might be a sign of PowerShell being executed by another application in the background\n| Data source | Event Provider | Relationship | Event |\n|:------------|:---------------|--------------|-------|\n| Process | Microsoft-Windows-Security-Auditing | Process created Process | 4688 |",
"df = spark.sql(\n'''\nSELECT `@timestamp`, Hostname, NewProcessName, ParentProcessName\nFROM sdTable\nWHERE LOWER(Channel) = \"security\"\n AND EventID = 4688\n AND NewProcessName LIKE \"%powershell.exe\"\n AND NOT ParentProcessName LIKE \"%explorer.exe\"\n'''\n)\ndf.show(10,False)",
"Analytic III\nLooking for non-interactive powershell session might be a sign of PowerShell being executed by another application in the background\n| Data source | Event Provider | Relationship | Event |\n|:------------|:---------------|--------------|-------|\n| Process | Microsoft-Windows-Sysmon/Operational | Process created Process | 1 |",
"df = spark.sql(\n'''\nSELECT `@timestamp`, Hostname, Image, ParentImage\nFROM sdTable\nWHERE Channel = \"Microsoft-Windows-Sysmon/Operational\"\n AND EventID = 1\n AND Image LIKE \"%powershell.exe\"\n AND NOT ParentImage LIKE \"%explorer.exe\"\n'''\n)\ndf.show(10,False)",
"Analytic IV\nMonitor for processes loading PowerShell DLL system.management.automation\n| Data source | Event Provider | Relationship | Event |\n|:------------|:---------------|--------------|-------|\n| Module | Microsoft-Windows-Sysmon/Operational | Process loaded Dll | 7 |",
"df = spark.sql(\n'''\nSELECT `@timestamp`, Hostname, Image, ImageLoaded\nFROM sdTable\nWHERE Channel = \"Microsoft-Windows-Sysmon/Operational\"\n AND EventID = 7\n AND (lower(Description) = \"system.management.automation\" OR lower(ImageLoaded) LIKE \"%system.management.automation%\")\n'''\n)\ndf.show(10,False)",
"Analytic V\nMonitoring for PSHost* pipes is another interesting way to find PowerShell execution\n| Data source | Event Provider | Relationship | Event |\n|:------------|:---------------|--------------|-------|\n| Named Pipe | Microsoft-Windows-Sysmon/Operational | Process created Pipe | 17 |",
"df = spark.sql(\n'''\nSELECT `@timestamp`, Hostname, Image, PipeName\nFROM sdTable\nWHERE Channel = \"Microsoft-Windows-Sysmon/Operational\"\n AND EventID = 17\n AND lower(PipeName) LIKE \"\\\\\\\\pshost%\"\n'''\n)\ndf.show(10,False)",
"Analytic VI\nThe \"PowerShell Named Pipe IPC\" event will indicate the name of the PowerShell AppDomain that started. Sign of PowerShell execution\n| Data source | Event Provider | Relationship | Event |\n|:------------|:---------------|--------------|-------|\n| Powershell | Microsoft-Windows-PowerShell/Operational | Application domain started | 53504 |",
"df = spark.sql(\n'''\nSELECT `@timestamp`, Hostname, Message\nFROM sdTable\nWHERE Channel = \"Microsoft-Windows-PowerShell/Operational\"\n AND EventID = 53504\n'''\n)\ndf.show(10,False)",
"Known Bypasses\nFalse Positives\nNone\nHunter Notes\n\nExplore the data produced in your environment with the analytics above and document what normal looks like from a PowerShell perspective.\nIf execution of PowerShell happens all the time in your environment, I suggest to categorize the data you collect by business unit to build profiles and be able to filter out potential noise.\nYou can also stack the values of the command line arguments being used. You can hash the command line arguments too and stack the values.\n\nHunt Output\n| Type | Link |\n| :----| :----|\n| Sigma Rule | https://github.com/SigmaHQ/sigma/blob/master/rules/windows/pipe_created/sysmon_powershell_execution_pipe.yml |\n| Sigma Rule | https://github.com/SigmaHQ/sigma/blob/master/rules/windows/process_creation/win_non_interactive_powershell.yml |\nReferences\n\nhttps://github.com/darkoperator/Presentations/blob/master/PSConfEU%202019%20Tracking%20PowerShell%20Usage.pdf\nhttps://posts.specterops.io/abusing-powershell-desired-state-configuration-for-lateral-movement-ca42ddbe6f06"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
ecell/ecell4-notebooks
|
en/tests/Birth_Death.ipynb
|
gpl-2.0
|
[
"Birth-Death\nThis is for an integrated test of E-Cell4. Here, we test a simple birth-death process in volume.",
"%matplotlib inline\nfrom ecell4.prelude import *",
"Parameters are given as follows. D and radius mean a diffusion constant and a radius of molecules, respectively. Dimensions of length and time are assumed to be micro-meter and second.",
"D = 1 # 0.01\nradius = 0.005\n\nN = 20 # a number of samples\n\ny0 = {} # {'A': 60}\nduration = 3\nV = 8",
"Make a model for all algorithms. No birth reaction with more than one product is accepted.",
"with species_attributes():\n A | {'radius': radius, 'D': D}\n\nwith reaction_rules():\n ~A > A | 45.0\n A > ~A | 1.5\n\nm = get_model()",
"Save a result with ode as obs, and plot it:",
"ret1 = run_simulation(duration, y0=y0, volume=V, model=m)\nret1",
"Simulating with gillespie (Bars represent standard error of the mean):",
"ret2 = ensemble_simulations(duration, ndiv=20, y0=y0, volume=V, model=m, solver='gillespie', repeat=N)\nret2.plot('o', ret1, '-')",
"Simulating with meso:",
"ret2 = ensemble_simulations(\n duration, ndiv=20, y0=y0, volume=V, model=m, solver=('meso', Integer3(3, 3, 3), 0.25), repeat=N)\nret2.plot('o', ret1, '-')",
"Simulating with spatiocyte:",
"ret2 = ensemble_simulations(\n duration, ndiv=20, y0=y0, volume=V, model=m, solver=('spatiocyte', radius), repeat=N)\nret2.plot('o', ret1, '-')",
"Simulating with egfrd:",
"ret2 = ensemble_simulations(\n duration, ndiv=20, y0=y0, volume=V, model=m, solver=('egfrd', Integer3(8, 8, 8)), repeat=N)\nret2.plot('o', ret1, '-')",
"Simulating with bd:",
"ret2 = ensemble_simulations(\n duration, ndiv=20, y0=y0, volume=V, model=m, solver=('bd', Integer3(8, 8, 8), 0.1), repeat=N)\nret2.plot('o', ret1, '-')"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
tleonhardt/machine_learning
|
SL6_Kernel_Methods_and_SVMs.ipynb
|
apache-2.0
|
[
"Support Vector Machines (SVMs)\nSupport vector machines (SVMs) are a set of supervised learning methods used for classification, regression and outliers detection.\nAdvantages of SVMs\n\nEffective in high dimensional spaces\nStill effective in cases where number of dimensions is greater than the number of samples\nUses a subset of training points in the decision function (called support vectors), so it is also memory efficient\nVersatile: different Kernel functions can be specified for the decision function\n\nDisadvantages of SVMs\n\nIf the number of features is much greater than the number of samples, the method is likely to give poor performances.\nSVMs do not directly provide probability estimates, these are calculated using an expensive five-fold cross-validation\n\nDisclaimer: Much of the code in this notebook was borrowed from the excellent book Introduction to Machine Learning with Python by Andreas Muller and Sarah Guido.\nLinear SVMs\nLinear support vector machines (linear SVMs) are one of the most common linear classification algorithms. In scikit-learn, linear SVMs for classification are implemented in LinearSVC.",
"import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nimport numbers\nfrom sklearn.utils import check_array, check_random_state\nfrom sklearn.utils import shuffle as shuffle_\n\ndef make_blobs(n_samples=100, n_features=2, centers=2, cluster_std=1.0,\n center_box=(-10.0, 10.0), shuffle=True, random_state=None):\n \"\"\"Generate isotropic Gaussian blobs for clustering.\n\n Read more in the :ref:`User Guide <sample_generators>`.\n\n Parameters\n ----------\n n_samples : int, or tuple, optional (default=100)\n The total number of points equally divided among clusters.\n\n n_features : int, optional (default=2)\n The number of features for each sample.\n\n centers : int or array of shape [n_centers, n_features], optional\n (default=3)\n The number of centers to generate, or the fixed center locations.\n\n cluster_std: float or sequence of floats, optional (default=1.0)\n The standard deviation of the clusters.\n\n center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))\n The bounding box for each cluster center when centers are\n generated at random.\n\n shuffle : boolean, optional (default=True)\n Shuffle the samples.\n\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n Returns\n -------\n X : array of shape [n_samples, n_features]\n The generated samples.\n\n y : array of shape [n_samples]\n The integer labels for cluster membership of each sample.\n\n Examples\n --------\n >>> from sklearn.datasets.samples_generator import make_blobs\n >>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,\n ... 
random_state=0)\n >>> print(X.shape)\n (10, 2)\n >>> y\n array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])\n\n See also\n --------\n make_classification: a more intricate variant\n \"\"\"\n generator = check_random_state(random_state)\n\n if isinstance(centers, numbers.Integral):\n centers = generator.uniform(center_box[0], center_box[1],\n size=(centers, n_features))\n else:\n centers = check_array(centers)\n n_features = centers.shape[1]\n\n if isinstance(cluster_std, numbers.Real):\n cluster_std = np.ones(len(centers)) * cluster_std\n\n X = []\n y = []\n\n n_centers = centers.shape[0]\n if isinstance(n_samples, numbers.Integral):\n n_samples_per_center = [int(n_samples // n_centers)] * n_centers\n for i in range(n_samples % n_centers):\n n_samples_per_center[i] += 1\n else:\n n_samples_per_center = n_samples\n\n for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):\n X.append(centers[i] + generator.normal(scale=std,\n size=(n, n_features)))\n y += [i] * n\n\n X = np.concatenate(X)\n y = np.array(y)\n\n if shuffle:\n X, y = shuffle_(X, y, random_state=generator)\n\n return X, y\n\ndef make_forge():\n # a carefully hand-designed dataset lol\n X, y = make_blobs(centers=2, random_state=4, n_samples=30)\n y[np.array([7, 27])] = 0\n mask = np.ones(len(X), dtype=np.bool)\n mask[np.array([0, 1, 5, 26])] = 0\n X, y = X[mask], y[mask]\n return X, y\n\nfrom matplotlib.colors import ListedColormap\ncm2 = ListedColormap(['#0000aa', '#ff2020'])\n\ndef plot_2d_separator(classifier, X, fill=False, ax=None, eps=None, alpha=1,\n cm=cm2, linewidth=None, threshold=None, linestyle=\"solid\"):\n # binary?\n if eps is None:\n eps = X.std() / 2.\n\n if ax is None:\n ax = plt.gca()\n\n x_min, x_max = X[:, 0].min() - eps, X[:, 0].max() + eps\n y_min, y_max = X[:, 1].min() - eps, X[:, 1].max() + eps\n xx = np.linspace(x_min, x_max, 100)\n yy = np.linspace(y_min, y_max, 100)\n\n X1, X2 = np.meshgrid(xx, yy)\n X_grid = np.c_[X1.ravel(), X2.ravel()]\n try:\n decision_values = 
classifier.decision_function(X_grid)\n levels = [0] if threshold is None else [threshold]\n fill_levels = [decision_values.min()] + levels + [decision_values.max()]\n except AttributeError:\n # no decision_function\n decision_values = classifier.predict_proba(X_grid)[:, 1]\n levels = [.5] if threshold is None else [threshold]\n fill_levels = [0] + levels + [1]\n if fill:\n ax.contourf(X1, X2, decision_values.reshape(X1.shape),\n levels=fill_levels, alpha=alpha, cmap=cm)\n else:\n ax.contour(X1, X2, decision_values.reshape(X1.shape), levels=levels,\n colors=\"black\", alpha=alpha, linewidths=linewidth,\n linestyles=linestyle, zorder=5)\n\n ax.set_xlim(x_min, x_max)\n ax.set_ylim(y_min, y_max)\n ax.set_xticks(())\n ax.set_yticks(())\n\nimport matplotlib as mpl\nfrom matplotlib.colors import colorConverter\n\ndef discrete_scatter(x1, x2, y=None, markers=None, s=10, ax=None,\n labels=None, padding=.2, alpha=1, c=None, markeredgewidth=None):\n \"\"\"Adaption of matplotlib.pyplot.scatter to plot classes or clusters.\n\n Parameters\n ----------\n\n x1 : nd-array\n input data, first axis\n\n x2 : nd-array\n input data, second axis\n\n y : nd-array\n input data, discrete labels\n\n cmap : colormap\n Colormap to use.\n\n markers : list of string\n List of markers to use, or None (which defaults to 'o').\n\n s : int or float\n Size of the marker\n\n padding : float\n Fraction of the dataset range to use for padding the axes.\n\n alpha : float\n Alpha value for all points.\n \"\"\"\n if ax is None:\n ax = plt.gca()\n\n if y is None:\n y = np.zeros(len(x1))\n\n unique_y = np.unique(y)\n\n if markers is None:\n markers = ['o', '^', 'v', 'D', 's', '*', 'p', 'h', 'H', '8', '<', '>'] * 10\n\n if len(markers) == 1:\n markers = markers * len(unique_y)\n\n if labels is None:\n labels = unique_y\n\n # lines in the matplotlib sense, not actual lines\n lines = []\n\n current_cycler = mpl.rcParams['axes.prop_cycle']\n\n for i, (yy, cycle) in enumerate(zip(unique_y, current_cycler())):\n 
mask = y == yy\n # if c is none, use color cycle\n if c is None:\n color = cycle['color']\n elif len(c) > 1:\n color = c[i]\n else:\n color = c\n # use light edge for dark markers\n if np.mean(colorConverter.to_rgb(color)) < .4:\n markeredgecolor = \"grey\"\n else:\n markeredgecolor = \"black\"\n\n lines.append(ax.plot(x1[mask], x2[mask], markers[i], markersize=s,\n label=labels[i], alpha=alpha, c=color,\n markeredgewidth=markeredgewidth,\n markeredgecolor=markeredgecolor)[0])\n\n if padding != 0:\n pad1 = x1.std() * padding\n pad2 = x2.std() * padding\n xlim = ax.get_xlim()\n ylim = ax.get_ylim()\n ax.set_xlim(min(x1.min() - pad1, xlim[0]), max(x1.max() + pad1, xlim[1]))\n ax.set_ylim(min(x2.min() - pad2, ylim[0]), max(x2.max() + pad2, ylim[1]))\n\n return lines",
"We can apply the LinearSVC model to a synthetic dataset, and visualize the decision boundary:",
"from sklearn.svm import LinearSVC\n\nX, y = make_forge()\nmodel = LinearSVC()\nclf = model.fit(X,y)\nplot_2d_separator(clf, X, fill=False, eps=0.5, alpha=.7)\ndiscrete_scatter(X[:, 0], X[:, 1], y)\nplt.title(\"{}\".format(clf.__class__.__name__))\nplt.xlabel(\"Feature 0\")\nplt.ylabel(\"Feature 1\")\nplt.legend()",
"In this figure, we have the first feature of the dataset on the x-axis and the second feature on the y-axis. We display the decision boundary found by LinearSVC as a straight line, separating the area classified as class 1 on the top from the area classified as class 0 on the bottom. In other words, any new data point that lies above the black line will be classified as class 1 by the respective classifier, while any point that lies below the black line will be classified as class 0.\nNote that the model misclassifies two of the points. By default, LinearSVC applies an L2 regularization, in the same way that Ridge does for regression.\nFor LinearSVC the trade-off parameter that determines the strength of the regularization is called C, and higher values of C correspond to less regularization. In other words, when you use a high value for the parameter C, LinearSVC tries to fit the training set as best as possible, while with low values of the parameter C, the model puts more emphasis on finding a coefficient vector (w) that is close to zero.\nThere is another interesting aspect of how the parameter C acts. Using low values of C will cause the algorithm to try to adjust to the “majority” of data points, while using a higher value of C stresses the importance that each individual data point be classified correctly. Here is an illustration:",
"from sklearn.svm import SVC\n\ndef plot_linear_svc_regularization():\n X, y = make_blobs(centers=2, random_state=4, n_samples=30)\n fig, axes = plt.subplots(1, 3, figsize=(12, 4))\n\n # a carefully hand-designed dataset lol\n y[7] = 0\n y[27] = 0\n x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\n y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\n\n for ax, C in zip(axes, [1e-2, 1, 1e2]):\n discrete_scatter(X[:, 0], X[:, 1], y, ax=ax)\n\n svm = SVC(kernel='linear', C=C, tol=0.00001).fit(X, y)\n w = svm.coef_[0]\n a = -w[0] / w[1]\n xx = np.linspace(6, 13)\n yy = a * xx - (svm.intercept_[0]) / w[1]\n ax.plot(xx, yy, c='k')\n ax.set_xlim(x_min, x_max)\n ax.set_ylim(y_min, y_max)\n ax.set_xticks(())\n ax.set_yticks(())\n ax.set_title(\"C = %f\" % C)\n axes[0].legend(loc=\"best\")\n \nplot_linear_svc_regularization()",
"On the lefthand side, we have a very small C corresponding to a lot of regularization. Most of the points in class 0 are at the top, and most of the points in class 1 are at the bottom. The strongly regularized model chooses a relatively horizontal line, misclassifying two points. In the center plot, C is slightly higher, and the model focuses more on the two misclassified samples, tilting the decision boundary. Finally, on the righthand side, the very high value of C in the model tilts the decision boundary a lot, now correctly classifying all points in class 0. One of the points in class 1 is still misclassified, as it is not possible to correctly classify all points in this dataset using a straight line. The model illustrated on the righthand side tries hard to correctly classify all points, but might not capture the overall layout of the classes well. In other words, this model is likely overfitting.\nSimilarly to the case of regression, linear models for classification might seem very restrictive in low-dimensional spaces, only allowing for decision boundaries that are straight lines or planes. Again, in high dimensions, linear models for classification become very powerful, and guarding against overfitting becomes increasingly important when considering more features.\nLinear SVMs for multiclass classification\nMany linear classification models are for binary classification only, and don’t extend naturally to the multiclass case (with the exception of logistic regression). A common technique to extend a binary classification algorithm to a multiclass classification algorithm is the one-vs.-rest approach. In the one-vs.-rest approach, a binary model is learned for each class that tries to separate that class from all of the other classes, resulting in as many binary models as there are classes. To make a prediction, all binary classifiers are run on a test point. 
The classifier that has the highest score on its single class “wins,” and this class label is returned as the prediction.\nHaving one binary classifier per class results in having one vector of coefficients (w) and one intercept (b) for each class. The class for which the result of the classification confidence formula given here is highest is the assigned class label:\nw[0] * x[0] + w[1] * x[1] + ... + w[p] * x[p] + b\n\nThe mathematics behind multiclass logistic regression differ somewhat from the one-vs.-rest approach, but they also result in one coefficient vector and intercept per class, and the same method of making a prediction is applied.\nLet’s apply the one-vs.-rest method to a simple three-class classification dataset. We use a two-dimensional dataset, where each class is given by data sampled from a Gaussian distribution.",
"X, y = make_blobs(centers=3, random_state=42)\ndiscrete_scatter(X[:, 0], X[:, 1], y, c=['b', 'r', 'g'])\nplt.xlabel(\"Feature 0\")\nplt.ylabel(\"Feature 1\")\nplt.legend([\"Class 0\", \"Class 1\", \"Class 2\"])",
"Now, we train a LinearSVC classifier on the dataset:",
"linear_svm = LinearSVC().fit(X, y)\nprint(\"Coefficient shape: \", linear_svm.coef_.shape)\nprint(\"Intercept shape: \", linear_svm.intercept_.shape)",
"We see that the shape of the coef_ is (3, 2), meaning that each row of coef_ contains the coefficient vector for one of the three classes and each column holds the coefficient value for a specific feature (there are two in this dataset). The intercept_ is now a one-dimensional array, storing the intercepts for each class.\nLet’s visualize the lines given by the three binary classifiers:",
"discrete_scatter(X[:, 0], X[:, 1], y, c=['b', 'r', 'g'])\nline = np.linspace(-15, 15)\nfor coef, intercept, color in zip(linear_svm.coef_, linear_svm.intercept_, ['b', 'r', 'g']):\n plt.plot(line, -(line * coef[0] + intercept) / coef[1], c=color)\nplt.ylim(-10, 15)\nplt.xlim(-10, 8)\nplt.xlabel(\"Feature 0\")\nplt.ylabel(\"Feature 1\")\nplt.legend(['Class 0', 'Class 1', 'Class 2', 'Line class 0', 'Line class 1', 'Line class 2'], loc=(1.01, 0.3))",
"You can see that all the points belonging to class 0 in the training data are above the line corresponding to class 0, which means they are on the “class 0” side of this binary classifier. The points in class 0 are above the line corresponding to class 2, which means they are classified as “rest” by the binary classifier for class 2. The points belonging to class 0 are to the left of the line corresponding to class 1, which means the binary classifier for class 1 also classifies them as “rest.” Therefore, any point in this area will be classified as class 0 by the final classifier (the result of the classification confidence formula for classifier 0 is greater than zero, while it is smaller than zero for the other two classes).\nBut what about the triangle in the middle of the plot? All three binary classifiers classify points there as “rest.” Which class would a point there be assigned to? The answer is the one with the highest value for the classification formula: the class of the closest line.\nThe following example (Figure 2-21) shows the predictions for all regions of the 2D space:",
"cm3 = ListedColormap(['#0000aa', '#ff2020', '#50ff50'])\n\ndef plot_2d_classification(classifier, X, fill=False, ax=None, eps=None, alpha=1, cm=cm3):\n # multiclass\n if eps is None:\n eps = X.std() / 2.\n\n if ax is None:\n ax = plt.gca()\n\n x_min, x_max = X[:, 0].min() - eps, X[:, 0].max() + eps\n y_min, y_max = X[:, 1].min() - eps, X[:, 1].max() + eps\n xx = np.linspace(x_min, x_max, 1000)\n yy = np.linspace(y_min, y_max, 1000)\n\n X1, X2 = np.meshgrid(xx, yy)\n X_grid = np.c_[X1.ravel(), X2.ravel()]\n decision_values = classifier.predict(X_grid)\n ax.imshow(decision_values.reshape(X1.shape), extent=(x_min, x_max,\n y_min, y_max),\n aspect='auto', origin='lower', alpha=alpha, cmap=cm)\n ax.set_xlim(x_min, x_max)\n ax.set_ylim(y_min, y_max)\n ax.set_xticks(())\n ax.set_yticks(())\n\nplot_2d_classification(linear_svm, X, fill=True, alpha=.7)\ndiscrete_scatter(X[:, 0], X[:, 1], y, c=['b', 'r', 'g'])\nline = np.linspace(-15, 15)\nfor coef, intercept, color in zip(linear_svm.coef_, linear_svm.intercept_, ['b', 'r', 'g']):\n plt.plot(line, -(line * coef[0] + intercept) / coef[1], c=color)\nplt.legend(['Class 0', 'Class 1', 'Class 2', 'Line class 0', 'Line class 1', 'Line class 2'], loc=(1.01, 0.3))\nplt.xlabel(\"Feature 0\")\nplt.ylabel(\"Feature 1\")",
"Kernelized Support Vector Machines\nKernelized support vector machines (often just referred to as SVMs) are an extension that allows for more complex models that are not defined simply by hyperplanes in the input space. While there are support vector machines for classification and regression, we will restrict ourselves to the classification case, as implemented in SVC. Similar concepts apply to support vector regression, as implemented in SVR.\nThe math behind kernelized support vector machines is a bit involved. You can find the details in Chapter 1 of Hastie, Tibshirani, and Friedman’s The Elements of Statistical Learning.\nLinear models and nonlinear features\nAs you saw above, linear models can be quite limiting in low-dimensional spaces, as lines and hyperplanes have limited flexibility. One way to make a linear model more flexible is by adding more features—for example, by adding interactions or polynomials of the input features.\nLet’s look at another synthetic dataset to help illustrate this:",
"c=['b', 'r', 'g']\nX, y = make_blobs(centers=4, random_state=8)\ny = y % 2\n\ndiscrete_scatter(X[:, 0], X[:, 1], y, c=c)\nplt.xlabel(\"Feature 0\")\nplt.ylabel(\"Feature 1\")",
"A linear model for classification can only separate points using a line, and will not be able to do a very good job on this dataset:",
"from sklearn.svm import LinearSVC\nlinear_svm = LinearSVC().fit(X, y)\n\nplot_2d_separator(linear_svm, X)\ndiscrete_scatter(X[:, 0], X[:, 1], y, c=c)\nplt.xlabel(\"Feature 0\")\nplt.ylabel(\"Feature 1\")",
"Now let’s expand the set of input features, say by also adding feature1^2, the square of the second feature, as a new feature. Instead of representing each data point as a two-dimensional point, (feature0, feature1), we now represent it as a three-dimensional point, (feature0, feature1, feature1^2). This new representation is illustrated below in a three-dimensional scatter plot:",
"# add the squared first feature\nX_new = np.hstack([X, X[:, 1:] ** 2])\n\nfrom mpl_toolkits.mplot3d import Axes3D, axes3d\nfigure = plt.figure()\n# visualize in 3D\nax = Axes3D(figure, elev=-152, azim=-26)\n# plot first all the points with y == 0, then all with y == 1\nmask = y == 0\nax.scatter(X_new[mask, 0], X_new[mask, 1], X_new[mask, 2], c='b', cmap=cm2, s=60)\nax.scatter(X_new[~mask, 0], X_new[~mask, 1], X_new[~mask, 2], c='r', marker='^', cmap=cm2, s=60)\nax.set_xlabel(\"feature0\")\nax.set_ylabel(\"feature1\")\nax.set_zlabel(\"feature1 ** 2\")",
"In the new representation of the data, it is now indeed possible to separate the two classes using a linear model, a plane in three dimensions. We can confirm this by fitting a linear model to the augmented data:",
"linear_svm_3d = LinearSVC().fit(X_new, y)\ncoef, intercept = linear_svm_3d.coef_.ravel(), linear_svm_3d.intercept_\n\n# show linear decision boundary\nfigure = plt.figure()\nax = Axes3D(figure, elev=-152, azim=-26)\nxx = np.linspace(X_new[:, 0].min() - 2, X_new[:, 0].max() + 2, 50)\nyy = np.linspace(X_new[:, 1].min() - 2, X_new[:, 1].max() + 2, 50)\n\nXX, YY = np.meshgrid(xx, yy)\nZZ = (coef[0] * XX + coef[1] * YY + intercept) / -coef[2]\nax.plot_surface(XX, YY, ZZ, rstride=8, cstride=8, alpha=0.3)\nax.scatter(X_new[mask, 0], X_new[mask, 1], X_new[mask, 2], c='b', cmap=cm2, s=60)\nax.scatter(X_new[~mask, 0], X_new[~mask, 1], X_new[~mask, 2], c='r', marker='^', cmap=cm2, s=60)\n\nax.set_xlabel(\"feature0\")\nax.set_ylabel(\"feature1\")\nax.set_zlabel(\"feature1 ** 2\")",
"As a function of the original features, the linear SVM model is not actually linear anymore. It is not a line, but more of an ellipse, as you can see from the plot created here:",
"ZZ = YY ** 2\ndec = linear_svm_3d.decision_function(np.c_[XX.ravel(), YY.ravel(), ZZ.ravel()])\nplt.contourf(XX, YY, dec.reshape(XX.shape), levels=[dec.min(), 0, dec.max()], cmap=cm2, alpha=0.5)\ndiscrete_scatter(X[:, 0], X[:, 1], y, c=c)\nplt.xlabel(\"Feature 0\")\nplt.ylabel(\"Feature 1\")",
"The kernel trick\nThe lesson here is that adding nonlinear features to the representation of our data can make linear models much more powerful. However, often we don’t know which features to add, and adding many features (like all possible interactions in a 100-dimensional feature space) might make computation very expensive. Luckily, there is a clever mathematical trick that allows us to learn a classifier in a higher-dimensional space without actually computing the new, possibly very large representation. This is known as the kernel trick, and it works by directly computing the distance (more precisely, the scalar products) of the data points for the expanded feature representation, without ever actually computing the expansion.\nThere are two ways to map your data into a higher-dimensional space that are commonly used with support vector machines: the polynomial kernel, which computes all possible polynomials up to a certain degree of the original features (like feature1^2 x feature2^5); and the radial basis function (RBF) kernel, also known as the Gaussian kernel. The Gaussian kernel is a bit harder to explain, as it corresponds to an infinite-dimensional feature space. One way to explain the Gaussian kernel is that it considers all possible polynomials of all degrees, but the importance of the features decreases for higher degrees.\nIn practice, the mathematical details behind the kernel SVM are not that important, though, and how an SVM with an RBF kernel makes a decision can be summarized quite easily—we’ll do so in the next section.\nUnderstanding SVMs\nDuring training, the SVM learns how important each of the training data points is to represent the decision boundary between the two classes. Typically only a subset of the training points matter for defining the decision boundary: the ones that lie on the border between the classes. 
These are called support vectors and give the support vector machine its name.\nTo make a prediction for a new point, the distance to each of the support vectors is measured. A classification decision is made based on the distances to the support vector, and the importance of the support vectors that was learned during training (stored in the dual_coef_ attribute of SVC).\nThe distance between data points is measured by the Gaussian kernel:\nkrbf(x1, x2) = exp (-ɣǁx1 - x2ǁ²)\n\nHere, x1 and x2 are data points, ǁ x1 - x2 ǁ denotes Euclidean distance, and ɣ (gamma) is a parameter that controls the width of the Gaussian kernel.\nThe figure below shows the result of training a support vector machine on a two-dimensional two-class dataset. The decision boundary is shown in black, and the support vectors are larger points with the wide outline.",
"def make_handcrafted_dataset():\n    # a carefully hand-designed dataset\n    X, y = make_blobs(centers=2, random_state=4, n_samples=30)\n    y[np.array([7, 27])] = 0\n    mask = np.ones(len(X), dtype=np.bool)\n    mask[np.array([0, 1, 5, 26])] = 0\n    X, y = X[mask], y[mask]\n    return X, y\n\nfrom sklearn.svm import SVC\nX, y = make_handcrafted_dataset()\nsvm = SVC(kernel='rbf', C=10, gamma=0.1).fit(X, y)\nplot_2d_separator(svm, X, eps=.5)\ndiscrete_scatter(X[:, 0], X[:, 1], y, c=c)\n# plot support vectors\nsv = svm.support_vectors_\n# class labels of support vectors are given by the sign of the dual coefficients\nsv_labels = svm.dual_coef_.ravel() > 0\ndiscrete_scatter(sv[:, 0], sv[:, 1], sv_labels, s=15, markeredgewidth=3, c=c)\nplt.xlabel(\"Feature 0\")\nplt.ylabel(\"Feature 1\")",
"In this case, the SVM yields a very smooth and nonlinear (not a straight line) boundary. We adjusted two parameters here: the C parameter and the gamma parameter, which we will now discuss in detail.\nTuning SVM parameters\nThe gamma parameter is the one shown in the formula given in the previous section, which controls the width of the Gaussian kernel. It determines the scale of what it means for points to be close together. The C parameter is a regularization parameter, similar to that used in other models. It limits the importance of each point (or more precisely, their dual_coef_).\nLet’s have a look at what happens when we vary these parameters:",
"def plot_svm(log_C, log_gamma, ax=None):\n X, y = make_handcrafted_dataset()\n C = 10. ** log_C\n gamma = 10. ** log_gamma\n svm = SVC(kernel='rbf', C=C, gamma=gamma).fit(X, y)\n if ax is None:\n ax = plt.gca()\n plot_2d_separator(svm, X, ax=ax, eps=.5)\n # plot data\n discrete_scatter(X[:, 0], X[:, 1], y, ax=ax, c=c)\n # plot support vectors\n sv = svm.support_vectors_\n # class labels of support vectors are given by the sign of the dual coefficients\n sv_labels = svm.dual_coef_.ravel() > 0\n discrete_scatter(sv[:, 0], sv[:, 1], sv_labels, s=15, markeredgewidth=3, ax=ax, c=c)\n ax.set_title(\"C = %.4f gamma = %.4f\" % (C, gamma))\n\nfig, axes = plt.subplots(3, 3, figsize=(15, 10))\n\nfor ax, C in zip(axes, [-1, 0, 3]):\n for a, gamma in zip(ax, range(-1, 2)):\n plot_svm(log_C=C, log_gamma=gamma, ax=a)\n\naxes[0, 0].legend([\"class 0\", \"class 1\", \"sv class 0\", \"sv class 1\"], ncol=4, loc=(.9, 1.2))",
"Going from left to right, we increase the value of the parameter gamma from 0.1 to 10. A small gamma means a large radius for the Gaussian kernel, which means that many points are considered close by. This is reflected in very smooth decision boundaries on the left, and boundaries that focus more on single points further to the right. A low value of gamma means that the decision boundary will vary slowly, which yields a model of low complexity, while a high value of gamma yields a more complex model.\nGoing from top to bottom, we increase the C parameter from 0.1 to 1000. As with the linear models, a small C means a very restricted model, where each data point can only have very limited influence. You can see that at the top left the decision boundary looks nearly linear, with the misclassified points barely having any influence on the line. Increasing C, as shown on the bottom right, allows these points to have a stronger influence on the model and makes the decision boundary bend to correctly classify them.\nLet’s apply the RBF kernel SVM to the Breast Cancer dataset. By default, C=1 and gamma=1/n_features:",
"from sklearn.datasets import load_breast_cancer\nfrom sklearn.model_selection import train_test_split\n\ncancer = load_breast_cancer()\n\nX_train, X_test, y_train, y_test = train_test_split(cancer.data, cancer.target, random_state=0)\n\nsvc = SVC()\nsvc.fit(X_train, y_train)\n\nprint(\"Accuracy on training set: {:.2f}\".format(svc.score(X_train, y_train)))\nprint(\"Accuracy on test set: {:.2f}\".format(svc.score(X_test, y_test)))",
"The model overfits quite substantially, with a perfect score on the training set and only 63% accuracy on the test set. While SVMs often perform quite well, they are very sensitive to the settings of the parameters and to the scaling of the data. In particular, they require all the features to vary on a similar scale. Let’s look at the minimum and maximum values for each feature, plotted in log-space:",
"plt.plot(X_train.min(axis=0), 'o', label=\"min\")\nplt.plot(X_train.max(axis=0), '^', label=\"max\")\nplt.legend(loc=4)\nplt.xlabel(\"Feature index\")\nplt.ylabel(\"Feature magnitude\")\nplt.yscale(\"log\")",
"From this plot we can determine that features in the Breast Cancer dataset are of completely different orders of magnitude. This can be somewhat of a problem for other models (like linear models), but it has devastating effects for the kernel SVM. Let’s examine some ways to deal with this issue.\nPreprocessing data for SVMs\nOne way to resolve this problem is by rescaling each feature so that they are all approximately on the same scale. A common rescaling method for kernel SVMs is to scale the data such that all features are between 0 and 1. We will see how to do this using the MinMaxScaler preprocessing method:",
"from sklearn.preprocessing import MinMaxScaler\n\nscaler = MinMaxScaler()\nscaler.fit(X_train)\n\nX_train_scaled = scaler.transform(X_train)\nX_test_scaled = scaler.transform(X_test)\n\nsvc = SVC()\nsvc.fit(X_train_scaled, y_train)\n\nprint(\"Accuracy on training set: {:.3f}\".format(svc.score(X_train_scaled, y_train)))\nprint(\"Accuracy on test set: {:.3f}\".format(svc.score(X_test_scaled, y_test)))",
"Scaling the data made a huge difference! Now we are actually in an underfitting regime, where training and test set performance are quite similar but less close to 100% accuracy. From here, we can try increasing either C or gamma to fit a more complex model. For example:",
"svc = SVC(C=1000)\nsvc.fit(X_train_scaled, y_train)\n\nprint(\"Accuracy on training set: {:.3f}\".format(svc.score(X_train_scaled, y_train)))\nprint(\"Accuracy on test set: {:.3f}\".format(svc.score(X_test_scaled, y_test)))",
"Here, increasing C allows us to improve the model significantly, resulting in 97.2% accuracy.\nStrengths, weaknesses, and parameters\nKernelized support vector machines are powerful models and perform well on a variety of datasets. SVMs allow for complex decision boundaries, even if the data has only a few features. They work well on low-dimensional and high-dimensional data (i.e., few and many features), but don’t scale very well with the number of samples. Running an SVM on data with up to 10,000 samples might work well, but working with datasets of size 100,000 or more can become challenging in terms of runtime and memory usage.\nAnother downside of SVMs is that they require careful preprocessing of the data and tuning of the parameters. This is why, these days, most people instead use tree-based models such as random forests or gradient boosting (which require little or no preprocessing) in many applications. Furthermore, SVM models are hard to inspect; it can be difficult to understand why a particular prediction was made, and it might be tricky to explain the model to a nonexpert.\nStill, it might be worth trying SVMs, particularly if all of your features represent measurements in similar units (e.g., all are pixel intensities) and they are on similar scales.\nThe important parameters in kernel SVMs are the regularization parameter C, the choice of the kernel, and the kernel-specific parameters. Although we primarily focused on the RBF kernel, other choices are available in scikit-learn. The RBF kernel has only one parameter, gamma, which is the inverse of the width of the Gaussian kernel. gamma and C both control the complexity of the model, with large values in either resulting in a more complex model. Therefore, good settings for the two parameters are usually strongly correlated, and C and gamma should be adjusted together."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
MaayanLab/clustergrammer-widget
|
Running_clustergrammer_widget.ipynb
|
mit
|
[
"Clustergrammer Widget\nGene Expression Example\nThis example shows how to visualize a matrix of gene expression data saved as a tab-separated-file (e.g. rc_two_cats.txt) using the Clustergrammer interactive widget (see the Clustergrammer Jupyter Widget Documentation for more information).",
"# import widget classes and instantiate Network instance\nfrom clustergrammer_widget import *\nnet = Network(clustergrammer_widget)\n\n# load matrix file\nnet.load_file('rc_two_cats.txt')\n\n# cluster using default parameters\nnet.cluster(enrichrgram=True)\n\n# make interactive widget\nnet.widget()",
"Interactive Features\n\nzoom/pan\nreorder rows and columns using buttons or by double-clicking row/column/category names\ninteractively perform dimensionality reduction (and re-clustering) using row-filter sliders (e.g. filter rows based on variance)\nidentify clusters of varying sizes using the interactive row and column dendrograms \nexport cluster names or crop matrix to clusters using the dendrogram and dendrogram crop buttons\nsearch for rows using the search box\ncrop the matrix using the brush cropping tool in the sidebar\ntake a PNG/SVG snapshot or download a TSV file snapshot of the matrix using the sidebar icons\n\nBiology-specific Features\nClustergrammer widget has biology-specific features that are activated when rows are given as official gene symbols:\n* mouseover gene (row) name to show full name and description (information provided by Harmonizome)\n* find biological information specific to your gene list with enrichment analysis from Enrichr\nGeneral Purpose Pandas DataFrame Viewer\nClustergrammer can also be used as a general purpose Pandas dataframe viewer. This example generates a dataframe with random data and visualizes it with Clustergrammer widget:",
"import numpy as np\nimport pandas as pd\n\n# generate random matrix\nnum_rows = 500\nnum_cols = 10\nnp.random.seed(seed=100)\nmat = np.random.rand(num_rows, num_cols)\n\n# make row and col labels\nrows = range(num_rows)\ncols = range(num_cols)\nrows = [str(i) for i in rows]\ncols = [str(i) for i in cols]\n\n# make dataframe \ndf = pd.DataFrame(data=mat, columns=cols, index=rows)",
"Initialize the network object, load the dataframe, hierarchically cluster the rows and columns using default parameters, and finally visualize using clustergrammer_widget.",
"net.load_df(df)\nnet.cluster(enrichrgram=False)\nnet.widget()",
"Installation\nClustergrammer widget is built using the ipywidgets framework (using the cookie cutter template) and can be installed (with pip) and enabled using the following commands:\npip install clustergrammer_widget\njupyter nbextension enable --py --sys-prefix widgetsnbextension\njupyter nbextension enable --py --sys-prefix clustergrammer_widget\n\nSee the documentation and clustergrammer_widget GitHub for more information. \nnbviewer\nInteractive widgets can also be rendered using Jupyter's nbviewer by using the 'Save Notebook with Widgets' action from the Widgets menu in the notebook (see ipywidgets documents). This notebook is being rendered by nbviewer using the Github repo.\nClustergrammer Web-app and Libraries\nThe Clustergrammer project can also be used through:\n* a web application: http://amp.pharm.mssm.edu/clustergrammer/ \n* and as JavaScript (front-end) and Python (back-end) libraries by developers: clustergrammer.js and clustergrammer.py"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
rflamary/POT
|
docs/source/auto_examples/plot_convolutional_barycenter.ipynb
|
mit
|
[
"%matplotlib inline",
"Convolutional Wasserstein Barycenter example\nThis example is designed to illustrate how the Convolutional Wasserstein Barycenter\nfunction of POT works.",
"# Author: Nicolas Courty <ncourty@irisa.fr>\n#\n# License: MIT License\n\n\nimport numpy as np\nimport pylab as pl\nimport ot",
"Data preparation\nThe four distributions are constructed from 4 simple images",
"f1 = 1 - pl.imread('../data/redcross.png')[:, :, 2]\nf2 = 1 - pl.imread('../data/duck.png')[:, :, 2]\nf3 = 1 - pl.imread('../data/heart.png')[:, :, 2]\nf4 = 1 - pl.imread('../data/tooth.png')[:, :, 2]\n\nA = []\nf1 = f1 / np.sum(f1)\nf2 = f2 / np.sum(f2)\nf3 = f3 / np.sum(f3)\nf4 = f4 / np.sum(f4)\nA.append(f1)\nA.append(f2)\nA.append(f3)\nA.append(f4)\nA = np.array(A)\n\nnb_images = 5\n\n# those are the four corners coordinates that will be interpolated by bilinear\n# interpolation\nv1 = np.array((1, 0, 0, 0))\nv2 = np.array((0, 1, 0, 0))\nv3 = np.array((0, 0, 1, 0))\nv4 = np.array((0, 0, 0, 1))",
"Barycenter computation and visualization",
"pl.figure(figsize=(10, 10))\npl.title('Convolutional Wasserstein Barycenters in POT')\ncm = 'Blues'\n# regularization parameter\nreg = 0.004\nfor i in range(nb_images):\n for j in range(nb_images):\n pl.subplot(nb_images, nb_images, i * nb_images + j + 1)\n tx = float(i) / (nb_images - 1)\n ty = float(j) / (nb_images - 1)\n\n # weights are constructed by bilinear interpolation\n tmp1 = (1 - tx) * v1 + tx * v2\n tmp2 = (1 - tx) * v3 + tx * v4\n weights = (1 - ty) * tmp1 + ty * tmp2\n\n if i == 0 and j == 0:\n pl.imshow(f1, cmap=cm)\n pl.axis('off')\n elif i == 0 and j == (nb_images - 1):\n pl.imshow(f3, cmap=cm)\n pl.axis('off')\n elif i == (nb_images - 1) and j == 0:\n pl.imshow(f2, cmap=cm)\n pl.axis('off')\n elif i == (nb_images - 1) and j == (nb_images - 1):\n pl.imshow(f4, cmap=cm)\n pl.axis('off')\n else:\n # call to barycenter computation\n pl.imshow(ot.bregman.convolutional_barycenter2d(A, reg, weights), cmap=cm)\n pl.axis('off')\npl.show()"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
google/timesketch
|
notebooks/add_a_single_event_to_a_sketch.ipynb
|
apache-2.0
|
[
"<a href=\"https://colab.research.google.com/github/google/timesketch/blob/master/notebooks/add_a_single_event_to_a_sketch.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>\nSetup",
"# @title Install dependencies\n# @markdown Only execute if not already installed and running a cloud runtime\n!pip install -q timesketch_api_client\n\n\n# @title Import libraries\n# @markdown This cell will import all the libraries needed for the running of this colab.\n\nimport altair as alt # For graphing.\nimport numpy as np # Never know when this will come in handy.\nimport pandas as pd # We will be using pandas quite heavily.\n\nfrom timesketch_api_client import config\n\n# @title Sketch Configuration\n# @markdown Set the sketch ID for the rest of the notebook\n\nSketch_id = 1 #@param {type:\"integer\"}\n\n# @title Timesketch connection\nts_client = config.get_client()\nsketch = ts_client.get_sketch(Sketch_id)",
"Fill in your event data\nPut in your message, date and timestamp description",
"#message: A string that will be used as the message string. \nmessage = \"\" #@param {type:\"string\"}\n\n#date: A string with the timestamp of the message. \n# This should be in a human readable format, eg: \"2020-09-03T22:52:21\". \n# if empty it will set to now\n\ndate = \"2020-08-06T12:48:06.994188Z\" #@param {type:\"string\"}\n\n#timestamp_desc : Description of the timestamp. \ntimestamp_desc = \"Test_description\" #@param {type:\"string\"}\n\n",
"Attributes / Tags (optional)\nFill in additional attributes and tags\nIf you do not need that, empty them.\nExamples:\nattributes: A dict of extra attributes to add to the event.\nattributes = {\"a\": \"alpha\", \"o\": \"omega\", \"g\": \"gamma\"}\ntags: A list of strings to include as tags.\ntags = [\"not\", \"important\"]",
"#attributes: A dict of extra attributes to add to the event. \nattributes = {\"a\": \"alpha\", \"o\": \"omega\", \"g\": \"gamma\"}\n\n#tags: A list of strings to include as tags.\ntags = [\"not\", \"important\"] \n\nsketch.add_event(message, date, timestamp_desc, attributes, tags)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
dtamayo/rebound
|
ipython_examples/SimulationArchiveRestart.ipynb
|
gpl-3.0
|
[
"Using the Simulation Archive to restart a simulation\nThe Simulation Archive (SA) is a binary file that can be used to restart a simulation. This can be useful when running a long simulation. REBOUND can restart simulation exactly (bit by bit) when using a SA. There are some restriction to when a SA can be used. Please read the corresponding paper (Rein & Tamayo 2017) for details. \nWe first setup a simulation in the normal way.",
"import rebound\n\nsim = rebound.Simulation()\nsim.integrator = \"whfast\"\nsim.dt = 2.*3.1415/365.*6 # 6 days in units where G=1\nsim.add(m=1.)\nsim.add(m=1e-3,a=1.)\nsim.add(m=5e-3,a=2.25)\nsim.move_to_com()",
"We then initialize the SA and specify the output filename and output cadence. We can choose the output interval to either correspond to constant intervals in walltime (in seconds) or simulation time. Here, we choose walltime. To choose simulation time instead replace the walltime argument with interval.",
"sim.automateSimulationArchive(\"simulationarchive.bin\", walltime=1.,deletefile=True)",
"Now, we can run the simulation forward in time.",
"sim.integrate(2e5)",
"Depending on how fast your computer is, the above command may take a couple of seconds. Once the simulation is done, we can delete it from memory and load it back in from the SA. You could do this at a later time. Note that this will even work if the SA file was generated on a different computer with a different operating system and even a different version of REBOUND. See Rein & Tamayo (2017) for a full discussion on machine independent code.",
"sim = None\nsim = rebound.Simulation(\"simulationarchive.bin\")\nprint(\"Time after loading simulation %.1f\" %sim.t)",
"If we want to integrate the simulation further in time and append snapshots to the same SA, then we need to call the automateSimulationArchive method again (this is a fail-safe mechanism to avoid accidentally modifying a SA file). Note that we set the deletefile flag to False. Otherwise we would create a new empty SA file. This outputs a warning because the file already exists (which is ok since we want to append to that file).",
"sim.automateSimulationArchive(\"simulationarchive.bin\", walltime=1.,deletefile=False)",
"Now, let's integrate the simulation further in time.",
"sim.integrate(sim.t+2e5)",
"If we repeat the process, one can see that the SA binary file now includes the new snapshots from the restarted simulation.",
"sim = None\nsim = rebound.Simulation(\"simulationarchive.bin\")\nprint(\"Time after loading simulation %.1f\" %sim.t)",
"A few things to note when restarting a simulation from a SA: \n- If you used any additional forces or post-timestep modifications in the original simulation, then those need to be restored after loading a simulation from a SA. A RuntimeWarning may be given related to this indicating the need to reset function pointers after creating a reb_simulation struct with a binary file.\n- If you use the symplectic WHFast integrator with the safe mode turned off, then the simulation will be in an unsychronized state after reloading it. If you want to generate an output, then the simulation needs to be synchronized beforehand. See the WHFast tutorial on how to do that.\n- If you use the symplectic WHFast integrator with the safe mode turned off in order to combine kepler steps (see the Advanced WHFast tutorial), but want to preserve bitwise reproducibility when integrating to different times in the simulation or to match SimulationArchive snapshots, you need to manually set sim.ri_whfast.keep_unsynchronized = 1. This ensures that the integration state does not change depending on if and when you generate outputs.\n- For reproducibility, the SimulationArchive does not output snapshots at the exact intervals specified, but rather at the timestep in the integration directly following each interval. This means that if you load from a SimulationArchive and want to reproduce the state in a snapshot later on, you have to pass exact_finish_time=0 in a call to sim.integrate."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
Hamstard/RVMs
|
Tutorial.ipynb
|
mit
|
[
"Tutorial on RVM Regression\nIn this tutorial we play around with linear regression in form of Relevance Vector Machines (RVMs) using linear and localized kernels. And heeeere we go!",
"%matplotlib inline\nfrom linear_model import RelevanceVectorMachine, distribution_wrapper, GaussianFeatures, \\\n FourierFeatures, repeated_regression, plot_summary\nfrom sklearn import preprocessing\nimport numpy as np\nfrom scipy import stats\nimport matplotlib#\nimport matplotlib.pylab as plt\n\nmatplotlib.rc('text', usetex=True)\nmatplotlib.rcParams['text.latex.preamble']=[r\"\\usepackage{amsmath}\"]",
"First things first, let's set up up the database to regress.",
"x = np.linspace(-np.pi,np.pi,100)\nx_pred = np.linspace(-1.5*np.pi,1.5*np.pi,200)\nepsilon = stats.norm(loc=0,scale=0.01)\nnoise = epsilon.rvs(size=x.shape[0])\nt = np.exp(-x**2) + noise\n\nfig = plt.figure(figsize=(5,5))\nplt.plot(x,t,'ro',markerfacecolor=\"None\",label=\"data\")\nplt.xlabel(\"input\")\nplt.ylabel(\"output\")\nplt.legend(loc=0)\nplt.show()",
"1. Single Regression\n1.1 Linear Kernel\nNeat now let's test whether we can regress that data using a polynomial feature space.",
"# choosing the feature space\nk = 5\ntrafo = preprocessing.PolynomialFeatures(k)\nX = trafo.fit_transform(x.reshape((-1,1)))\n\n# initializing hyperparameters\ninit_beta = 1./ np.var(t) # (that's the default start)\ninit_alphas = np.ones(X.shape[1])\ninit_alphas[1:] = np.inf\n\n# setting up the model regression class\nmodel = RelevanceVectorMachine(n_iter=250,verbose=False,compute_score=True,init_beta=init_beta,\n init_alphas=init_alphas)\n# regress\nmodel.fit(X,t)\n\n# predict\nX_pred = trafo.fit_transform(x_pred.reshape((-1,1)))\ny, yerr = model.predict(X_pred,return_std=True)\n\nfig = plt.figure()\nax = fig.add_subplot(121)\nax.plot(x,t,'ro',label=\"data\",markerfacecolor=\"None\")\nax.fill_between(x_pred,y-2*yerr,y+2*yerr,alpha=.5,label=\"95\\%\")\nax.plot(x_pred,y,'-',label=\"estimate\")\nplt.legend(loc=0)\nax.set_xlabel(\"input\")\nax.set_ylabel(\"output\")\n\nax1 = fig.add_subplot(122)\nax1.plot(model.mse_,'-')\nax1.set_xlabel(\"iteration\")\nax1.set_ylabel(\"MSE\")\nplt.tight_layout()\nplt.show()",
"1.2 Localized Kernel\nIndeed that seemed to work. But what about a Gaussian feature space, will it be able to fit the Gaussian?",
"# choosing the feature space\ntrafo = GaussianFeatures(k=30,mu0=-3,dmu=.2)\nX = trafo.fit_transform(x.reshape((-1,1)))\n\n# initializing hyperparameters\ninit_beta = 1./ np.var(t) # (that's the default start)\ninit_alphas = np.ones(X.shape[1])\ninit_alphas[1:] = np.inf\n\n# setting up the model regression class\nmodel = RelevanceVectorMachine(n_iter=250,verbose=False,compute_score=True,init_beta=init_beta,\n init_alphas=init_alphas)\n# regress\nmodel.fit(X,t)\n\n# predict\nX_pred = trafo.fit_transform(x_pred.reshape((-1,1)))\ny, yerr = model.predict(X_pred,return_std=True)\n\nfig = plt.figure()\nax = fig.add_subplot(121)\nax.plot(x,t,'ro',label=\"data\",markerfacecolor=\"None\")\nax.fill_between(x_pred,y-2*yerr,y+2*yerr,alpha=.5,label=\"95\\%\")\nax.plot(x_pred,y,'-',label=\"estimate\")\nplt.legend(loc=0)\nax.set_xlabel(\"input\")\nax.set_ylabel(\"output\")\n\nax1 = fig.add_subplot(122)\nax1.plot(model.mse_,'-')\nax1.set_xlabel(\"iteration\")\nax1.set_ylabel(\"MSE\")\nplt.tight_layout()\nplt.show()",
"2. Repeated Regressions\nIndeed using a Gaussian basis set, for some mysterious reason, gave a closer estimate to the real data with tighter confidence intervals. Now let's do the same again for both kernels but multiple times initializing the hyperparmaeters such that we sample them from distributions as well.\n2.1 Linear Kernel",
"# choosing the feature space\nk = 5\ntrafo = preprocessing.PolynomialFeatures(k)\nX = trafo.fit_transform(x.reshape((-1,1)))\nbase_trafo = trafo.fit_transform\n\n# initializing hyperparameters using callable distributions giving new hyperparameters\n# with every call (useful for repeated regression)\ninit_beta = distribution_wrapper(stats.halfnorm(scale=1),size=1,single=True)\ninit_alphas = distribution_wrapper(stats.halfnorm(scale=1),single=False)\n\nmodel_type = RelevanceVectorMachine\nmodel_kwargs = dict(n_iter=250,verbose=False,compute_score=True,init_beta=init_beta,\n init_alphas=init_alphas,fit_intercept=False)\n\nNruns = 100\nruntimes, coefs, models = repeated_regression(x,base_trafo,model_type,t=t,\n model_kwargs=model_kwargs,Nruns=Nruns,\n return_coefs=True,return_models=True)\nplot_summary(models,noise,x,t,X,coefs,base_trafo)",
"2.2 Localized kernel",
"# choosing the feature space\ntrafo = GaussianFeatures(k=30,mu0=-3,dmu=.2)\nbase_trafo = trafo.fit_transform\n\n# initializing hyperparameters using callable distributions giving new hyperparameters\n# with every call (useful for repeated regression)\ninit_beta = distribution_wrapper(stats.halfnorm(scale=1),size=1,single=True)\ninit_alphas = distribution_wrapper(stats.halfnorm(scale=1),single=False)\n\nmodel_type = RelevanceVectorMachine\nmodel_kwargs = dict(n_iter=250,verbose=False,compute_score=True,init_beta=init_beta,\n init_alphas=init_alphas,fit_intercept=False)\n\nNruns = 100\nruntimes, coefs, models = repeated_regression(x,base_trafo,model_type,t=t,\n model_kwargs=model_kwargs,Nruns=Nruns,\n return_coefs=True,return_models=True)\nX = base_trafo(x.reshape((-1,1)))\nplot_summary(models,noise,x,t,X,coefs,base_trafo)",
"Excellent, that seemed to work and the Gaussians in the basis set near the true Gaussian in our data also seemed to receive the largest weights. Neat!"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
anshbansal/anshbansal.github.io
|
udacity_data_science_notes/intro_data_analysis/lesson_02/Lesson2.ipynb
|
mit
|
[
"Lesson 2: NumPy and Pandas for 1D Data\n01 - Introduction\n\nWill get familiar with 2 libraries - numpy and pandas\nWriting Data Analysis code will be much easier.\nCode runs faster\nAnalyse one dimensional data\n\n02 - Gapminder Data\nThe data in this lesson was obtained from the site gapminder.org. The variables included are:\n- Aged 15+ Employment Rate (%)\n- Life Expectancy (years)\n- GDP/capita (US$, inflation adjusted)\n- Primary school completion (% of boys)\n- Primary school completion (% of girls)\n04 - One-Dimensional Data in NumPy and Pandas",
"import pandas as pd",
"Importing it takes some time\nHas many functions like read_csv and uniq that help a lot",
"import numpy as np",
"05 - NumPy Arrays\n\nBoth Pandas and NumPy have special data structures for 1 D data\n\nNumpy array is similar to Python list\nSimilarities\nAccess element by index\nAccess a range of elements\nUse loops\n\n\nDifferences\nEach element should have same type\nCan have different types but it was designed for single data type\nConvenient functions like mean and std",
"employments = pd.read_csv('employment_above_15.csv')\n\nemployments[0:5]\n\n#Selecting a column and displaying its first 5 elements\nemployments.get('1991')[0:5]\n\nemployments.get('Country')[0:5]\n\ndef max_employment(countries, employment): \n i = employment.argmax()\n return (countries[i], employment[i])\n\nmax_employment(employments.get('Country'), employments.get('2007'))",
"Let's look at the element type of few array which numpy calls dtype",
"countries = np.array(['Afghanistan','Albania','Algeria','Angola','Argentina','Armenia'])\nemployment = np.array([56.700001, 52.700001, 39.400002, 75.800003, 53.599998])\n\nprint countries.dtype\nprint employment.dtype\n\nprint np.array([0, 1, 2, 3]).dtype\nprint np.array([True, False, True]).dtype\nprint np.array(['AL', 'AK']).dtype",
"|S11 means String with maximum length 11.",
"print employment.mean()\nprint employment.std()\nprint employment.max()\nprint employment.sum()",
"07 - Vectorized Operations\n\nNumpy supports Vectorized operations\nA vector is a list of numbers\n \nAddition of 2 vectors can be done in several ways. Different languages implement it differently\n \nIn case of NumPy it is an element wise addition",
"np.array([1, 2, 3]) + np.array([4, 5, 6])",
"09 - Multiplying by a Scalar\n\nMultiplying by Scalar is scalar multiplied with each element of the array",
"np.array([1, 2, 3]) * 3",
"11 - Calculate Overall Completion Rate\nMore vectorized operations",
"np.array([1, 2, 3]) + np.array([4, 5, 6])\n\nnp.array([1, 2, 3]) + 1\n\nnp.array([1, 2, 3]) - np.array([7, 10, 15])\n\nnp.array([1, 2, 3]) - 1\n\nnp.array([1, 2, 3]) * np.array([4, 5, 6])\n\nnp.array([1, 2, 3]) * np.array([2])\n\n#Throws error\n#np.array([1, 2, 3]) * np.array([2, 3])\n\nnp.array([2, 3]) ** np.array([2, 3])\n\nnp.array([5, 6]) ** 2",
"See this article for more information about bitwise operations.\nIn NumPy, a & b performs a bitwise and of a and b. This is not necessarily the same as a logical and, if you wanted to see if matching terms in two integer vectors were non-zero. However, if a and b are both arrays of booleans, rather than integers, bitwise and and logical and are the same thing. If you want to perform a logical and on integer vectors, then you can use the NumPy function np.logical_and(a, b) or convert them into boolean vectors first.\nSimilarly, a | b performs a bitwise or, and ~a performs a bitwise not. However, if your arrays contain booleans, these will be the same as performing logical or and logical not. NumPy also has similar functions for performing these logical operations on integer-valued arrays.\nIn the solution, we may want to / (2.) instead of just / (2) . This is because in Python 2, dividing an integer by another integer (2) drops fractions, so if our inputs are also integers, we may end up losing information. If we divide by a float (2.) then we will definitely retain decimal values.",
"female_completion = pd.read_csv('female_completion_rate.csv')\nmale_completion = pd.read_csv('male_completion_rate.csv')\n\nfemale_completion[0:5]\n\nmale_completion[0:5]\n\nfemale = np.array([56.0, 23.0, 65.0])\nmale = np.array([23.0, 45.0, 22.0])\n\ndef overall_completion_rate(female_completion, male_completion):\n return (female_completion + male_completion) / 2\n\noverall_completion_rate(female, male)",
"13 - Standardizing Data\n\nHow does one data point compare to other data point?\nOne way to do this is to convert the data point to number of standard deviations from the mean",
"def standardize_data(values):\n return (values - values.mean()) / values.std()",
"15 - NumPy Index Arrays",
"def mean_time_for_paid_students(time_spent, days_to_cancel):\n return time_spent[days_to_cancel >= 7].mean()",
"17 - + vs +=",
"a = np.array([1, 2, 3, 4])\nb = a\na += np.array([1, 1, 1, 1]) #Difference here\nprint b",
"",
"a = np.array([1, 2, 3, 4])\nb = a\na = a + np.array([1, 1, 1, 1]) #Difference here\nprint b",
"19 - In-Place vs Not In-Place\n\n+= operates in-place while + does not",
"a = np.array([1, 2, 3, 4, 5])\nslice = a[:3]\nslice[0] = 100\n\na",
"slice refers to view of original array\n\n21 - Pandas Series",
"def variable_correlation(variable1, variable2):\n both_above = (variable1 > variable1.mean()) & \\\n (variable2 > variable2.mean())\n both_below = (variable1 < variable1.mean()) & \\\n (variable2 < variable2.mean())\n \n is_same_direction = both_above | both_below\n num_same_direction = is_same_direction.sum()\n \n num_different_direction = len(variable1) - num_same_direction\n \n return (num_same_direction, num_different_direction)",
"23 - Series Indexes",
"s = pd.Series([1, 2, 3, 4])\n\ns.describe()\n\ncountries = np.array(['Albania', 'Algeria', 'Andorra', 'Angola'])\nlife_expectancy = np.array([74.7, 75., 83.4, 57.6])\n\nlife_expectancy",
"Some people call countries[0] as indexing into array. But the instructor uses position 0 to avoid confusion. This is because in Pandas index and position are not the same thing",
"life_expectancy = pd.Series([74.7, 75., 83.4, 57.6],\n index = ['Albania', \n 'Algeria', \n 'Andorra', \n 'Angola'])\n\nlife_expectancy",
"NumPy arrays are souped-up version of Python lists\nPandas Series is like a cross between a list and a dictionary",
"#Access by index\nlife_expectancy.loc['Angola']\n\n#If we don't specify index then automatically adds index 0, 1, 2, ...\npd.Series([74.7, 75., 83.4, 57.6])\n\n#Access element by position\nprint life_expectancy.iloc[0]\n\n#same as\nprint life_expectancy[0]\n\ndef max_employment(employment):\n max_country = employment.argmax()\n max_value = employment.loc[max_country]\n \n return (max_country, max_value)",
"25 - Vectorized Operations and Series Indexes\n\nIn NumPy arrays addition happens as per position\nWhat happens if we add two Pandas series?",
"s1 = pd.Series([1, 2, 3, 4], index = ['a', 'b', 'c', 'd'])\ns2 = pd.Series([10, 20, 30, 40], index = ['a', 'b', 'c', 'd'])\n\ns1\n\ns2\n\ns1 + s2\n\n# Index are in different order\ns3 = pd.Series([10, 20, 30, 40], index = ['b', 'd', 'a', 'c'])\n\ns3\n\ns1 + s3",
"Matching indexes were used to add the 2 series",
"s4 = pd.Series([10, 20, 30, 40], index = ['c', 'd', 'e', 'f'])\n\ns4\n\ns1 + s4\n\n#If we don't want to show NaN in our solution\n(s1 + s4).dropna()",
"28 - Filling Missing Values - Solution",
"#If we want to give a default value\ns1.add(s4, fill_value=0)",
"29 - Pandas Series apply\n\nSo far we have used built-in functions like mean() and vectorized operations like +\napply takes a Series and a function and returns a new series applying the function on each element of the Series",
"names = pd.Series([\n 'Andre Agassi',\n 'Barry Bonds',\n 'Christopher Columbus',\n 'Daniel Defoe'\n ])\n\ndef reverse_name(name):\n split_name = name.split(\" \")\n return \"{}, {}\".format(split_name[1], split_name[0])\n\nreverse_name(names.iloc[0])\n\ndef reverse_names(names):\n return names.apply(reverse_name)\n\nreverse_names(names)",
"31 - Plotting in Pandas - Solution",
"employment = pd.read_csv('employment_above_15.csv', index_col = 'Country')\nfemale_completion = pd.read_csv('female_completion_rate.csv', index_col = 'Country')\nmale_completion = pd.read_csv('male_completion_rate.csv', index_col = 'Country')\nlife_expectancy = pd.read_csv('life_expectancy.csv', index_col = 'Country')\ngdp_per_capita = pd.read_csv('gdp_per_capita.csv', index_col = 'Country')\n\n_country = 'United States'\n\nemployment_country = employment.loc[_country]\nfemale_completion_country = female_completion.loc[_country]\nmale_completion_country = male_completion.loc[_country]\nlife_expectancy_country = life_expectancy.loc[_country]\ngdp_per_capita_country = gdp_per_capita.loc[_country]\n\n%pylab inline\n\nemployment_country.plot()\n\nfemale_completion_country.plot()\n\nmale_completion_country.plot()\n\nlife_expectancy_country.plot()\n\ngdp_per_capita_country.plot()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
TeamProgramming/testsPy
|
RSA.ipynb
|
mit
|
[
"ru = {}\nfor i in range(256,10000):\n if 'а'<=chr(i)<='я' or 'А'<=chr(i)<='Я':\n #print(i-848,'-', chr(i))\n ru[chr(i)]=i-848\n\nimport math\n\ndef phi(n):\n amount = 0\n for k in range(1, n + 1):\n if math.gcd(n, k) == 1:\n amount += 1\n return amount\n\ne, n = 233, 403 # открытый ключ\nE = lambda m: (pow(m,e))%n # Шифрование ",
"$$ E(m) = m^e (mod(n)) $$",
"d, n = 17,403 # закрытый ключ\nD = lambda c: (pow(c,d))%n # расшифрование",
"$$ D(c) = c^d (mod(n)) $$",
"p, q = 23, 17\nphi(n)",
"$$ p(360) = p(40)p(9) = 6p(5)p(8) = 64*4 = 96 $$\n$$ d\\cdot e\\equiv 1{\\pmod {\\varphi (n)}} $$",
"print ( (e*d)%phi(n))\n\ne*d",
"обратный элемент\n$$a^{-1} \\equiv a^{\\varphi(n)-1} \\pmod n, если { \\displaystyle (a,n)=1.} (a,n) = 1. $$\n$$ e = d^{\\phi(\\phi(n))-1} mod(\\phi(n))$$",
"a_1 = lambda a,n: pow(a,phi(n)-1)%n\n\ne = a_1(d,phi(n))\n\ndef strRSA(msg):\n for m in msg:\n print(m+\" = \"+str(ru[m]) + \" RSA = \"+str(E(ru[m])))\n\nstrRSA('жопа')"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
ffpenaloza/AstroExp
|
tarea1-2/Respuesta.ipynb
|
gpl-3.0
|
[
"<h1>Tarea 1, parte 2</h1>\n<h2>Pregunta 1</h2>\nPara convertir del sistema de filtros de la HRC al sistema UBVRI, se realiza el ajuste siguiendo la pauta del ejemplo (que transforma del sistema HRC a WFC) de Sirianni et al. (2005):\n$$(1): V = F555W + c_{1,1}(V-I)+c_{2,1}(V-I)^2$$\n$$(2): I = F814W + c_{1,2}(V-I)+c_{2,2}(V-I)^2$$\nLos valores de las constantes se encuentran tabulados para cada filtro, y dependen del valor $TCOL = F555W - F814W$, el cual también se asumirá como el valor $(V-I)$ para una primera aproximación. Luego se aplica la ecuación (1) y (2) para calcular V e I. Se calcula $TCOL$ con los valores actualizados de $(V-I)$ y se vuelven a aplicar las ecuaciones (1) y (2). Repitiendo el procedimiento, los valores $(V-I)$ cambian cada vez menos. Se comprueba que realizar 6 repeticiones basta para obtener un resultado con mejor precisión que la magnitud de entrada, pero se realizarán un par más en esta ocasión.\nSe escribe un catálogo y un CMD con esta nueva información.",
"from astropy.io import ascii\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n%matplotlib inline\n\ntbl = ascii.read('n121_match.cat')\n\ndef transform(v,i):\n\tc1f555 = [-0.09,-0.124]\n\tc2f555 = [0.034,0.018]\n\tc1f814 = [0.06,0.001]\n\tc2f814 = [-0.099,0.013]\n\tfor j in range(8):\n\t\ttcol = v-i\n\t\tv = np.where(tcol<0.6, (tbl['f555wMAG']+c1f555[0]*tcol+c2f555[0]*tcol*tcol), (tbl['f555wMAG']+c1f555[1]*tcol+c2f555[1]*tcol*tcol))\n\t\ti = np.where(tcol<0.1, (tbl['f814wMAG']+c1f814[0]*tcol+c2f814[0]*tcol*tcol), (tbl['f814wMAG']+c1f814[1]*tcol+c2f814[1]*tcol*tcol))\n\treturn v,i\n\ntbl['V'], tbl['I'] = transform(tbl['f555wMAG'],tbl['f814wMAG'])\n\n# Genera catalogo\ndata = [tbl['V'],tbl['I'],tbl['f814wALPHA'],tbl['f814wDELTA']]\nascii.write(data,'n121_ubvri.cat',delimiter='\\t')\n\n# Genera plots\nfig, ax = plt.subplots(2,1,figsize=(13,20))\nfor axis in ax:\n axis.set_xlim(-0.5,1.5)\n axis.set_ylim(24,16)\n axis.set_xlabel('$V-I$',fontsize=20)\n axis.set_ylabel('$V$',fontsize=20)\n\nax[0].plot(tbl['V']-tbl['I'],tbl['V'],'k.',alpha=0.4,ms=3)\nhess = ax[1].hexbin(tbl['V']-tbl['I'],tbl['V'],bins='log',gridsize=220,mincnt=1)\nleg = fig.colorbar(hess,ax=ax[1])\nleg.set_label('Cuentas (log)')\n\nplt.savefig('cmd_n121_ubvri', dpi=300)\nplt.show()\nplt.close()",
"<hr>\n<h2>Pregunta 2</h2>\nUsando como parámetros de entrada:\n<ul>\n <li>Edad = 10.5 Gyr</li>\n <li>Fe/H = -1.56</li>\n <li>Distancia = 61KPc</li>\n <li>$A_V = 0.1272$</li>\n</ul>",
"from isochrones.dartmouth import Dartmouth_Isochrone\niso = Dartmouth_Isochrone(bands=['V','I'])\nmodel = iso.isochrone(age=10.021189299,feh=-1.56,distance=61000,AV=0.1272,dm=1e-5)\n\nfig, ax = plt.subplots(figsize=(10,10))\nax.set_xlim(-0.5,1.5)\nax.set_ylim(24,16)\nax.set_xlabel('$V-I$',fontsize=20)\nax.set_ylabel('$V$',fontsize=20)\n\nax.plot(tbl['V']-tbl['I'],tbl['V'],'k.',alpha=0.4,ms=3)\nax.plot(model.V_mag - model.I_mag,model.V_mag,'g',lw=2)\n\nplt.show()\nplt.close()",
"Se obtuvo la isócrona de la imagen (línea verde). Al menos pasa cerca de los puntos obtenidos por fotometría.\nSin embargo, para una mejor aproximación sería necesaria una magnitud V mayor, de forma que todo el CMD se mueva a la derecha y hacia abajo.\n<hr>\n<h2>Pregunta 3</h2>\nPara calcular el <em>ridge line</em> del set de datos, primero se dividió el eje y en rangos de 0.22 magnitudes, que es lo mínimo posible para que no aparezcan sectores sin puntos. En cada rango se calculó la mediana de V-I y se ensambló un set de coordenadas que contiene (mediana,V). El segmento en el eje y que contiene la rama horizontal (entre 19.4 y 20) fue ignorado para este cálculo.\nPosteriormente, se ajustó un polinomio de grado 7.\nSe grafica en <strong>azul</strong> la línea ajustada.\nSe ajustarán las magnitudes en la última pregunta con tal de poder comparar esta isócrona con el ridge line y un mejor fit.",
"s = 0.22\nx = []\ny1 = np.arange(16.5,19.4,step=s)\ny2 = np.arange(19.5,23.5,step=s)\ny = np.append(y1,y2)\nfor i in y:\n\ta = np.where((i<tbl['V']) & (tbl['V']<i+s) & (tbl['V']-tbl['I']>0.4))\n\tx.append(np.median(tbl['V'][a]-tbl['I'][a]))\n\n \nfig, ax = plt.subplots(figsize=(10,10))\nax.set_xlim(-0.5,1.5)\nax.set_ylim(24,16)\nax.set_xlabel('$V-I$',fontsize=20)\nax.set_ylabel('$V$',fontsize=20)\n\nax.plot(tbl['V']-tbl['I'],tbl['V'],'k.',alpha=0.4,ms=3)\n\n# ajuste de polinomio a la ridge line\np = np.poly1d(np.polyfit(y,x,7))\nax.plot(p(y),y,'b',lw=2)\n\n#nueva isocrona que se acerque a nuestra ridge line\niso = Dartmouth_Isochrone(bands=['V','I'])\nmodel = iso.isochrone(age=9.89,feh=-1.55,distance=61000,dm=1e-5)\n\nax.plot(model.V_mag - model.I_mag,model.V_mag,'r',lw=2)\n\nplt.show()\nplt.close()",
"<hr>\n<h2>pregunta 4</h2>\nAhora que se tiene la ridge line, se busca una isócrona que se acerque de la mejor manera. Suponiendo que la distancia al cúmulo es el dato mejor conocido, se cambian los valores de edad y metalicidad. Se encuentra que el mejor fit se logra con\n<ul>\n <li>Edad: 9.89Gyr</li>\n <li>Fe/H: -1.55</li>\n <li>distancia: 61KPc</li>\n</ul>\nEl fit se traza en rojo. Si bien no es el más cercano a la línea azul, adopta una forma cercana. El trabajo de desplazar el gráfico del CMD puede hacerse al cambiar los valores V e I. Haciendo $V = V+0.15$ Se obtiene el siguiente fit, que corresponde a la primera isócrona, con parámetros de literatura. Hecho así, podría intentarse refinar más calibración en vista de que la forma de la curva es la del CMD.",
"fig, ax = plt.subplots(figsize=(10,10))\nax.set_xlim(-0.5,1.5)\nax.set_ylim(24,16)\nax.set_xlabel('$V-I$',fontsize=20)\nax.set_ylabel('$V$',fontsize=20)\n\nax.plot(tbl['V']-tbl['I']+0.15,tbl['V']+0.15,'k.',alpha=0.4,ms=3)\n\n#nueva isocrona que se acerque a nuestra ridge line\niso = Dartmouth_Isochrone(bands=['V','I'])\nmodel = iso.isochrone(age=10.021189299,feh=-1.56,distance=61000,AV=0.1272,dm=1e-5)\n\nax.plot(model.V_mag - model.I_mag,model.V_mag,'r',lw=2)\n\nplt.show()\nplt.close()",
"Otras isócronas que podrían haberse utilizado son las de Teramo y las de Padova. En la literatura, ambas entregan edades mayores de este cúmulo (11.8 Gyr y 11.2 Gyr respectivamente, según <a href=\"http://iopscience.iop.org/article/10.1088/0004-6256/135/4/1106/pdf\">Katharina Glatt et al. 2008</a>)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
jrg365/gpytorch
|
examples/03_Multitask_Exact_GPs/Multitask_GP_Regression.ipynb
|
mit
|
[
"Multitask GP Regression\nIntroduction\nMultitask regression, introduced in this paper, learns similarities in the outputs simultaneously. It's useful when you are performing regression on multiple functions that share the same inputs, especially if they have similarities (such as being sinusoidal). \nGiven inputs $x$ and $x'$, and tasks $i$ and $j$, the covariance between two datapoints and two tasks is given by\n$$ k([x, i], [x', j]) = k_\\text{inputs}(x, x') * k_\\text{tasks}(i, j)\n$$\nwhere $k_\\text{inputs}$ is a standard kernel (e.g. RBF) that operates on the inputs.\n$k_\\text{task}$ is a lookup table containing inter-task covariance.",
"import math\nimport torch\nimport gpytorch\nfrom matplotlib import pyplot as plt\n\n%matplotlib inline\n%load_ext autoreload\n%autoreload 2",
"Set up training data\nIn the next cell, we set up the training data for this example. We'll be using 100 regularly spaced points on [0,1] which we evaluate the function on and add Gaussian noise to get the training labels.\nWe'll have two functions - a sine function (y1) and a cosine function (y2).\nFor MTGPs, our train_targets will actually have two dimensions: with the second dimension corresponding to the different tasks.",
"train_x = torch.linspace(0, 1, 100)\n\ntrain_y = torch.stack([\n torch.sin(train_x * (2 * math.pi)) + torch.randn(train_x.size()) * 0.2,\n torch.cos(train_x * (2 * math.pi)) + torch.randn(train_x.size()) * 0.2,\n], -1)",
"Define a multitask model\nThe model should be somewhat similar to the ExactGP model in the simple regression example.\nThe differences:\n\nWe're going to wrap ConstantMean with a MultitaskMean. This makes sure we have a mean function for each task.\nRather than just using a RBFKernel, we're using that in conjunction with a MultitaskKernel. This gives us the covariance function described in the introduction.\nWe're using a MultitaskMultivariateNormal and MultitaskGaussianLikelihood. This allows us to deal with the predictions/outputs in a nice way. For example, when we call MultitaskMultivariateNormal.mean, we get a n x num_tasks matrix back.\n\nYou may also notice that we don't use a ScaleKernel, since the IndexKernel will do some scaling for us. (This way we're not overparameterizing the kernel.)",
"class MultitaskGPModel(gpytorch.models.ExactGP):\n def __init__(self, train_x, train_y, likelihood):\n super(MultitaskGPModel, self).__init__(train_x, train_y, likelihood)\n self.mean_module = gpytorch.means.MultitaskMean(\n gpytorch.means.ConstantMean(), num_tasks=2\n )\n self.covar_module = gpytorch.kernels.MultitaskKernel(\n gpytorch.kernels.RBFKernel(), num_tasks=2, rank=1\n )\n\n def forward(self, x):\n mean_x = self.mean_module(x)\n covar_x = self.covar_module(x)\n return gpytorch.distributions.MultitaskMultivariateNormal(mean_x, covar_x)\n\n \nlikelihood = gpytorch.likelihoods.MultitaskGaussianLikelihood(num_tasks=2)\nmodel = MultitaskGPModel(train_x, train_y, likelihood)",
"Train the model hyperparameters",
"# this is for running the notebook in our testing framework\nimport os\nsmoke_test = ('CI' in os.environ)\ntraining_iterations = 2 if smoke_test else 50\n\n\n# Find optimal model hyperparameters\nmodel.train()\nlikelihood.train()\n\n# Use the adam optimizer\noptimizer = torch.optim.Adam(model.parameters(), lr=0.1) # Includes GaussianLikelihood parameters\n\n# \"Loss\" for GPs - the marginal log likelihood\nmll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)\n\nfor i in range(training_iterations):\n optimizer.zero_grad()\n output = model(train_x)\n loss = -mll(output, train_y)\n loss.backward()\n print('Iter %d/%d - Loss: %.3f' % (i + 1, training_iterations, loss.item()))\n optimizer.step()",
"Make predictions with the model",
"# Set into eval mode\nmodel.eval()\nlikelihood.eval()\n\n# Initialize plots\nf, (y1_ax, y2_ax) = plt.subplots(1, 2, figsize=(8, 3))\n\n# Make predictions\nwith torch.no_grad(), gpytorch.settings.fast_pred_var():\n test_x = torch.linspace(0, 1, 51)\n predictions = likelihood(model(test_x))\n mean = predictions.mean\n lower, upper = predictions.confidence_region()\n \n# This contains predictions for both tasks, flattened out\n# The first half of the predictions is for the first task\n# The second half is for the second task\n\n# Plot training data as black stars\ny1_ax.plot(train_x.detach().numpy(), train_y[:, 0].detach().numpy(), 'k*')\n# Predictive mean as blue line\ny1_ax.plot(test_x.numpy(), mean[:, 0].numpy(), 'b')\n# Shade in confidence \ny1_ax.fill_between(test_x.numpy(), lower[:, 0].numpy(), upper[:, 0].numpy(), alpha=0.5)\ny1_ax.set_ylim([-3, 3])\ny1_ax.legend(['Observed Data', 'Mean', 'Confidence'])\ny1_ax.set_title('Observed Values (Likelihood)')\n\n# Plot training data as black stars\ny2_ax.plot(train_x.detach().numpy(), train_y[:, 1].detach().numpy(), 'k*')\n# Predictive mean as blue line\ny2_ax.plot(test_x.numpy(), mean[:, 1].numpy(), 'b')\n# Shade in confidence \ny2_ax.fill_between(test_x.numpy(), lower[:, 1].numpy(), upper[:, 1].numpy(), alpha=0.5)\ny2_ax.set_ylim([-3, 3])\ny2_ax.legend(['Observed Data', 'Mean', 'Confidence'])\ny2_ax.set_title('Observed Values (Likelihood)')\n\nNone"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
giacomov/3ML
|
docs/examples/joint_BAT_gbm_demo.ipynb
|
bsd-3-clause
|
[
"Example joint fit between GBM and Swift BAT\nOne of the key features of 3ML is the ability to fit multi-messenger data properly. A simple example of this is the joint fitting of two instruments whose data obey different likelihoods. Here, we have GBM data which obey a Poisson-Gaussian profile likelihood (<a href=http://heasarc.gsfc.nasa.gov/docs/xanadu/xspec/manual/node293.html> PGSTAT</a> in XSPEC lingo) and Swift BAT whose data are the result of a \"fit\" via a coded mask and hence obey a Gaussian ( $\\chi^2$ ) likelihood.",
"%matplotlib inline\nimport matplotlib.pyplot as plt\nfrom jupyterthemes import jtplot\n\njtplot.style(context=\"talk\", fscale=1, ticks=True, grid=False)\nplt.style.use(\"mike\")\n\nfrom threeML import *\nfrom threeML.io.package_data import get_path_of_data_file\n\nimport os\n\nimport warnings\n\n\nwarnings.simplefilter(\"ignore\")",
"Plugin setup\nWe have data from the same time interval from Swift BAT and a GBM NAI and BGO detector. We have preprocessed GBM data to so that it is OGIP compliant. (Remember that we can handle the raw data with the TimeSeriesBuilder). Thus, we will use the OGIPLike plugin to read in each dataset, make energy selections and examine the raw count spectra. \nSwift BAT",
"bat_pha = get_path_of_data_file(\"datasets/bat/gbm_bat_joint_BAT.pha\")\nbat_rsp = get_path_of_data_file(\"datasets/bat/gbm_bat_joint_BAT.rsp\")\n\nbat = OGIPLike(\"BAT\", observation=bat_pha, response=bat_rsp)\n\nbat.set_active_measurements(\"15-150\")\nbat.view_count_spectrum()",
"Fermi GBM",
"nai6 = OGIPLike(\n \"n6\",\n get_path_of_data_file(\"datasets/gbm/gbm_bat_joint_NAI_06.pha\"),\n get_path_of_data_file(\"datasets/gbm/gbm_bat_joint_NAI_06.bak\"),\n get_path_of_data_file(\"datasets/gbm/gbm_bat_joint_NAI_06.rsp\"),\n spectrum_number=1,\n)\n\n\nnai6.set_active_measurements(\"8-900\")\nnai6.view_count_spectrum()\n\nbgo0 = OGIPLike(\n \"b0\",\n get_path_of_data_file(\"datasets/gbm/gbm_bat_joint_BGO_00.pha\"),\n get_path_of_data_file(\"datasets/gbm/gbm_bat_joint_BGO_00.bak\"),\n get_path_of_data_file(\"datasets/gbm/gbm_bat_joint_BGO_00.rsp\"),\n spectrum_number=1,\n)\n\nbgo0.set_active_measurements(\"250-30000\")\nbgo0.view_count_spectrum()",
"Model setup\nWe set up our spectrum and likelihood model and combine the data. 3ML will automatically assign the proper likelihood to each data set. At first, we will assume a perfect calibration between the different detectors and not apply a so-called effective area correction.",
"band = Band()\n\nmodel_no_eac = Model(PointSource(\"joint_fit_no_eac\", 0, 0, spectral_shape=band))",
"Spectral fitting\nNow we simply fit the data by building the data list, creating the joint likelihood and running the fit.\nNo effective area correction",
"data_list = DataList(bat, nai6, bgo0)\n\njl_no_eac = JointLikelihood(model_no_eac, data_list)\n\njl_no_eac.fit()",
"The fit has resulted in a very typical Band function fit. Let's look in count space at how good of a fit we have obtained.",
"threeML_config[\"ogip\"][\"model plot cmap\"] = \"Set1\"\n\ndisplay_spectrum_model_counts(\n jl_no_eac, step=False, min_rate=[0.01, 10.0, 10.0], data_colors=[\"grey\", \"k\", \"k\"]\n)",
"It seems that the effective areas between GBM and BAT do not agree! We can look at the goodness of fit for the various data sets.",
"gof_object = GoodnessOfFit(jl_no_eac)\n\nwith parallel_computation():\n\n gof, res_frame, lh_frame = gof_object.by_mc(n_iterations=8000)\n\nimport pandas as pd\npd.Series(gof)",
"Both the GBM NaI detector and Swift BAT exhibit poor GOF.\nWith effective area correction\nNow let's add an effective area correction between the detectors to see if this fixes the problem. The effective area is a nuisance parameter that attempts to model systematic problems in an instrument's calibration. It simply scales the counts of an instrument by a multiplicative factor. It cannot handle more complicated energy-dependent effects.",
"# turn on the effective area correction and set it's bounds\nnai6.use_effective_area_correction(0.2, 1.8)\nbgo0.use_effective_area_correction(0.2, 1.8)\n\nmodel_eac = Model(PointSource(\"joint_fit_eac\", 0, 0, spectral_shape=band))\n\njl_eac = JointLikelihood(model_eac, data_list)\n\njl_eac.fit()",
"Now we have a much better fit to all data sets",
"display_spectrum_model_counts(\n jl_eac, step=False, min_rate=[0.01, 10.0, 10.0], data_colors=[\"grey\", \"k\", \"k\"]\n)\n\ngof_object = GoodnessOfFit(jl_eac)\n\nwith parallel_computation():\n\n gof, res_frame, lh_frame = gof_object.by_mc(\n n_iterations=8000, continue_on_failure=True\n )\n\nimport pandas as pd\npd.Series(gof)",
"Examining the differences\nLet's plot the fits in model space and see how different the resulting models are.",
"plot_spectra(\n jl_eac.results,\n jl_no_eac.results,\n fit_cmap=\"Set1\",\n contour_cmap=\"Set1\",\n flux_unit=\"erg2/(keV s cm2)\",\n equal_tailed=True,\n)",
"We can easily see that the models are different"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
subhankarb/Machine-Learning-PlayGround
|
Machine-Learning-Specialization/machine_learning_regression/week2/multiple-regression-assignment-1.ipynb
|
apache-2.0
|
[
"Regression Week 2: Multiple Regression (Interpretation)\nThe goal of this first notebook is to explore multiple regression and feature engineering with existing graphlab functions.\nIn this notebook you will use data on house sales in King County to predict prices using multiple regression. You will:\n* Use SFrames to do some feature engineering\n* Use built-in graphlab functions to compute the regression weights (coefficients/parameters)\n* Given the regression weights, predictors and outcome write a function to compute the Residual Sum of Squares\n* Look at coefficients and interpret their meanings\n* Evaluate multiple models via RSS\nFire up graphlab create",
"import graphlab",
"Load in house sales data\nDataset is from house sales in King County, the region where the city of Seattle, WA is located.",
"sales = graphlab.SFrame('kc_house_data.gl/')\nsales.head()",
"Split data into training and testing.\nWe use seed=0 so that everyone running this notebook gets the same results. In practice, you may set a random seed (or let GraphLab Create pick a random seed for you).",
"train_data,test_data = sales.random_split(.8,seed=0)",
"Learning a multiple regression model\nRecall we can use the following code to learn a multiple regression model predicting 'price' based on the following features:\nexample_features = ['sqft_living', 'bedrooms', 'bathrooms'] on training data with the following code:\n(Aside: We set validation_set = None to ensure that the results are always the same)",
"example_features = ['sqft_living', 'bedrooms', 'bathrooms']\nexample_model = graphlab.linear_regression.create(train_data, target = 'price', features = example_features, \n validation_set = None)",
"Now that we have fitted the model we can extract the regression weights (coefficients) as an SFrame as follows:",
"example_weight_summary = example_model.get(\"coefficients\")\nprint example_weight_summary",
"Making Predictions\nIn the gradient descent notebook we use numpy to do our regression. In this book we will use existing graphlab create functions to analyze multiple regressions. \nRecall that once a model is built we can use the .predict() function to find the predicted values for data we pass. For example using the example model above:",
"example_predictions = example_model.predict(train_data)\nprint example_predictions[0] # should be 271789.505878",
"Compute RSS\nNow that we can make predictions given the model, let's write a function to compute the RSS of the model. Complete the function below to calculate RSS given the model, data, and the outcome.",
"def get_residual_sum_of_squares(model, data, outcome):\n # First get the predictions\n predictions = model.predict(data)\n # Then compute the residuals/errors\n errors = outcome - predictions\n # Then square and add them up\n RSS = (errors * errors).sum()\n return(RSS) ",
"Test your function by computing the RSS on TEST data for the example model:",
"rss_example_train = get_residual_sum_of_squares(example_model, test_data, test_data['price'])\nprint rss_example_train # should be 2.7376153833e+14",
"Create some new features\nAlthough we often think of multiple regression as including multiple different features (e.g. # of bedrooms, squarefeet, and # of bathrooms) but we can also consider transformations of existing features e.g. the log of the squarefeet or even \"interaction\" features such as the product of bedrooms and bathrooms.\nYou will use the logarithm function to create a new feature. so first you should import it from the math library.",
"from math import log",
"Next create the following 4 new features as column in both TEST and TRAIN data:\n* bedrooms_squared = bedrooms*bedrooms\n* bed_bath_rooms = bedrooms*bathrooms\n* log_sqft_living = log(sqft_living)\n* lat_plus_long = lat + long \nAs an example here's the first one:",
"train_data['bedrooms_squared'] = train_data['bedrooms'].apply(lambda x: x**2)\ntest_data['bedrooms_squared'] = test_data['bedrooms'].apply(lambda x: x**2)\n\n# create the remaining 3 features in both TEST and TRAIN data\ntrain_data['bed_bath_rooms'] = (train_data['bedrooms'] * train_data['bathrooms']).apply(lambda x: x)\ntest_data['bed_bath_rooms'] = (test_data['bedrooms'] * test_data['bathrooms']).apply(lambda x: x)\n\ntrain_data['log_sqft_living'] = train_data['sqft_living'].apply(lambda x: log(x))\ntest_data['log_sqft_living'] = test_data['sqft_living'].apply(lambda x: log(x))\n\ntrain_data['lat_plus_long'] = (train_data['lat'] + train_data['long']).apply(lambda x: x)\ntest_data['lat_plus_long'] = (test_data['lat'] + test_data['long']).apply(lambda x: x)",
"Squaring bedrooms will increase the separation between not many bedrooms (e.g. 1) and lots of bedrooms (e.g. 4) since 1^2 = 1 but 4^2 = 16. Consequently this feature will mostly affect houses with many bedrooms.\nbedrooms times bathrooms gives what's called an \"interaction\" feature. It is large when both of them are large.\nTaking the log of squarefeet has the effect of bringing large values closer together and spreading out small values.\nAdding latitude to longitude is totally non-sensical but we will do it anyway (you'll see why)\n\nQuiz Question: What is the mean (arithmetic average) value of your 4 new features on TEST data? (round to 2 digits)",
"print 'bedrooms_squared:', test_data['bedrooms_squared'].mean()\nprint 'bed_bath_rooms', test_data['bed_bath_rooms'].mean()\nprint 'lat_plus_long', test_data['lat_plus_long'].mean()\nprint 'log_sqft_living', test_data['log_sqft_living'].mean()",
"Learning Multiple Models\nNow we will learn the weights for three (nested) models for predicting house prices. The first model will have the fewest features the second model will add one more feature and the third will add a few more:\n* Model 1: squarefeet, # bedrooms, # bathrooms, latitude & longitude\n* Model 2: add bedrooms*bathrooms\n* Model 3: Add log squarefeet, bedrooms squared, and the (nonsensical) latitude + longitude",
"model_1_features = ['sqft_living', 'bedrooms', 'bathrooms', 'lat', 'long']\nmodel_2_features = model_1_features + ['bed_bath_rooms']\nmodel_3_features = model_2_features + ['bedrooms_squared', 'log_sqft_living', 'lat_plus_long']",
"Now that you have the features, learn the weights for the three different models for predicting target = 'price' using graphlab.linear_regression.create() and look at the value of the weights/coefficients:",
"# Learn the three models: (don't forget to set validation_set = None)\nmodel_1 = graphlab.linear_regression.create(train_data, target = 'price', \n features = model_1_features, validation_set = None)\nmodel_2 = graphlab.linear_regression.create(train_data, target = 'price', \n features = model_2_features, validation_set = None)\nmodel_3 = graphlab.linear_regression.create(train_data, target = 'price', \n features = model_3_features, validation_set = None)\n\n# Examine/extract each model's coefficients:\nmodel_1_summary = model_1.get(\"coefficients\")\nmodel_2_summary = model_2.get(\"coefficients\")\nmodel_3_summary = model_3.get(\"coefficients\")\nprint model_1_summary\nprint model_2_summary\nprint model_3_summary",
"Quiz Question: What is the sign (positive or negative) for the coefficient/weight for 'bathrooms' in model 1?\nQuiz Question: What is the sign (positive or negative) for the coefficient/weight for 'bathrooms' in model 2?\nThink about what this means.\nComparing multiple models\nNow that you've learned three models and extracted the model weights we want to evaluate which model is best.\nFirst use your functions from earlier to compute the RSS on TRAINING Data for each of the three models.",
"# Compute the RSS on TRAINING data for each of the three models and record the values:\nrss_model_1 = get_residual_sum_of_squares(model_1, train_data, train_data['price'])\nrss_model_2 = get_residual_sum_of_squares(model_2, train_data, train_data['price'])\nrss_model_3 = get_residual_sum_of_squares(model_3, train_data, train_data['price'])\nprint 'rss_model_1: ', rss_model_1\nprint 'rss_model_2: ', rss_model_2\nprint 'rss_model_3: ', rss_model_3",
"Quiz Question: Which model (1, 2 or 3) has lowest RSS on TRAINING Data? Is this what you expected?\nNow compute the RSS on on TEST data for each of the three models.",
"# Compute the RSS on TESTING data for each of the three models and record the values:\nrss_model_1 = get_residual_sum_of_squares(model_1, test_data, test_data['price'])\nrss_model_2 = get_residual_sum_of_squares(model_2, test_data, test_data['price'])\nrss_model_3 = get_residual_sum_of_squares(model_3, test_data, test_data['price'])\nprint 'rss_model_1: ', rss_model_1\nprint 'rss_model_2: ', rss_model_2\nprint 'rss_model_3: ', rss_model_3",
"Quiz Question: Which model (1, 2 or 3) has lowest RSS on TESTING Data? Is this what you expected? Think about the features that were added to each model from the previous."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
tuanavu/coursera-university-of-washington
|
machine_learning/2_regression/assignment/week1/week-1-simple-regression-assignment-exercise.ipynb
|
mit
|
[
"Regression Week 1: Simple Linear Regression\nIn this notebook we will use data on house sales in King County to predict house prices using simple (one input) linear regression. You will:\n* Use graphlab SArray and SFrame functions to compute important summary statistics\n* Write a function to compute the Simple Linear Regression weights using the closed form solution\n* Write a function to make predictions of the output given the input feature\n* Turn the regression around to predict the input given the output\n* Compare two different models for predicting house prices\nIn this notebook you will be provided with some already complete code as well as some code that you should complete yourself in order to answer quiz questions. The code we provide to complete is optional and is there to assist you with solving the problems but feel free to ignore the helper code and write your own.\nFire up graphlab create",
"import sys\nsys.path.append('C:\\Anaconda2\\envs\\dato-env\\Lib\\site-packages')\nimport graphlab",
"Load house sales data\nDataset is from house sales in King County, the region where the city of Seattle, WA is located.",
"sales = graphlab.SFrame('kc_house_data.gl/')",
"Split data into training and testing\nWe use seed=0 so that everyone running this notebook gets the same results. In practice, you may set a random seed (or let GraphLab Create pick a random seed for you).",
"train_data,test_data = sales.random_split(.8,seed=0)",
"Useful SFrame summary functions\nIn order to make use of the closed form solution as well as take advantage of graphlab's built in functions we will review some important ones. In particular:\n* Computing the sum of an SArray\n* Computing the arithmetic average (mean) of an SArray\n* multiplying SArrays by constants\n* multiplying SArrays by other SArrays",
"# Let's compute the mean of the House Prices in King County in 2 different ways.\nprices = sales['price'] # extract the price column of the sales SFrame -- this is now an SArray\n\n# recall that the arithmetic average (the mean) is the sum of the prices divided by the total number of houses:\nsum_prices = prices.sum()\nnum_houses = prices.size() # when prices is an SArray .size() returns its length\navg_price_1 = sum_prices/num_houses\navg_price_2 = prices.mean() # if you just want the average, the .mean() function\nprint \"average price via method 1: \" + str(avg_price_1)\nprint \"average price via method 2: \" + str(avg_price_2)",
"As we see we get the same answer both ways",
"# if we want to multiply every price by 0.5 it's a simple as:\nhalf_prices = 0.5*prices\n# Let's compute the sum of squares of price. We can multiply two SArrays of the same length elementwise also with *\nprices_squared = prices*prices\nsum_prices_squared = prices_squared.sum() # price_squared is an SArray of the squares and we want to add them up.\nprint \"the sum of price squared is: \" + str(sum_prices_squared)",
"Aside: The python notation x.xxe+yy means x.xx * 10^(yy). e.g 100 = 10^2 = 1*10^2 = 1e2 \nBuild a generic simple linear regression function\nArmed with these SArray functions we can use the closed form solution found from lecture to compute the slope and intercept for a simple linear regression on observations stored as SArrays: input_feature, output.\nComplete the following function (or write your own) to compute the simple linear regression slope and intercept:\nHint:\n\nhttps://www.coursera.org/learn/ml-regression/module/9crXk/discussions/MZT-xZnVEeWPmAru8qzZow\nFollow slide 68, which is Approach 1: Set gradient = 0",
"def simple_linear_regression(input_feature, output):\n Xi = input_feature\n Yi = output\n N = len(Xi)\n # compute the mean of input_feature and output\n Ymean = Yi.mean()\n Xmean = Xi.mean()\n \n # compute the product of the output and the input_feature and its mean\n SumYiXi = (Yi * Xi).sum()\n YiXiByN = (Yi.sum() * Xi.sum()) / N\n \n # compute the squared value of the input_feature and its mean\n XiSq = (Xi * Xi).sum()\n XiXiByN = (Xi.sum() * Xi.sum()) / N\n \n # use the formula for the slope\n slope = (SumYiXi - YiXiByN) / (XiSq - XiXiByN)\n \n # use the formula for the intercept\n intercept = Ymean - (slope * Xmean)\n return (intercept, slope)",
"We can test that our function works by passing it something where we know the answer. In particular we can generate a feature and then put the output exactly on a line: output = 1 + 1*input_feature then we know both our slope and intercept should be 1",
"test_feature = graphlab.SArray(range(5))\ntest_output = graphlab.SArray(1 + 1*test_feature)\n(test_intercept, test_slope) = simple_linear_regression(test_feature, test_output)\nprint \"Intercept: \" + str(test_intercept)\nprint \"Slope: \" + str(test_slope)",
"Now that we know it works let's build a regression model for predicting price based on sqft_living. Remember that we train on train_data!",
"sqft_intercept, sqft_slope = simple_linear_regression(train_data['sqft_living'], train_data['price'])\n\nprint \"Intercept: \" + str(sqft_intercept)\nprint \"Slope: \" + str(sqft_slope)",
"Predicting Values\nNow that we have the model parameters: intercept & slope we can make predictions. Using SArrays it's easy to multiply an SArray by a constant and add a constant value. Complete the following function to return the predicted output given the input_feature, slope and intercept:",
"def get_regression_predictions(input_feature, intercept, slope):\n # calculate the predicted values:\n predicted_values = intercept + (slope * input_feature)\n return predicted_values",
"Now that we can calculate a prediction given the slope and intercept let's make a prediction. Use (or alter) the following to find out the estimated price for a house with 2650 squarefeet according to the squarefeet model we estimated above.\nQuiz Question: Using your Slope and Intercept from (4), What is the predicted price for a house with 2650 sqft?",
"my_house_sqft = 2650\nestimated_price = get_regression_predictions(my_house_sqft, sqft_intercept, sqft_slope)\nprint \"The estimated price for a house with %d squarefeet is $%.2f\" % (my_house_sqft, estimated_price)",
"Residual Sum of Squares\nNow that we have a model and can make predictions let's evaluate our model using Residual Sum of Squares (RSS). Recall that RSS is the sum of the squares of the residuals and the residuals is just a fancy word for the difference between the predicted output and the true output. \nComplete the following (or write your own) function to compute the RSS of a simple linear regression model given the input_feature, output, intercept and slope:",
"def get_residual_sum_of_squares(input_feature, output, intercept, slope):\n # First get the predictions\n predicted_values = intercept + (slope * input_feature)\n # then compute the residuals (since we are squaring it doesn't matter which order you subtract)\n residuals = output - predicted_values\n # square the residuals and add them up\n RSS = (residuals * residuals).sum()\n return(RSS)",
"Let's test our get_residual_sum_of_squares function by applying it to the test model where the data lie exactly on a line. Since they lie exactly on a line the residual sum of squares should be zero!",
"print get_residual_sum_of_squares(test_feature, test_output, test_intercept, test_slope) # should be 0.0",
"Now use your function to calculate the RSS on training data from the squarefeet model calculated above.\nQuiz Question: According to this function and the slope and intercept from the squarefeet model What is the RSS for the simple linear regression using squarefeet to predict prices on TRAINING data?",
"rss_prices_on_sqft = get_residual_sum_of_squares(train_data['sqft_living'], train_data['price'], sqft_intercept, sqft_slope)\nprint 'The RSS of predicting Prices based on Square Feet is : ' + str(rss_prices_on_sqft)",
"Predict the squarefeet given price\nWhat if we want to predict the squarefoot given the price? Since we have an equation y = a + b*x we can solve the function for x. So that if we have the intercept (a) and the slope (b) and the price (y) we can solve for the estimated squarefeet (x).\nComplete the following function to compute the inverse regression estimate, i.e. predict the input_feature given the output!",
"def inverse_regression_predictions(output, intercept, slope):\n # solve output = intercept + slope*input_feature for input_feature. Use this equation to compute the inverse predictions:\n estimated_feature = (output - intercept)/slope\n return estimated_feature",
"Now that we have a function to compute the squarefeet given the price from our simple regression model let's see how big we might expect a house that costs $800,000 to be.\nQuiz Question: According to this function and the regression slope and intercept from (3) what is the estimated square-feet for a house costing $800,000?",
"my_house_price = 800000\nestimated_squarefeet = inverse_regression_predictions(my_house_price, sqft_intercept, sqft_slope)\nprint \"The estimated squarefeet for a house worth $%.2f is %d\" % (my_house_price, estimated_squarefeet)",
"New Model: estimate prices from bedrooms\nWe have made one model for predicting house prices using squarefeet, but there are many other features in the sales SFrame. \nUse your simple linear regression function to estimate the regression parameters from predicting Prices based on number of bedrooms. Use the training data!",
"# Estimate the slope and intercept for predicting 'price' based on 'bedrooms'\nsqft_intercept, sqft_slope = simple_linear_regression(train_data['bedrooms'], train_data['price'])\n\nprint \"Intercept: \" + str(sqft_intercept)\nprint \"Slope: \" + str(sqft_slope)",
"Test your Linear Regression Algorithm\nNow we have two models for predicting the price of a house. How do we know which one is better? Calculate the RSS on the TEST data (remember this data wasn't involved in learning the model). Compute the RSS from predicting prices using bedrooms and from predicting prices using squarefeet.\nQuiz Question: Which model (square feet or bedrooms) has lowest RSS on TEST data? Think about why this might be the case.",
"# Compute RSS when using bedrooms on TEST data:\nsqft_intercept, sqft_slope = simple_linear_regression(train_data['bedrooms'], train_data['price'])\nrss_prices_on_bedrooms = get_residual_sum_of_squares(test_data['bedrooms'], test_data['price'], sqft_intercept, sqft_slope)\nprint 'The RSS of predicting Prices based on Bedrooms is : ' + str(rss_prices_on_bedrooms)\n\n# Compute RSS when using squarfeet on TEST data:\nsqft_intercept, sqft_slope = simple_linear_regression(train_data['sqft_living'], train_data['price'])\nrss_prices_on_sqft = get_residual_sum_of_squares(test_data['sqft_living'], test_data['price'], sqft_intercept, sqft_slope)\nprint 'The RSS of predicting Prices based on Square Feet is : ' + str(rss_prices_on_sqft)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
TickSmith/tickvault-python-api
|
examples/TickVault_NasdaqCx_Example1.ipynb
|
mit
|
[
"TickVault Python API Example Usage\n\nMake sure to have registered and generated an API key on https://nasdaq-cx.ticksmith.com before starting this example. \nIn the following code block, replace <span style=\"color:blue\"> <USER_NAME> </span> with the email you signed up with (within quotes) and <span style=\"color:blue\"> <API_KEY> </span> with the API key you generated (also within quotes).\nThen, to see what data you have access to, call the 'datasets' method:",
"from tickvaultpythonapi.nasdaqcxclient import NasdaqCxClient\n\nnasdaq = NasdaqCxClient(user_name=\"<USER_NAME>\", \n secret_key=\"<API_KEY>\")\n\nnasdaq.datasets()",
"To see what columns exist in that dataset and filter by, call the 'describe' method on a dataset from above:",
"nasdaq.describe('cx_hits')",
"To access the bid and ask prices for TD quotes when the ask and bid sizes were greater than 10, we will query the HiTS dataset (and print its length to make sure we got results):",
"result = nasdaq.query_hits(source=\"CHIX\", tickers=\"TD\",\n fields=\"ts,askprice,bidprice\", start_time=20150302093000, end_time=20150302160000,\n predicates=\"ask_size > 10 and bid_size > 10 and line_type like Q\",\n limit=1000000)\n\nstr(len(result))",
"Let's convert the result to a pandas DataFrame for analytics:",
"df = nasdaq.as_dataframe(result) \n\ndf.info()",
"Now to visualise the result:",
"%matplotlib inline\n\ndf.plot()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
mathLab/RBniCS
|
tutorials/17_navier_stokes/tutorial_navier_stokes_1_exact.ipynb
|
lgpl-3.0
|
[
"Tutorial 17 - Navier Stokes equations\nKeywords: exact parametrized functions, supremizer operator\n1. Introduction\nIn this tutorial, we will study the Navier-Stokes equations over the two-dimensional backward-facing step domain $\\Omega$ shown below:\n<img src=\"data/backward_facing_step.png\" width=\"80%\"/>\nA Poiseuille flow profile is imposed on the inlet boundary, and a no-flow (zero velocity) condition is imposed on the walls. A homogeneous Neumann condition of the Cauchy stress tensor is applied at the outflow boundary.\nThe inflow velocity boundary condition is characterized by $$\\boldsymbol{u}(\\boldsymbol{x};\\mu)=\\mu\\bigg {\\frac{1}{2.25}(x_1-2)(5-x_1),0\\bigg } \\quad \\forall \\boldsymbol{x}=(x_0,x_1) \\in \\Omega$$ \nThis problem is characterized by one parameter $\\mu$, which characterizes the inlet velocity. The range of $\\mu$ is the following $$\\mu \\in [1.0, 80.0].$$ \nThus, the parameter domain is $$\\mathbb{P}=[1.0,80.0].$$\nIn order to be able to compare the interpolation method (DEIM) used to solve this problem, we pursue an exact model reduction by means of a POD-Galerkin reduced order method.\n2. 
Parametrized formulation\nLet $\\boldsymbol{u}(\\mu)$ be the velocity vector and $p(\\mu)$ be the pressure in the domain $\\Omega$.\nWe will directly provide a weak formulation for this problem: <center>for a given parameter $\\mu \\in \\mathbb{P},$ find $u(\\mu) \\in \\mathbb{V}(\\mu), \\; p \\in\\mathbb{M}$ such that </center>\n<center>\n $\n \\begin{cases}\n \\nu \\int_{\\Omega} \\nabla \\boldsymbol{u} : \\nabla \\boldsymbol{v} \\ d\\Omega + \\int_{\\Omega} [(\\boldsymbol{u} \\cdot \\nabla) \\boldsymbol{u}] \\cdot \\boldsymbol{v} \\ d\\Omega - \\int_{\\Omega} p \\nabla \\cdot \\boldsymbol{v} \\ d\\Omega = \\int_{\\Omega} \\boldsymbol{f} \\cdot \\boldsymbol{v} \\ d\\Omega, \\quad \\forall \\boldsymbol{v} \\in\\mathbb{V}, \\\n \\int_{\\Omega} q \\nabla \\cdot \\boldsymbol{u} \\ d\\Omega = 0, \\quad \\forall q \\in\\mathbb{M}\n \\end{cases}\n $\n</center> \nwhere\n\n$\\nu$ represents kinematic viscosity\nthe functional space $\\mathbb{V}(\\mu)$ is defined as $\\mathbb{V}=[H^1_{\\Gamma_{wall}}(\\Omega)]^2$\nthe functional space $\\mathbb{M}(\\mu)$ is defined as $\\mathbb{M}=L^2(\\Omega)$\n\nSince this problem utilizes mixed finite element discretization with the velocity and pressure as solution variables, the inf-sup condition is necessary for the well posedness of this problem. Thus, the supremizer operator $T^{\\mu}: \\mathbb{M}_h \\rightarrow \\mathbb{V}_h$ will be used.",
"from ufl import transpose\nfrom dolfin import *\nfrom rbnics import *",
"3. Affine Decomposition",
"@ExactParametrizedFunctions()\nclass NavierStokes(NavierStokesProblem):\n\n # Default initialization of members\n def __init__(self, V, **kwargs):\n # Call the standard initialization\n NavierStokesProblem.__init__(self, V, **kwargs)\n # ... and also store FEniCS data structures for assembly\n assert \"subdomains\" in kwargs\n assert \"boundaries\" in kwargs\n self.subdomains, self.boundaries = kwargs[\"subdomains\"], kwargs[\"boundaries\"]\n dup = TrialFunction(V)\n (self.du, self.dp) = split(dup)\n (self.u, _) = split(self._solution)\n vq = TestFunction(V)\n (self.v, self.q) = split(vq)\n self.dx = Measure(\"dx\")(subdomain_data=self.subdomains)\n self.ds = Measure(\"ds\")(subdomain_data=self.boundaries)\n # ... as well as forcing terms and inlet velocity\n self.inlet = Expression((\"1. / 2.25 * (x[1] - 2) * (5 - x[1])\", \"0.\"), degree=2)\n self.f = Constant((0.0, 0.0))\n self.g = Constant(0.0)\n # Customize nonlinear solver parameters\n self._nonlinear_solver_parameters.update({\n \"linear_solver\": \"mumps\",\n \"maximum_iterations\": 20,\n \"report\": True\n })\n\n # Return custom problem name\n def name(self):\n return \"NavierStokesExact1\"\n\n # Return theta multiplicative terms of the affine expansion of the problem.\n @compute_theta_for_derivatives\n @compute_theta_for_supremizers\n def compute_theta(self, term):\n mu = self.mu\n if term == \"a\":\n theta_a0 = 1.\n return (theta_a0,)\n elif term in (\"b\", \"bt\"):\n theta_b0 = 1.\n return (theta_b0,)\n elif term == \"c\":\n theta_c0 = 1.\n return (theta_c0,)\n elif term == \"f\":\n theta_f0 = 1.\n return (theta_f0,)\n elif term == \"g\":\n theta_g0 = 1.\n return (theta_g0,)\n elif term == \"dirichlet_bc_u\":\n theta_bc00 = mu[0]\n return (theta_bc00,)\n else:\n raise ValueError(\"Invalid term for compute_theta().\")\n\n # Return forms resulting from the discretization of the affine expansion of the problem operators.\n @assemble_operator_for_derivatives\n @assemble_operator_for_supremizers\n def 
assemble_operator(self, term):\n dx = self.dx\n if term == \"a\":\n u = self.du\n v = self.v\n a0 = inner(grad(u) + transpose(grad(u)), grad(v)) * dx\n return (a0,)\n elif term == \"b\":\n u = self.du\n q = self.q\n b0 = - q * div(u) * dx\n return (b0,)\n elif term == \"bt\":\n p = self.dp\n v = self.v\n bt0 = - p * div(v) * dx\n return (bt0,)\n elif term == \"c\":\n u = self.u\n v = self.v\n c0 = inner(grad(u) * u, v) * dx\n return (c0,)\n elif term == \"f\":\n v = self.v\n f0 = inner(self.f, v) * dx\n return (f0,)\n elif term == \"g\":\n q = self.q\n g0 = self.g * q * dx\n return (g0,)\n elif term == \"dirichlet_bc_u\":\n bc0 = [DirichletBC(self.V.sub(0), self.inlet, self.boundaries, 1),\n DirichletBC(self.V.sub(0), Constant((0.0, 0.0)), self.boundaries, 2)]\n return (bc0,)\n elif term == \"inner_product_u\":\n u = self.du\n v = self.v\n x0 = inner(grad(u), grad(v)) * dx\n return (x0,)\n elif term == \"inner_product_p\":\n p = self.dp\n q = self.q\n x0 = inner(p, q) * dx\n return (x0,)\n else:\n raise ValueError(\"Invalid term for assemble_operator().\")\n\n\n# Customize the resulting reduced problem\n@CustomizeReducedProblemFor(NavierStokesProblem)\ndef CustomizeReducedNavierStokes(ReducedNavierStokes_Base):\n class ReducedNavierStokes(ReducedNavierStokes_Base):\n def __init__(self, truth_problem, **kwargs):\n ReducedNavierStokes_Base.__init__(self, truth_problem, **kwargs)\n self._nonlinear_solver_parameters.update({\n \"report\": True,\n \"line_search\": \"wolfe\"\n })\n\n return ReducedNavierStokes",
"4. Main program\n4.1. Read the mesh for this problem\nThe mesh was generated by the data/generate_mesh.ipynb notebook.",
"mesh = Mesh(\"data/backward_facing_step.xml\")\nsubdomains = MeshFunction(\"size_t\", mesh, \"data/backward_facing_step_physical_region.xml\")\nboundaries = MeshFunction(\"size_t\", mesh, \"data/backward_facing_step_facet_region.xml\")",
"4.2. Create Finite Element Space (Taylor-Hood P2-P1)",
"element_u = VectorElement(\"Lagrange\", mesh.ufl_cell(), 2)\nelement_p = FiniteElement(\"Lagrange\", mesh.ufl_cell(), 1)\nelement = MixedElement(element_u, element_p)\nV = FunctionSpace(mesh, element, components=[[\"u\", \"s\"], \"p\"])",
"4.3. Allocate an object of the NavierStokes class",
"problem = NavierStokes(V, subdomains=subdomains, boundaries=boundaries)\nmu_range = [(1.0, 80.0)]\nproblem.set_mu_range(mu_range)",
"4.4. Prepare reduction with a POD-Galerkin method",
"reduction_method = PODGalerkin(problem)\nreduction_method.set_Nmax(10)",
"4.5. Perform the offline phase",
"lifting_mu = (1.0,)\nproblem.set_mu(lifting_mu)\nreduction_method.initialize_training_set(100, sampling=EquispacedDistribution())\nreduced_problem = reduction_method.offline()",
"4.6. Perform an online solve",
"online_mu = (80.0,)\nreduced_problem.set_mu(online_mu)\nreduced_solution = reduced_problem.solve()\n\nplot(reduced_solution, reduced_problem=reduced_problem, component=\"u\")\n\nplot(reduced_solution, reduced_problem=reduced_problem, component=\"p\")",
"4.7. Perform an error analysis",
"reduction_method.initialize_testing_set(16, sampling=EquispacedDistribution())\nreduction_method.error_analysis()",
"4.8. Perform a speedup analysis",
"reduction_method.speedup_analysis()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
Olsthoorn/TransientGroundwaterFlow
|
exercises_notebooks/DelayedYield.ipynb
|
gpl-3.0
|
[
"Delayed yield\nIntroduction\nDelayed yield is a phenomenon of well drawdown in a confined aquifer, which seems to follow two differnt Theis curves, the first corresponding to the Theis curve belonging to the situation with a confined aqufier, the second corresponding to the Theis curve belonging to the situation with phreatic water.\nA set of numerically computed delayed yield curves that show the phenomenon is presented in the figure.\n\nFigure: Some numerically computed delayed yield drawdown curves.\nThe delayed yield is caused by vertical resistance in the aquifer (or of a covering layer), making release of water from the water table decline slow relative to that from elastic strorage. Due to this, the drawdown starts off and spreads as if the aqufier was confined, first like a Theis curve, and then like a Hantush curve seeking a steady-state value. However, this steady state is not reachted, because the water table itself starts declining, like if the aquifer were phreatic. At later times the drawdown has become so slow, that this water table decline can easily cope with the water table decline and no further delay is observed, causing the drawdown to follow the Theis curve that belongs to the specific yield instead of the elastic storage.\nThis phenomenon is important when pumping tests are carried out in water table aquifers or aquifers covered by a semi-confined toplayer above which the water level is not maintained. Such tests, of short, may not show the delayed yield, which may lead to the hastely drawn conclusion that the \"hantush\" drawdown that seems to establish soon after the start of the test, is the final drawdown. However, if we would have continued our test longer, we would have cearly seen the darwdown increasing again and following the second Theis curve. 
The delayed drawdown can be substantially larger than the early time elastic drawdown and false conclusions may be drawn if this phenomenon is not anticipated by the designers of the pumping test.\nDelayed yield: two Theis curves\nThe drawdown in a confined aquifer follows Theis. The same is true for that in an unconfined aquifer under the condition that the drawdown is not too great relative to the thickness of the aquifer, such that this thickness can be assumed constant. The difference between the two drawdowns (noting that the transmissivity is the same in both cases), is the delay of the unconfined drawdown relative to that in the confined aquifer.\nLet's analyze this by starting with the Theis drawdown solution\n$$ s(r,t) = \\frac Q {4 \\pi kD} W( u ) \\,\\,\\,\\, with \\,\\,\\,\\, u = \\frac {r^2 S} {4 kD t} $$ \nThe two cases differ by their storage coefficient, which is $S_e$ for the confined case and $S_y$, the specific yield, for the unconfined case. So we have\n$$ u_{conf} = \\frac {r^2 S_2} {4 kD t}\\,\\,\\,\\,\\, and \\,\\,\\,\\,\\, u_{unconf} = \\frac {r^2 S_y} {4 kD t} $$\nand, therefore\n$$ \\frac {u_{unconf}} {u_{conf}} = \\frac {S_y} {S_e} $$\nGiven that $S_y$ is two orders of magnitude larger than $S_e$, we see that both curves are the same, except that the curve for the unconfined case is two orders of magnitude delayed with respect to the first. One can see this as the time in the unconfined case has to be $\\frac {S_y} {S_e}$ times that of the confined case to get the same $W(u)$ value and, therefore the same drawdown.\nWe have also seen what the radius of influence was. 
We got it from the logarithmic approximation of the Theis drawdown, which was\n$$ s(r, t) \\approx \\frac Q {4 \\pi kD } \\ln \\left( \\frac {2.25 kD t} {r^2 S} \\right) $$\nand realizing that s=0 when the argument of the $\\ln(\\cdots)$ equals 1, so:\n$$ r = \\sqrt {\\frac {2.25 kD t} S } $$\nIf we draw the two curves on double log scale, as usual $W(u)$ versus $1/u$, then dividing $u$ by a factor, i.e. multiplying $1/u$ by that factor, implies shifting the drawdown curve to the right, but without changing its shape.\nWe see here that if $S_y$ is two orders of magnitude larger than $S_e$, the radius of influence is one order of magnitude larger. So the radius of influence grows in the order of 10 times faster in the confined case than it does in the unconfined case.\nThis leads to delayed yield in situations where the hydraulic vertical resistance in the aquifer is relatively high. In that case the immediate drawdown is dominated by the elastic Theis case and spreads rapidly from the well. But as soon as the elastic drawdown establishes itself, the free water table will decline and provide water to the flow in the aquifer. After some time, the free water table will adapt to the elastic drawdown in the aquifer and the system will behave as if it were unconfined. So initially the head in the aquifer will behave like the Theis formula with the elastic storage coefficient, but in the long run it will behave like the Theis formula with the unconfined storage coefficient, the specific yield. Of course, there is a transition between the two curves.",
"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.special import expi, k0, k1 # exp. integral and two bessel functions\nfrom wells import Wh # Hantush well function defined in module wells\n\ndef W(u): return -expi(-u) # Theis well function\n\nSe = 1e-3\nSy = 1e-1\nu = np.logspace(-5., 2., 81)\nue = u\nuy = ue * Sy/Se \n\nax = plt.figure().add_subplot(111)\nax.set(xlabel=\"1/u\", ylabel=\"W(u)\", xscale=\"log\", yscale=\"log\",\n ylim=(1e-6, 10), \n title=\"$W(u_e)$ and $W(u_y)$\")\nax.plot(1/u, W(ue), 'r', label=\"W(ue)\")\nax.plot(1/u, W(uy), 'b', label=\"W(uy)\")\nax.legend(loc='best')\nplt.show()",
"Figure: The two Theis curves caused by delayed yield\nWe see that the blue curve is equal to the red one but shifted exactly a factor $S_y/S_e = 100$ to the right, i.e. it is delayed by a factor $S_y/S_e$.\nTherefore, in an unconfined aquifer with some vertical resistance, we expect the drawdown to initially follow the red curve, the elastic drawdown curve, and after some transition time follow the blue curve, the unconfined drawdown curve.\nWe already know the behavior of the Hantush drawdown. If a constant head is maintained above the aquifer and leakage occurs through a leaky confining layer towards the aquifer proportional to the difference between the maintained head above and the lowered head in the aquifer, then the drawdown will after some time become constant. The Hantush drawdown curves all deviate from their initial Theis start to become horizontal, the level of which depends on the distance from the well, $r$, and the characteristic length $\\lambda = \\sqrt{ kD c}$ of the aquifer system. The higher the resistance, that is, the larger the $\\lambda$, the longer will the drawdown follow the Theis curve and the larger will the final Hantush drawdown be.\nThe delayed yield situation is similar to that of Hantush until the drawdown in the overlying layer, or of the water table, becomes substantial, its supply diminishes and therefore, the drawdown must match that of the delayed Theis curve, the curve that belongs to the specific yield $S_y$.\nA (famous) pumping test\nThe pumping test was carried out in the Gironde Valley in France in 1965. Bonnet et al. [1970] published an analysis on the basis of Boulton's theory. The aquifer is clayey in shallow depth and sandy to gravelly at larger depths. The aquifer bottom is at 13.75 m and the initial water table at 5.51 m, so that the wet thickness b=8.24 m.\nThe well was screened between 7 and 13.5 m and has a diameter of 0.32 m. 
The pumping lasted for 48h 50 min at a rate of 53 m3/h, but oscillated between 51 and 54.6 m3/h. Drawdowns were monitored at 10 and 30 m from the well.\nDue to the large penetration, the effect of partial penetration can be neglected at 10 and 30 m. Although not probable, due to lack of information about the observation wells, their screens were assumed to be perforated over the entire depth of the aquifer. The results were consistent with this assumption, which may also mean that the vertical resistance within the coarser part of the aquifer may be neglected as it may very well be that the screen was only perforated in the coarser part of the aquifer.\nThe drawdown data are as follows:",
"import csv\nimport wells\n\nW = wells.W # Theis\nWh = wells.Wh # Hantush\n\n\n# Global info and prefrences for this project\nproj = \"Pumping test at St. Pardon de Conques (1965)\"\nfolder = './ptestDelayed'\np_names = 'ptst02', 'ptst10', 'ptst30'\nl_names = 'semilog02_late', 'semilog10_late', 'semilog30_late', 'semilog10_early'\n\nplotsettings = {'xlabel':'t [s]', 'ylabel':'s [m]', 'xscale':'log',\n 'xlim':(1e-1, 1e6), 'title':proj}\npltset0 = plotsettings.copy(); pltset0['yscale']='log'; pltset0['ylim'] =(1e-2, 1.0)\npltset1 = plotsettings.copy(); pltset1['yscale']='linear'; pltset1['ylim']=(0.,0.9)\n\n",
"Next we define a class piezometer, that allows us to store information pertaining to individual piezometers, so that we can easily plot them, use their data together with their meta data. Defining such a class prevents clutter. It's always a good idea to define a class or a function when this prevents duplicating code. Duplicated code is always error-prone and very hard to update and maintain.",
"class Piezometer:\n \"\"\"Piezometer definition\"\"\"\n \n clrs ='brgkmcy'\n styles = ['-',';','--','-.']\n markers = ['o','s','^','v','p','+','x','.']\n c_cnt = -1\n l_cnt = -1\n m_cnt = -1\n \n def __init__(self, name=\"\", t=np.array([]), s=np.array([]),\n dim_t='s', dim_s='m', color=None, linestyle=None, marker=None):\n self.name = name\n self.t = t\n self.s = s\n self.dim_t = dim_t\n self.dim_s = dim_s\n self.color = color\n self.linestyle = linestyle\n self.marker = marker\n \n self.P = type(self) # the class\n \n if color is None:\n self.color = self.nextClr()\n else:\n self.color = color\n if linestyle is None:\n self.linestyle = self.nextStyle()\n else:\n self.linestyle = linestyle\n if marker is None:\n self.marker = self.nextMarker()\n else:\n self.marker = marker\n \n def plot(self):\n \"\"\"Plot the piezometer\"\"\"\n ax =plt.gca()\n lspec = self.color + self.marker + self.linestyle\n ax.plot(self.t, self.s, lspec)\n \n def nextClr(self):\n \"\"\"Remembers which color previous piezometer used and chooses the next color\"\"\"\n C = self.P\n C.c_cnt += 1\n if C.c_cnt == len(C.clrs):\n C.c_cnt = 0\n return C.clrs[C.c_cnt]\n \n def nextStyle(self):\n \"\"\"Remembers which line style the previous piezometer used and chooses the next style\"\"\"\n C = self.P\n C.l_cnt += 1\n if C.l_cnt == len(C.styles):\n C.l_cnt = 0\n return C.styles[C.l_cnt]\n \n def nextMarker(self):\n \"\"\"Remembers which marker the previous piezometer used and chooses the next marker\"\"\"\n C = self.P\n C.m_cnt += 1\n if C.m_cnt == len(C.markers):\n C.m_cnt = 0\n return C.markers[C.m_cnt]\n",
"The function invK0 defined below is the inverse of the K0 bessel function. It takes the value of K0(z) and computes z. To do this, Newton Raphson iterations are used. Look up the Newton Raphson method on the internet (Wikipedia), if you don't know it. It's a basic numerical method.",
"def invK0(K0, tol=1e-6, verbose=False):\n \"Return x if K0(x) is given (Newton Raphson)\"\n x = tol\n for i in range(100):\n f = K0 - k0(x)\n facc = k1(x)\n x = x - f/facc\n if verbose:\n print(\"{:3d} {:10.3g} {:10.3g}\".format(i, x, f/facc))\n if np.abs(f/facc) < tol:\n break\n return x",
"The next function reads in the data for all piezometers and returns a list of piezometer objects, that is, objects of the class defined above.",
"def get_data(folder, names, linestyle=None, marker=None):\n \"Reads in the data, returns them as a list of class Piezometer\"\n piezoms = []\n for name in names:\n p = Piezometer(name=name)\n fName = folder + '/' + name + '.csv'\n with open(fName) as csvfile: \n datareader = csv.reader(csvfile, delimiter=',', quotechar=\"|\")\n t = []\n s = []\n for j, row in enumerate(datareader):\n if j>5: # skip first 5 lines\n t.append(row[0])\n s.append(row[1])\n p = Piezometer(name=name, t=np.array(t), s=np.array(s), linestyle=linestyle, marker=marker)\n piezoms.append(p)\n return piezoms",
"Now read the data, returning the piezometers in a list, called \"piezoms\".\nAfter that show the lines on a semilog graph, first as they are read and then together with the best linear approximation.",
"# reads piezometers\npiezoms = get_data(folder, p_names, linestyle='', marker='o')\n\n# get straight lines drawn through piezometer data\npiezlines = get_data(folder, l_names, linestyle='-', marker='')\n\n\n# Show the data\n\n# Double log scale\nax1 = plt.figure().add_subplot(111)\nax1.set(**pltset0)\nax1.grid(True)\n\nfor p in piezoms:\n lspec = p.color + p.linestyle + p.marker\n ax1.plot(p.t , p.s, lspec, label=p.name)\nax1.legend(loc='best', fontsize='small')\n\n# Semilog scale\nax2 = plt.figure().add_subplot(111)\nax2.set(**pltset1)\nax2.grid(True)\n\nfor p in piezoms:\n lspec = p.color + p.linestyle + p.marker\n ax2.plot(p.t, p.s, lspec, label=p.name)\n \nfor pl in piezlines:\n lspec = pl.color + pl.linestyle + pl.marker\n ax2.plot(pl.t, pl.s, lspec, label=pl.name)\nax2.legend(loc='best', fontsize='small')\nplt.show()",
"Interpretation of the pumping test\nThe data on double log scales should reveal a picture of the delayed type curves. The first part of the lines should mimic Theis, the intermediate part should tend to a horizontal line as predicted by the Hantush type curves and the last part should also mimic the later part of a Theis curve. One sees however, that it is difficult to draw exact conclusions. From the continuously, almost constant rising curves, it is obvious, on the other hand, that no Theis like curve could be fitted. The same is true for the Hantush curves. The only thing we observe that gives a hint towards delayed yield is the mid part of the green line that tends to horizontal but later on starts to rise again.\nWe also observe some increase of drawdown at the very end of the test, at least for the piezometers at 10 and 30 m. We don't know what this is, because none of our models will give such an upward decline. But it may very well be caused by weather influences, other boundaries, and even bounce-back of an impervious boundary far away. Therefore, without any further information regarding the conditions of the test, we ignore this deviation for our analysis.\nWhat could help is drawing the drawdown data on linear scale but keeping the time data on logarithmic scale. From the behavior of Theis curves, we know that the drawdown will follow straight lines on such graphs. That is, the drawdown per log cycle is constant and the same for all piezometers, i.e. irrespective of their distance from the well. Clearly, differences may exist in this respect between individual piezometers, due to heterogeneity of the aquifer. But we expect those to be small at the scale of some tens of meters that pertain to these piezometers.\nThe best way is thus to fit straight parallel lines through all of the data. Keeping the lines parallel is based on the fact that they have to be parallel in a homogeneous system, and it minimizes errors where fitting such lines is uncertain. 
The result is given in the figure above.\nFrom this we conclude that the data provide indeed information on the early and late Theis curves. The first thing to conclude is that the ratio of the specific yield and the elastic storage coefficient equals the shift between the two straight lines drawn through the green points.\n$$ \\frac {S_y} {S_e} = \\frac {3 \\times 10^{1}} {2.2 \\times 10^{0}} \\approx 14 $$ \nWe may then compute the transmissivity and the storage coefficient using the log approximation of the Theis curve.\nThe change of the drawdown per logcycle is also read from the straight lines, it is 0.115 m\n$$ s_{10t} - s_{t} = \\frac Q {4 \\pi kD} \\ln 10 $$\nSo that\n$$ kD = \\frac Q {4 \\pi} \\frac {\\ln 10} {0.115} $$\nWith $Q = 53$ m3/h = 0.0147 m3/s, this yields:\n$$ kD = 0.023 \\,\\, m2/s = 84.4 \\,\\, m2/h = 2025\\,\\, m2/d $$\nand\n$$ s = \\frac Q {2 \\pi kD} \\ln \\left( \\frac {2.25 kD t} { r^2 S } \\right) $$\nsetting $s=0$, that is the argument to 1 and filling in $t$ for $s=0$ and $r$, i.e. $t=30$ s and $r=10$ m for the green data points yielding\n$$ \\frac {S_y} {kD} = \\frac {2.25 \\times 30} {10^2} = 0.675 $$\n$$ S_y = 0.675 \\, kD = 0.675 \\times 0.023 = 1.6 \\times 10^{-2} $$\nand, therefore\n$$ S_e = S_y / 14 = 1.13 \\times 10^{-3} $$\nThe last property to estimate is the vertical resistance of the confining top of the aquifer. For this we need the horizontal branch of the best fitting Hantush type curve. 
We don't really see it in the data, but we could estimate it from the green datapoints at $s = 0.22$ m.\nWith this we may directly use the steady state solution for a well in a semi-confined aquifer\n$$ s(r) = \\frac Q {2 \\pi kD} K_0 \\frac r \\lambda $$\n$$ K_0 \\frac r \\lambda = 2 \\pi kD \\frac {s(r)} Q = 2 \\pi 0.023 \\times \\frac {0.22} {0.0147} = 2.16 $$\nBy some trial and error this yields \n$$ \\frac r \\lambda \\approx 0.131 \\rightarrow \\lambda = \\frac {10} {0.131} = 76.3 m $$\nand, therfore\n$$ kDc = \\lambda^2 \\rightarrow c = \\frac {76.3^2} {0.023} = 253000 \\,\\,s = 70.4 \\,\\, h $$\nNeuman uses the term $\\beta = \\frac {k_z} {k_r} \\frac {r^2} {D^2}$ which can be converted as follows:\n$$ \\beta = \\frac {k_z} {k_r} \\frac {r^2} {D^2} = \\frac {r^2} {k_r D} \\frac {k_z} {D} = \\frac {r^2} {k_r D c} = \\frac {r^2} {\\lambda^2} $$\n$$ \\beta = \\left( \\frac {10} {76.3} \\right)^2 = 0.0172 $$\n$$ \\frac {k_z} {k_r} = \\beta \\frac {D^2} {r^2} = 0.0172 \\times \\frac {8.24^2} {10^2} = 0.0117 $$\n$$ \\frac {k_r} {k_z} = 85.7 $$\n$$ k_r = \\frac {kD} D = \\frac {0.023} {8.24} = 2.8 \\times 10^{-3} \\,\\, m/s = 10.0 \\,\\, m/h $$\n$$ k_z = \\frac {k_r} {85.7} = 3.27 \\times 10^{-5} \\,\\, m/s = 0.12\\,\\, m/h $$\n$$ c = \\frac D {k_z} = \\frac {8.24} {3.27 \\times 10^{-5}} = 252000\\,\\, sec = 70.0 \\,\\, h$$",
"# compute it all\ndef all(author, proj, t0, s, ds, sig, D=8.24, Q=53., r=10.):\n sph = 3600.\n t0 = t0/sph # [h]\n kD = Q / (4 * np.pi) * np.log(10) / ds\n Sy = 2.25 * kD * t0 / r**2\n Se = Sy / sig\n besK0 = 2 * np.pi * kD * s / Q\n Lam = r / invK0(besK0)\n beta = (r/Lam)**2 # (r/lambda)**2\n av = beta * (D/r)**2 # kz/kr\n kr = kD/D\n kz = kr * av\n c = D/kz \n\n print(\"\\nUsing the data from {}:\".format(author)) \n print(\"Q = {:10.3g} m3/h\".format(Q))\n print(\"kD = {:10.3g} m2/h\".format(kD))\n print(\"Sy = {:10.3g} [-]\".format(Sy))\n print(\"Se = {:10.3g} [-]\".format(Se))\n print(\"Sy/Se = {:10.3g} [-]\".format(Sy/Se))\n print(\"Se/Sy = {:10.3g} [-]\".format(Se/Sy))\n print(\"K0(r/lambda)= {:7.3g} [-]\".format(besK0))\n print(\"r/lambda = {:10.3g} [m]\".format(r/Lam))\n print(\"r = {:10.3g} [m]\".format(r))\n print(\"lambda = {:10.3g} [m]\".format(Lam))\n print(\"beta = {:10.3g} [-]\".format(beta)) \n print(\"kz/kr = {:10.3g} [-]\".format(kz/kr))\n print(\"kr/kz = {:10.3g} [-]\".format(kr/kz))\n print(\"av = {:10.3g} [-]\".format(av))\n print(\"kr = {:10.3g} [m/h]\".format(kr))\n print(\"kz = {:10.3g} [m/h]\".format(kz))\n print(\"c = {:10.3g} [h]\".format(c))\n print()\n \n return author, proj, Q/sph, r, D, kr/sph, kz/sph, c*sph, Sy, Se\n\n# compute it all\n\n# me (2016)\nt0 = 30.# sec !\ns = 0.22\nds = 0.115\nsig = 14.\nme = all('me', proj, t0, s, ds, sig)\n\n# Neuman (1975)\nt0 = 70 # sec !\ns = 0.28\nsig = 14.5\nds = 0.137\nneuman = all('Neuman', proj, t0, s, ds, sig)\n",
"The essential difference between the results of Neuman (using the semilog method) and the independently derived figures, is the steady state drawdown, i.e. the Hantush case, that would pertain to the situation in which the water table would be fixed by continuous additional supply of water. That figure is difficult to obtain from the data. Given the curves a figure of 0.22 m for the r=10 m piezometer would seem valid, but 0.285 m is needed to make the results fit with those of Neuman.\nNow that we have the data, we can plot both the Theis and Hantush curves together with the data to verify the match.",
"#def plotResult(author, proj, Q, r, Lam, c, kD, Sy, Se, t, plotset):\n\ndef plotResult(author, proj, Q, r, D, kr, kz, c, Sy, Se, t, plotset):\n \n kD = kr * D\n Lam = np.sqrt(kD * c)\n \n ue = r**2 *Se /(4 * kD * t)\n uy = r**2 *Sy /(4 * kD * t)\n\n sh = Q/ (4 * np.pi * kD) * Wh(ue, r/Lam)\n se = Q/ (4 * np.pi * kD) * W(ue)\n sy = Q/ (4 * np.pi * kD) * W(uy)\n\n\n ax = plt.figure().add_subplot(111)\n ax.set(**plotset)\n ax.set_title(proj + \", analyzed by \" + author)\n ax.grid(True)\n\n for p in piezoms:\n lspec = p.color + p.linestyle + p.marker\n ax.plot(p.t , p.s, lspec, label=p.name)\n\n ax.plot(t, sh.ravel(), label='Hantush')\n ax.plot(t, se, label='Theis elastic')\n ax.plot(t, sy, label='Theis spec. yld')\n\n ax.legend(loc='best', fontsize='x-small')\n plt.show()\n \n return ax\n \nsph = 3600. # seconds per hour\nt = np.logspace(1, 6, 51) # seconds\n\nax3 = plotResult(*me, t, pltset1)\nax4 = plotResult(*neuman, t, pltset1)",
"In conclusion, the curves analysed here deviate little from the measurements. We had to approximate the steady-state drawdown without delayed yield, to obtain a value for $ r/\\lambda $. The value chosen was 0.22 m, whereas the value that follows from the results of Neuman would be .28 m, corresponding to the horizontal branch of the Hantush curves. Neuman's results do not seem to agree with the Hantush curve that is expected to match. It is, however, unclear what the reason for this difference is. It could be tested using a numerical model.",
"modules = '/Users/Theo/GRWMODELS/Python_projects/mfpy/modules/'\n\nimport sys\n\nif not modules in sys.path:\n sys.path.insert(0, modules)\n \nimport mfgrid as grid\nimport fdm_t\n\n# author, r, Q, D, kr, kz, c, Sy, Se\nt = np.logspace(-1, 6, 71)\nax3 = plt.figure().add_subplot(111)\nax4 = plt.figure().add_subplot(111)\n\ndef numMdl(author, proj, Q, r, D, kr, kz, c, Sy, Se, t, ax, piezoms=None):\n print(author)\n x = np.logspace(-1, 4, 51)\n y = np.array([-0.5, 0.5])\n z = np.array([0.01, 0.0, -D])\n gr = grid.Grid(x, y, z, axial=True)\n\n Kr = gr.const(kr)\n Kz = gr.const(kr); Kz[:,:,0] = gr.dz[0] / c / 2.\n Ss = gr.const(Se/D)\n Ss[:,:,0] = Sy/gr.dz[0]\n IBOUND = gr.const(1)\n FQ = gr.const(0.)\n FH = gr.const(0.)\n FQ[0,0,-1] = Q\n\n out = fdm_t.fdm3t(gr, t, (Kr, Kr, Kz), Ss, FQ, FH, IBOUND)\n # Get heads in lower layer\n phi = out.Phi.reshape((len(t), gr.Nx, gr.Nz))\n phi = out.Phi[:,0,:,-1]\n # interpolate r on x-grid\n up = np.interp(r, gr.xm, np.arange(gr.Nx))\n u, iu = up - int(up), int(up)\n # interpolate phi to get data exactly on x=r\n phi_t = phi[:,iu] + u * (phi[:,iu+1] - phi[:,iu])\n \n if not piezoms is None:\n for p in piezoms:\n lspec = p.color + p.linestyle + p.marker\n ax.plot(p.t , p.s, lspec, label=p.name)\n\n ax.plot(t, phi_t, 'b', linewidth=2, label='numerical')\n ax.legend(loc='best', fontsize='small')\n ax.set(xlabel='t [s]', ylabel='s [m]', xscale='log', yscale='log', ylim=(1e-2, 1.),\n title = proj + ', analyzed by ' + author)\n ax.grid(True)\n return out\n \nout1 = numMdl(*me , t, ax3, piezoms)\nout2 = numMdl(*neuman, t, ax4, piezoms)\n\nplt.show()",
"The numerical model shows a good agreement with the measurements. This is not the case for the data that Neuman (1975) obtained."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
AllenDowney/ModSimPy
|
notebooks/pendulum2.ipynb
|
mit
|
[
"Modeling and Simulation in Python\nChapter 10 Example: Rigid Pendulum\nCopyright 2017 Allen Downey\nLicense: Creative Commons Attribution 4.0 International",
"# If you want the figures to appear in the notebook, \n# and you want to interact with them, use\n# %matplotlib notebook\n\n# If you want the figures to appear in the notebook, \n# and you don't want to interact with them, use\n# %matplotlib inline\n\n# If you want the figures to appear in separate windows, use\n# %matplotlib qt5\n\n# tempo switch from one to another, you have to select Kernel->Restart\n\n%matplotlib notebook\n\nfrom modsim import *",
"Pendulum\nThis notebook solves the Spider-Man problem from spiderman.ipynb, demonstrating a different development process for physical simulations.\nIn pendulum_sympy, we derive the equations of motion for a rigid pendulum without drag, yielding:\n$ \\ddot{x} = \\frac{x}{x^{2} + y^{2}} \\left(g y - vx^{2} - vy^{2}\\right) $\n$ \\ddot{y} = - \\frac{1}{x^{2} + y^{2}} \\left(g x^{2} + y \\left(vx^{2} + vy^{2}\\right)\\right) $\nWe'll use the same conditions we saw in spiderman.ipynb",
"condition = Condition(g = 9.8,\n m = 75,\n area = 1,\n rho = 1.2,\n v_term = 60,\n duration = 30,\n length0 = 100,\n angle = (270 - 45),\n k = 20)",
"Now here's a version of make_system that takes a Condition object as a parameter.\nmake_system uses the given value of v_term to compute the drag coefficient C_d.",
"def make_system(condition):\n \"\"\"Makes a System object for the given conditions.\n \n condition: Condition with height, g, m, diameter, \n rho, v_term, and duration\n \n returns: System with init, g, m, rho, C_d, area, and ts\n \"\"\"\n unpack(condition)\n \n theta = np.deg2rad(angle)\n x, y = pol2cart(theta, length0)\n P = Vector(x, y)\n V = Vector(0, 0)\n \n init = State(x=P.x, y=P.y, vx=V.x, vy=V.y)\n C_d = 2 * m * g / (rho * area * v_term**2)\n ts = linspace(0, duration, 501)\n \n \n return System(init=init, g=g, m=m, rho=rho,\n C_d=C_d, area=area, length0=length0,\n k=k, ts=ts)",
"Let's make a System",
"system = make_system(condition)\nsystem\n\nsystem.init",
"To write the slope function, we can get the expressions for ax and ay directly from SymPy and plug them in.",
"def slope_func(state, t, system):\n \"\"\"Computes derivatives of the state variables.\n \n state: State (x, y, x velocity, y velocity)\n t: time\n system: System object with length0, m, k\n \n returns: sequence (vx, vy, ax, ay)\n \"\"\"\n x, y, vx, vy = state\n unpack(system)\n\n ax = x*(g*y - vx**2 - vy**2)/(x**2 + y**2)\n ay = -(g*x**2 + y*(vx**2 + vy**2))/(x**2 + y**2)\n\n return vx, vy, ax, ay",
"As always, let's test the slope function with the initial conditions.",
"slope_func(system.init, 0, system)",
"And then run the simulation.",
"%time run_odeint(system, slope_func)",
"Visualizing the results\nWe can extract the x and y components as Series objects.",
"xs = system.results.x\nys = system.results.y",
"The simplest way to visualize the results is to plot x and y as functions of time.",
"newfig()\nplot(xs, label='x')\nplot(ys, label='y')\n\ndecorate(xlabel='Time (s)',\n ylabel='Position (m)')",
"We can plot the velocities the same way.",
"vxs = system.results.vx\nvys = system.results.vy\n\nnewfig()\nplot(vxs, label='vx')\nplot(vys, label='vy')\n\ndecorate(xlabel='Time (s)',\n ylabel='Velocity (m/s)')",
"Another way to visualize the results is to plot y versus x. The result is the trajectory through the plane of motion.",
"newfig()\nplot(xs, ys, label='trajectory')\n\ndecorate(xlabel='x position (m)',\n ylabel='y position (m)')",
"We can also animate the trajectory. If there's an error in the simulation, we can sometimes spot it by looking at animations.",
"newfig()\ndecorate(xlabel='x position (m)',\n ylabel='y position (m)',\n xlim=[-100, 100],\n ylim=[-200, -50],\n legend=False)\n\nfor x, y in zip(xs, ys):\n plot(x, y, 'bo', update=True)\n sleep(0.01)",
"Here's a function that encapsulates that code and runs the animation in (approximately) real time.",
"def animate2d(xs, ys, speedup=1):\n \"\"\"Animate the results of a projectile simulation.\n \n xs: x position as a function of time\n ys: y position as a function of time\n \n speedup: how much to divide `dt` by\n \"\"\"\n # get the time intervals between elements\n ts = xs.index\n dts = np.diff(ts)\n dts = np.append(dts, 0)\n\n # decorate the plot\n newfig()\n decorate(xlabel='x position (m)',\n ylabel='y position (m)',\n xlim=[xs.min(), xs.max()],\n ylim=[ys.min(), ys.max()],\n legend=False)\n\n # loop through the values\n for x, y, dt in zip(xs, ys, dts):\n plot(x, y, 'bo', update=True)\n sleep(dt / speedup)\n\nanimate2d(system.results.x, system.results.y)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
jhamrick/original-nbgrader
|
examples/create_assignment/Assignment Template.ipynb
|
mit
|
[
"# import plotting libraries\n%matplotlib inline\nimport matplotlib.pyplot as plt\n\n%load_ext nbgrader\n%render_template_as solution",
"Problem 1\nWrite a function that returns a list of numbers, such that $x_i=i^2$, for $1\\leq i \\leq n$. Make sure it handles the case where $n<1$ by raising a ValueError.",
"def squares(n):\n \"\"\"Compute the squares of numbers from 1 to n, such that the \n ith element of the returned list equals i^2.\n \n \"\"\"\n {% if solution %}\n if n < 1:\n raise ValueError(\"n must be greater than or equal to 1\")\n return [i ** 2 for i in xrange(1, n + 1)]\n {% else %}\n # YOUR CODE HERE\n raise NotImplementedError\n {% endif %}",
"Your function should print [1, 4, 9, 16, 25, 36, 49, 64, 81, 100] for $n=10$. Check that it does:",
"squares(10)\n\n\"\"\"Check that squares returns the correct output for several inputs\"\"\"\nfrom nbgrader.tests import assert_equal\nassert_equal(squares(1), [1])\nassert_equal(squares(2), [1, 4])\nassert_equal(squares(10), [1, 4, 9, 16, 25, 36, 49, 64, 81, 100])\nassert_equal(squares(11), [1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121])\n\n\"\"\"Check that squares raises an error for invalid inputs\"\"\"\nfrom nbgrader.tests import assert_raises\nassert_raises(ValueError, squares, 0)\nassert_raises(ValueError, squares, -4)",
"Problem 2\nPart A\nUsing your squares function, write a function that computes the sum of the squares of the numbers from 1 to $n$. Your function should call the squares function -- it should NOT reimplement its functionality.",
"def sum_of_squares(n):\n \"\"\"Compute the sum of the squares of numbers from 1 to n.\"\"\"\n {% if solution %}\n return sum(squares(n))\n {% else %}\n # YOUR CODE HERE\n raise NotImplementedError\n {% endif %}",
"The sum of squares from 1 to 10 should be 385. Verify that this is the answer you get:",
"sum_of_squares(10)\n\n\"\"\"Check that sum_of_squares returns the correct answer for various inputs.\"\"\"\nassert_equal(sum_of_squares(1), 1)\nassert_equal(sum_of_squares(2), 5)\nassert_equal(sum_of_squares(10), 385)\nassert_equal(sum_of_squares(11), 506)\n\n\"\"\"Check that sum_of_squares relies on squares.\"\"\"\norig_squares = squares\ndel squares\ntry:\n assert_raises(NameError, sum_of_squares, 1)\nexcept AssertionError:\n raise AssertionError(\"sum_of_squares does not use squares\")\nfinally:\n squares = orig_squares",
"Part B\nUsing LaTeX math notation, write out the equation that is implemented by your sum_of_squares function.\n{% if solution %}\n$\\sum_{i=1}^n i^2$\n{% else %}\nYOUR ANSWER HERE\n{% endif %}\nPart C\nCreate a plot of the sum of squares for $n=1$ to $n=15$. Make sure to appropriately label the $x$-axis and $y$-axis, and to give the plot a title. Set the $x$-axis limits to be 1 (minimum) and 15 (maximum).",
"fig, ax = plt.subplots() # do not delete this line!\n{% if solution %}\nx = range(1, 16)\ny = [sum_of_squares(x[i]) for i in xrange(len(x))]\nax.plot(x, y)\nax.set_title(\"Sum of squares from 1 to $n$\")\nax.set_xlabel(\"$n$\")\nax.set_ylabel(\"sum\")\nax.set_xlim([1, 15])\n{% else %}\n# YOUR CODE HERE\nraise NotImplementedError\n{% endif %}\n\n\"\"\"Check that the axis limits are correct.\"\"\"\nassert_equal(ax.get_xlim(), (1.0, 15.0))\n\n\"\"\"Check that the xlabel is set.\"\"\"\nfrom nbgrader.tests import assert_unequal\nassert_unequal(ax.get_xlabel(), \"\", \"xlabel not set\")\n\n\"\"\"Check that the ylabel is set.\"\"\"\nassert_unequal(ax.get_ylabel(), \"\", \"ylabel not set\")\n\n\"\"\"Check that the title is set.\"\"\"\nassert_unequal(ax.get_title(), \"\", \"title not set\")\n\n\"\"\"Check that the correct xdata was used.\"\"\"\nfrom nbgrader.tests import assert_allclose, assert_same_shape\n\nlines = ax.get_lines()\nassert_equal(len(lines), 1)\n\nxdata = lines[0].get_xdata()\nxdata_correct = np.arange(1, 16)\nassert_same_shape(xdata, xdata_correct)\nassert_allclose(xdata, xdata_correct)\n\n\"\"\"Check that the correct ydata was used.\"\"\"\nlines = ax.get_lines()\nassert_equal(len(lines), 1)\n\nxdata = lines[0].get_xdata()\nydata = lines[0].get_ydata()\nydata_correct = [sum_of_squares(x) for x in xdata]\nassert_same_shape(ydata, ydata_correct)\nassert_allclose(ydata, ydata_correct)"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
kaleoyster/nbi-data-science
|
Bridge Life-Cycle Models/CDF+Probability+Reconstruction+vs+Age+of+Bridges+in+the+Southeast+United+States.ipynb
|
gpl-2.0
|
[
"Libraries and Packages",
"import pymongo\nfrom pymongo import MongoClient\nimport time\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nfrom matplotlib.pyplot import *\nimport matplotlib.pyplot as plt\nimport folium\nimport datetime as dt\nimport random as rnd\nimport warnings\nimport datetime as dt\nimport csv\n%matplotlib inline",
"Connecting to National Data Service: The Lab Benchwork's NBI - MongoDB instance",
"warnings.filterwarnings(action=\"ignore\")\nClient = MongoClient(\"mongodb://bridges:readonly@nbi-mongo.admin/bridge\")\ndb = Client.bridge\ncollection = db[\"bridges\"]",
"Extracting Data of Southeast states of the United states from 1992 - 2016.\nThe following query will extract data from the mongoDB instance and project only selected attributes such as structure number, yearBuilt, deck, year, superstructure, owner, countryCode, structure type, type of wearing surface, and subtructure.",
"def getData(state):\n pipeline = [{\"$match\":{\"$and\":[{\"year\":{\"$gt\":1991, \"$lt\":2017}},{\"stateCode\":state}]}},\n {\"$project\":{\"_id\":0,\n \"structureNumber\":1,\n \"yearBuilt\":1,\n \"yearReconstructed\":1,\n \"deck\":1, ## Rating of deck\n \"year\":1,\n 'owner':1,\n \"countyCode\":1,\n \"substructure\":1, ## rating of substructure\n \"superstructure\":1, ## rating of superstructure\n \"Structure Type\":\"$structureTypeMain.typeOfDesignConstruction\",\n \"Type of Wearing Surface\":\"$wearingSurface/ProtectiveSystem.typeOfWearingSurface\",\n }}]\n dec = collection.aggregate(pipeline)\n conditionRatings = pd.DataFrame(list(dec))\n\n ## Creating new column: Age\n conditionRatings['Age'] = conditionRatings['year']- conditionRatings['yearBuilt']\n \n return conditionRatings\n",
"Filteration of NBI Data\nThe following routine removes the missing data such as 'N', 'NA' from deck, substructure,and superstructure , and also removing data with structure Type - 19 and type of wearing surface - 6.",
"## filter and convert them into interger\ndef filterConvert(conditionRatings):\n before = len(conditionRatings)\n print(\"Total Records before filteration: \",len(conditionRatings))\n conditionRatings = conditionRatings.loc[~conditionRatings['deck'].isin(['N','NA'])]\n conditionRatings = conditionRatings.loc[~conditionRatings['substructure'].isin(['N','NA'])]\n conditionRatings = conditionRatings.loc[~conditionRatings['superstructure'].isin(['N','NA'])]\n conditionRatings = conditionRatings.loc[~conditionRatings['Structure Type'].isin([19])]\n conditionRatings = conditionRatings.loc[~conditionRatings['Type of Wearing Surface'].isin(['6'])]\n after = len(conditionRatings)\n print(\"Total Records after filteration: \",len(conditionRatings))\n print(\"Difference: \", before - after)\n return conditionRatings\n\n",
"Particularly in the area of determining a deterioration model of bridges, There is an observed sudden increase in condition ratings of bridges over the period of time, This sudden increase in the condition rating is attributed to the reconstruction of the bridges. NBI dataset contains an attribute to record this reconstruction of the bridge. An observation of an increase in condition rating of bridges over time without any recorded information of reconstruction of that bridge in NBI dataset suggests that dataset is not updated consistently. In order to have an accurate deterioration model, such unrecorded reconstruction activities must be accounted in the deterioration model of the bridges.",
"## make it into a function\ndef findSurvivalProbablities(conditionRatings):\n \n i = 1\n j = 2\n probabilities = []\n while j < 121:\n v = list(conditionRatings.loc[conditionRatings['Age'] == i]['deck'])\n k = list(conditionRatings.loc[conditionRatings['Age'] == i]['structureNumber'])\n Age1 = {key:int(value) for key, value in zip(k,v)}\n #v = conditionRatings.loc[conditionRatings['Age'] == j]\n\n v_2 = list(conditionRatings.loc[conditionRatings['Age'] == j]['deck'])\n k_2 = list(conditionRatings.loc[conditionRatings['Age'] == j]['structureNumber'])\n Age2 = {key:int(value) for key, value in zip(k_2,v_2)}\n\n\n intersectedList = list(Age1.keys() & Age2.keys())\n reconstructed = 0\n for structureNumber in intersectedList:\n if Age1[structureNumber] < Age2[structureNumber]:\n if (Age1[structureNumber] - Age2[structureNumber]) < -1:\n reconstructed = reconstructed + 1\n try:\n probability = reconstructed / len(intersectedList)\n except ZeroDivisionError:\n probability = 0\n\n probabilities.append(probability*100)\n\n i = i + 1\n j = j + 1\n \n return probabilities\n",
"A utility function to plot the graphs.",
"def plotCDF(cumsum_probabilities):\n fig = plt.figure(figsize=(15,8))\n ax = plt.axes()\n\n plt.title('CDF of Reonstruction Vs Age')\n plt.xlabel('Age')\n plt.ylabel('CDF of Reonstruction')\n plt.yticks([0,10,20,30,40,50,60,70,80,90,100])\n plt.ylim(0,100)\n\n x = [i for i in range(1,120)]\n y = cumsum_probabilities\n ax.plot(x,y)\n return plt.show()\n\n",
"The following script will select all the bridges in the Southeast United States, filter missing and not required data. The script also provides information of how much of the data is being filtered.",
"states = ['54','51','21','47','37','45','13','01','28','02','22','12'] \n\n# Mapping state code to state abbreviation \nstateNameDict = {'25':'MA',\n '04':'AZ',\n '08':'CO',\n '38':'ND',\n '09':'CT',\n '19':'IA',\n '26':'MI',\n '48':'TX',\n '35':'NM',\n '17':'IL',\n '51':'VA',\n '23':'ME',\n '16':'ID',\n '36':'NY',\n '56':'WY',\n '29':'MO',\n '39':'OH',\n '28':'MS',\n '11':'DC',\n '21':'KY',\n '18':'IN',\n '06':'CA',\n '47':'TN',\n '12':'FL',\n '24':'MD',\n '34':'NJ',\n '46':'SD',\n '13':'GA',\n '55':'WI',\n '30':'MT',\n '54':'WV',\n '15':'HI',\n '32':'NV',\n '37':'NC',\n '10':'DE',\n '33':'NH',\n '44':'RI',\n '50':'VT',\n '42':'PA',\n '05':'AR',\n '20':'KS',\n '45':'SC',\n '22':'LA',\n '40':'OK',\n '72':'PR',\n '41':'OR',\n '27':'MN',\n '53':'WA',\n '01':'AL',\n '31':'NE',\n '02':'AK',\n '49':'UT'\n }\n\ndef getProbs(states, stateNameDict):\n # Initializaing the dataframes for deck, superstructure and subtructure\n df_prob_recon = pd.DataFrame({'Age':range(1,61)})\n df_cumsum_prob_recon = pd.DataFrame({'Age':range(1,61)})\n \n\n for state in states:\n conditionRatings_state = getData(state)\n stateName = stateNameDict[state]\n print(\"STATE - \",stateName)\n conditionRatings_state = filterConvert(conditionRatings_state)\n print(\"\\n\")\n probabilities_state = findSurvivalProbablities(conditionRatings_state)\n cumsum_probabilities_state = np.cumsum(probabilities_state)\n \n df_prob_recon[stateName] = probabilities_state[:60]\n df_cumsum_prob_recon[stateName] = cumsum_probabilities_state[:60]\n \n# df_prob_recon.set_index('Age', inplace = True)\n# df_cumsum_prob_recon.set_index('Age', inplace = True)\n \n return df_prob_recon, df_cumsum_prob_recon\n \ndf_prob_recon, df_cumsum_prob_recon = getProbs(states, stateNameDict)\n\ndf_prob_recon.to_csv('prsoutheast.csv')\ndf_cumsum_prob_recon.to_csv('cprsoutheast.csv')",
"In following figures, shows the cumulative distribution function of the probability of reconstruction over the bridges' lifespan, of bridges in the Southeast United States, as the bridges grow older the probability of reconstruction increases.",
"plt.figure(figsize=(12,8))\nplt.title(\"CDF Probability of Reconstruction vs Age\")\n\npalette = [\n 'blue', 'blue', 'green', 'magenta', 'cyan', 'brown', 'grey', 'red', 'silver', 'purple', 'gold', 'black','olive'\n]\n\nlinestyles =[':','-.','--','-',':','-.','--','-',':','-.','--','-']\nfor num, state in enumerate(df_cumsum_prob_recon.drop('Age', axis = 1)):\n \n plt.plot(df_cumsum_prob_recon[state], color = palette[num], linestyle = linestyles[num], linewidth = 4)\n \nplt.xlabel('Age'); plt.ylabel('Probablity of Reconstruction'); \nplt.legend([state for state in df_cumsum_prob_recon.drop('Age', axis = 1)], loc='upper left', ncol = 2)\nplt.ylim(1,100)\nplt.show()",
"The below figure presents CDF Probability of reconstruction, of bridge in the midwestern United States.",
"plt.figure(figsize = (16,12))\nplt.xlabel('Age')\nplt.ylabel('Mean')\n\n# Initialize the figure\nplt.style.use('seaborn-darkgrid')\n \n# create a color palette\npalette = [\n 'blue', 'blue', 'green', 'magenta', 'cyan', 'brown', 'grey', 'red', 'silver', 'purple', 'gold', 'black','olive'\n]\n# multiple line plot\nnum = 1\nlinestyles = [':','-.','--','-',':','-.','--','-',':','-.','--','-']\nfor n, column in enumerate(df_cumsum_prob_recon.drop('Age', axis=1)):\n \n # Find the right spot on the plot\n plt.subplot(4,3, num)\n \n # Plot the lineplot\n plt.plot(df_cumsum_prob_recon['Age'], df_cumsum_prob_recon[column], linestyle = linestyles[n] , color=palette[num], linewidth=4, alpha=0.9, label=column)\n \n # Same limits for everybody!\n plt.xlim(1,60)\n plt.ylim(1,100)\n \n # Not ticks everywhere\n if num in range(10) :\n plt.tick_params(labelbottom='off')\n if num not in [1,4,7,10]:\n plt.tick_params(labelleft='off')\n \n # Add title\n plt.title(column, loc='left', fontsize=12, fontweight=0, color=palette[num])\n plt.text(30, -1, 'Age', ha='center', va='center')\n plt.text(1, 50, 'Probability', ha='center', va='center', rotation='vertical')\n num = num + 1\n \n# general title\nplt.suptitle(\"CDF Probability of Reconstruction vs Age\", fontsize=13, fontweight=0, color='black', style='italic', y=1.02)\n ",
"In the following figures, provides the probability of reconstruction at every age. Note this is not a cumulative probability function. the constant number of reconstruction of the bridges can be explained by various factors.\none particularly interesting reason could be funding provided to reconstruct bridges, this explain why some of the states have perfect linear curve.",
"plt.figure(figsize=(12,8))\nplt.title(\"Probability of Reconstruction vs Age\")\n\npalette = [\n 'blue', 'blue', 'green', 'magenta', 'cyan', 'brown', 'grey', 'red', 'silver', 'purple', 'gold', 'black','olive'\n]\nlinestyles =[':','-.','--','-',':','-.','--','-',':','-.','--','-']\nfor num, state in enumerate(df_cumsum_prob_recon.drop('Age', axis = 1)):\n \n plt.plot(df_prob_recon[state], color = palette[num], linestyle = linestyles[num], linewidth = 4)\n \nplt.xlabel('Age'); plt.ylabel('Probablity of Reconstruction'); \nplt.legend([state for state in df_cumsum_prob_recon.drop('Age', axis = 1)], loc='upper left', ncol = 2)\nplt.ylim(1,25)\nplt.show()",
"A key observation in this investigation of several state reveals a constant number of bridges are reconstructed every year, this could be an effect of fixed budget allocated for reconstruction by the state. This also highlights the fact that not all bridges that might require reconstruction are reconstructed.\nTo Understand this phenomena in clearing, the following figure presents probability of reconstruction vs age of all individual states in the Southeast United States.",
"plt.figure(figsize = (16,12))\nplt.xlabel('Age')\nplt.ylabel('Mean')\n\n\n# Initialize the figure\nplt.style.use('seaborn-darkgrid')\n \n# create a color palette\npalette = [\n 'blue', 'blue', 'green', 'magenta', 'cyan', 'brown', 'grey', 'red', 'silver', 'purple', 'gold', 'black','olive'\n]\n# multiple line plot\nnum = 1\nlinestyles = [':','-.','--','-',':','-.','--','-',':','-.','--','-']\nfor n, column in enumerate(df_prob_recon.drop('Age', axis=1)):\n \n # Find the right spot on the plot\n plt.subplot(4,3, num)\n \n # Plot the lineplot\n plt.plot(df_prob_recon['Age'], df_prob_recon[column], linestyle = linestyles[n] , color=palette[num], linewidth=4, alpha=0.9, label=column)\n \n # Same limits for everybody!\n plt.xlim(1,60)\n plt.ylim(1,25)\n \n # Not ticks everywhere\n if num in range(10) :\n plt.tick_params(labelbottom='off')\n if num not in [1,4,7,10]:\n plt.tick_params(labelleft='off')\n \n # Add title\n plt.title(column, loc='left', fontsize=12, fontweight=0, color=palette[num])\n plt.text(30, -1, 'Age', ha='center', va='center')\n plt.text(1, 12.5, 'Probability', ha='center', va='center', rotation='vertical')\n num = num + 1\n \n# general title\nplt.suptitle(\"Probability of Reconstruction vs Age\", fontsize=13, fontweight=0, color='black', style='italic', y=1.02)\n "
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
jmlon/PythonTutorials
|
pandas/PandasSeries.ipynb
|
gpl-3.0
|
[
"Pandas: Series\nLa librería Pandas ofrece clases que permiten manejar datasets para operaciones de análisis de datos.\nPara importar la librería en un programa se usa:",
"import pandas as pd",
"Pandas es una librería orientada a objetos y los objetos más importantes que incluye son las Series y los DataFrames.\nSeries\nUna serie es una colección de valores con un índice asociado. El índice puede ser numérico, nominal, fecha y hora, etc. y permite referenciar los datos de la serie por su índice.\nLos siguientes son algunos ejemplos de creación de objetos Serie",
"# Crear una seríe indicando la producción por mes\nproduccion = pd.Series( [120,130,110,150,170,180,170,160,190,175,160,141], \n index=['ene','feb','mar','abr','may','jun','jul','ago','sep','oct','nov','dec'] )\nprint(produccion)\n\n# Crear una serie a partir de un diccionario\ndict = { 'a':6.4, 'b':7.1, 'c':8.8, 'd':9.2, 'e':6.4, 'f':5.6, 'g':3.2 }\npesos = pd.Series(dict)\nprint(pesos)\n\n# Crear una serie utilizando un secuencia aleatoria distribuida normalmente\n# Si no se indican los indices, se numeran automáticamente al instanciar la serie\nimport numpy as np\naleatorio = pd.Series( np.random.randn(10) )\nprint(aleatorio)",
"Atributos y Métodos de las Series\nLos siguientes son los atributos más importantes",
"# Número de elementos\npesos.size\n\n# Forma (número de filas, columnas). En el caso de las series solo son filas\npesos.shape\n\n# Tipo de datos que contiene la serie\npesos.dtype\n\n# Los indices de la serie\npesos.index\n\n# Los valores contenidos en la serie\npesos.values\n\n## Acceso a los elementos de la serie",
"Los elementos de una serie se acceden de forma similar a los de una lista. Se pueden usar los índices de la serie, o la posición de los elementos, por ejemplo:",
"pesos['a']\n\n# o usando el metodo .get\npesos.get('a')\n\npesos[0]\n\npesos['b':'e']\n\npesos[1:5]\n\npesos[:'c']\n\npesos[:3]\n\npesos['d':]\n\npesos[3:]",
"También se puede seleccionar un subconjunto de índices o posiciones particular",
"pesos[ ['b','e','c'] ]\n\npesos[ [1,4,2] ]",
"Indexado lógico\nLa indexación lógica permite obtener los elementos de la serie que cumplen con una condición lógica",
"pesos[ pesos>7 ]",
"Se puede comprobar si un determinado indice está presente en la serie",
"'a' in pesos\n\n'h' in pesos\n\n'Sep' in produccion\n\n# Asignar valor a un indice\npesos.at['a'] = 5.0\npesos[0]\n\n# Esta vacia la serie\npesos.empty\n\n# Crear una serie vacia\nvolumenes = pd.Series()\nvolumenes.empty",
"Eliminar elementos de una Series\nSe pueden eliminar elementos por medio del método .drop e indicando los índices a eliminar",
"pesos.drop(['e','c'])",
"De particular interes son casos en los que la serie contiene elementos nulos (NaN). Todos los elementos nulos se pueden eliminar con el método .dropna",
"# Hacer una copia de la Series\npesos2 = pesos.copy()\n# Hacer nulos algunos de sus elementos\npesos2.at['b'] = None\npesos2\n\npesos2.dropna()",
"Métodos para comprobar una condición lógica sobre todos los elementos de una Series",
"# Comprobar si todos/algunos elementos de la serie cumplen una condición lógica\n(pesos>5).all()\n\n(pesos<5).any()\n\n# Indice correspondiente al mayor/menor valor de la serie\npesos.argmax()\n\npesos.argmin()\n\n# Convertir a matriz de numpy\nx = pesos.as_matrix()\nprint(x)\nprint(type(x))\n\n# Estadísticas descriptivas de la serie\npesos.describe()\n\n# Seleccionar un rango de elementos al principio/final de la serie\nprint( pesos.head(3) )\nprint( pesos.tail(2) )",
"Operadores aritméticos\nTodas las operaciones aritméticas entre series se hacen alineando los indices de las series. Las operaciones se pueden realizar utilizando los métodos .add .sub .mul .div .pow .dot .abs .mod o los correspondientes operadores sobrecargados. Indices no presentes se asumen NaN (Not a Number)",
"# Crear una nueva serie\nmedidas = pd.Series({ 'b':6.1, 'c':8.2, 'd':7.3, 'e':5.4, 'f':6.5, 'h':3.2 })\nprint(\"pesos\\n\", pesos)\nprint(\"\\nmedidas\\n\",medidas)\n\npesos.add(medidas)\n\npesos+medidas\n\npesos-medidas\n\npesos*medidas\n\npesos/medidas",
"Operadores relacionales\nSe cuenta asi mismo con los métodos relacionales estándar .lt .le .gt .ge .eq .ne o sus correspondientes operadores sobrecargados < <= > >= == !=. Solo son aplicables a series de igual longitud (no es oblogatorio que coincidan los índices).",
"medidas.at['a']=1.\npesos.size, medidas.size\n\npesos<medidas\n\npesos.eq(medidas)\n\npesos!=medidas\n\npesos.ge(medidas)\n\n# Comprobar si alguno de los elementos es nulo\npesos.isnull()",
"También se cuenta con las operaciones estádisticas comunes: .count .sum .min .max .mean .var .std .median .mode .cov .corr .autocorr .sem",
"pesos.count(), pesos.sum(), pesos.mean(), pesos.median()",
"Se puede ordernar la serie por valores o por indices, en orden creciente o decreciente",
"pesos.sort_values()\n\npesos.sort_index(ascending=False)",
"Operaciones de entrada/salida",
"# Salvar la serie en un archivo\npesos.to_csv('pesos.csv')\n\n# Leer la serie de un archivo\ncopia = pd.read_csv('pesos.csv', squeeze=True, index_col=0, header=None)\n\ncopia\n\n# Graficar la serie\nimport matplotlib.pyplot as plt\npesos.plot()\nplt.show()\n\n# Obtener su histograma\npesos.hist()\nplt.show()",
"Referencias\nSeries\nhttps://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.html"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
cogeorg/black_rhino
|
examples/firesales_SA/.ipynb_checkpoints/Python code Toy model-checkpoint.ipynb
|
gpl-3.0
|
[
"Explanation of python code\nDear Yordan, \nKindly find attached a folder containing python code for the toy model in the stock flow presentation. The coding of system wide stress simulation is part of my deliverables. In addition, I thought it may be useful for you to have an object orientated programming framework in python because it is associated with agent based modelling. In object-oriented programming languages, data and programming methods or functions are “embedded” in objects that can manipulate their own data and interact with other programming objects. This makes real-world entities such as households or banks easy to be represented. For example, in this model a natural starting point is to define an object/class called “Dealer banks”. I specified information dealers would need to have and rules for how they interact with each other and their environment. Another feature of object-oriented programming that is good for ABM development is inheritance. An instantiated object, e.g. commercial bank, can ‘inherit’ some characteristics from a ‘parent class’ but still behave in different ways. \nHowever, the obvious downside is that Python coding is definitely harder than Matlab. For example, there is no interface to click on a variable set(not that I\"m aware of anyway), you have to save the variables in a dataframe (by writing a function in the code) and then write the dataframe to a csv file. On the upside, it’s possible to automate the update state easily and add heterogenous behavior for same-type like institutions. Another feature is that \nHow to run the model\nRunning python :\n For python code to be executed the computer needs \n to have python 2 or python 3 installed (via https://www.python.org) \n A nice coding editor like Sublime Text 3, Atom or pycharm. The good thing about pycharm is that you can run the \n code in the editor without installing additional packages. On my mac, I actually run the code in the terminal \n (also called bash). 
I think in Windows, the equivalent is the shell. The program is run via command line codes. So \n I go to the path of where the folders are stored and type.\nruby \npython sflow.py\nAlternatively, for example in pycharm, you can open the slfow.py program and then run it. \nThis executes the whole program and writes a csv file with the output in the main folder stockflow. \nIf you want to change things in this model, the main files you will be working on are the agent scripts \nin src/institution, the updater.py and the configuration files in /configs/. \nSo let's start by looking at the main file: \nSflow.py",
"# %load sflow.py\n# [SublimeLinter pep8-max-line-length:300]\n# -*- coding: utf-8 -*-\n\n# -------------------------------------------------------------------------\n#\n# MAIN\n#\n# -------------------------------------------------------------------------\nif __name__ == '__main__':\n\n from src.environment import Environment\n from src.runner import Runner\n\n# We have to pass in the path of the environment xml as args[0] \n# the name of the environment xml as args[1] here:\n# and the scenario name as args[2] here:\n\n args = [\"configs/environment/\", \"environment_config\", \"benchmark\"]\n\n#\n# INITIALIZATION\n#\n environment_directory = str(args[0])\n identifier = str(args[1])\n scenario = str(args[2])\n environment = Environment(environment_directory, identifier)\n runner = Runner(environment, scenario)\n\n#\n# UPDATE STEP\n#\n for i in range(int(environment.static_parameters['num_simulations'])):\n environment.initialize(environment_directory, identifier)\n runner.initialize(environment, scenario)\n # do the run\n runner.do_run(environment, scenario)\n",
"In the beginning we need to instantiate two objects in the main file to run the program -\nfrom src.environment import Environment\n from src.runner import Runner\nthe Environment and the Runner class. The code for those classes are stored in the folder src and the name of the classes are Environment and Runner (not surprising).\n* The runner has two main purposes - it just handles the time steps and writes the output to csv. \n* The environment has the purpose of reading all the configuration files, instantiating the agents and saving the agents in lists to access them in the actual modelling part. So whenever you want to access a variable for an agent in the updater (for example a cash position of a hedgefund), you need to write \nruby \nfor hr in environment.hedgefunds:\n print hr.Cash\nThe instantiation of the agents takes up quite a bit of code in the environment file. The code reads the data for the agents from configuration files you find in /configs. A configuration file for the agents is stored in an agent folder because python iterates through the folder to look for all config files for a specific agent. It's possible to store them all in one place but then the automation code of the instantiation has to be changed. \nAn agent config file looks like this:",
"# %load configs/agents/hf/hf.xml\n<agent identifier=\"HF\">\n<parameter type=\"stock_variables\" name=\"GB\" value=\"40\"></parameter>\n<parameter type=\"stock_variables\" name=\"CB\" value=\"20\"></parameter>\n <parameter type=\"stock_variables\" name=\"Cash\" value=\"10\"></parameter>\n <parameter type=\"stock_variables\" name=\"does_repo\" value=\"yes\"></parameter>\n</agent>\n<!-- <parameter type=\"parameters\" name=\"leverage\" value=\"13.0780116888061\"></parameter> -->\n",
"Those type \" \" names later become dictionaries for the instantiated agent object. So the hedge fund has \na dictionary (a data structure that saves stuff) with the name stock_variable = {Cash: 20, GB: 40, CB: 20, does_repo = 'yes'} When I write the results to csv, I iterate over those variables.\nYou can define additional variables (e.g. leverage) by adding a line. But for now I commented that out. \nAgents' configuration files all look similar to that one. \nThe environment config file looks a bit different:",
"# %load configs/environment/environment_config.xml\n<environment identifier='toy_test'>\n<!-- simulation parameters -->\n <parameter type='static' name='num_sweeps' value='10'></parameter>\n <parameter type='static' name='num_simulations' value='1'></parameter>\n <parameter type='static' name='cbank_directory' value='configs/agents/cbank/'></parameter>\n <parameter type='static' name='dealer_directory' value='configs/agents/dealer/'></parameter>\n <parameter type='static' name='hf_directory' value='configs/agents/hf/'></parameter>\n <parameter type='static' name='pf_directory' value='configs/agents/pf/'></parameter>\n <parameter type='static' name='ic_directory' value='configs/agents/ic/'></parameter>\n <parameter type='static' name='mmf_directory' value='configs/agents/mmf/'></parameter>\n <parameter type='static' name='if_directory' value='configs/agents/if/'></parameter>\n <parameter type='exogenous' name='price_GB' value=\"1.0\"></parameter>\n <parameter type='exogenous' name='price_CB' value=\"1.0\"></parameter>\n <parameter type='exogenous' name='haircut' value=\"0.05\"></parameter>\n <parameter type='exogenous' name='interest_GB' value=\"0.02\"></parameter>\n <parameter type='exogenous' name='interest_CB' value=\"0.04\"></parameter>\n <parameter type='exogenous' name='interest_repo' value=\"0.02\"></parameter>\n <parameter type='exogenous' name='interest_loans' value=\"0.02\"></parameter>\n <parameter type='exogenous' name='interest_deposits' value=\"0.02\"></parameter>\n <parameter type='exogenous' name='GB_shock' value=\"-0.1\"></parameter>\n <parameter type='exogenous' name='CB_shock' value=\"-0.1\"></parameter>\n <parameter type='exogenous' name='Redemption' value=\"-0.1\"></parameter>\n</environment>\n",
"Here you can change the initital interest rates and prices for the marketable assets and also the shocks. It's possible to assign them to other variables and change them later in the updater. It's also possible to not read them in at all (just delete the lines) and define those in the updater script, but in the current setup they get read into the dictionary in the environment, environment.exogenous variables = {price_GB : 1.0, ... }. \nWhat's the difference between num_simulations and num_sweeps? For the time iterations current_step it's more convenient to use num_sweeps rather than num_simulations for now. num_sweeps are the time steps for the model algorithm. With num_simulations, one can initiate a different environment with different classes, so for now it's better to stick with num_sweeps to handle the time steps in the model. \nOnce all the agents' objects and configuration files are instantiated, the code moves on to the actual model algorithm which is stored in the updater class under \\src\\\n updater.py\nThe updater can be thought as the class that handels the transition. 
\nUpdater.py\nHere \n\nWe take over the initial prices and interest rates given by the enviroment config and save them in updater specific dictionaries (updater.prices and updater.rates by using the method def add_rates(self, environment):\n\ndef initialize_prices and def updade_prices\n\n\nThe really important method is the one below: \n```ruby \ndef do_update_benchmark(self, environment, current_step, scenario):\n import pandas as pd\nif current_step < 1:\n self.add_rates(environment)\n self.initialize_prices(environment, current_step)\n\n print \"***In t=:\", current_step , \" This is the price matrix***\\n\", self.prices, \"\\n\",\n\n self.initialize_assets_all_agents(current_step, environment)\n\n self.profit_all_agents(environment, current_step)\n\nelse:\n\n self.update_all_agents_balance_sheets(environment, current_step, scenario)```\n\n\n\ndef write_to_csv puts the output in to a result dataframe self.results_df.",
"# -*- coding: utf-8 -*-\n\n\n# -------------------------------------------------------------------------\n# class Updater\n# -------------------------------------------------------------------------\n\nimport numpy as np\n\nclass Updater():\n #\n #\n # METHODS\n #\n def get_identifier(self):\n return self.identifier\n\n # -------------------------------------------------------------------------\n # __init__\n # -------------------------------------------------------------------------\n def __init__(self, environment, runner):\n self.environment = environment\n self.runner = runner\n\n self.results_df = 0\n self.prices = np.array([])\n self.rates={}\n\n self.system_equity = 0\n self.system_assets = 0\n\n self.delta_pGB = 0\n self.delta_pGB = 0\n\n # -------------------------------------------------------------------------\n # -------------------------------------------------------------------------\n # do_update\n # -------------------------------------------------------------------------\n def do_update_benchmark(self, environment, current_step, scenario):\n import pandas as pd\n\n if current_step < 1:\n self.add_rates(environment)\n self.initialize_prices(environment, current_step)\n\n print \"***In t=:\", current_step , \" This is the price matrix***\\n\", self.prices, \"\\n\",\n\n self.initialize_assets_all_agents(current_step, environment)\n\n self.profit_all_agents(environment, current_step)\n\n else:\n\n self.update_all_agents_balance_sheets(environment, current_step, scenario)\n",
"Last, but not least, we have the agent classes under /src/class_folder where agents' balance sheets and behavior are set. Profit and balance sheet dynamics are found in the respective agent scripts, i.e. hf.py has the profit functions for the hedge fund, pf.py has the profit function and (potential) constraints for the pension fund, etc. \nAgent classes\nThe functions in the agent scripts are:\nruby \ndef initialize_assets\ndef print_balance_shee\ndef profit\ndef check_consistency\ndef update_balance_sheets\nI hope these instructions are sufficient to get you started. \nPlease let me know if there is anything else I can do to explain. \nHope you are well. \nTina"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
hannorein/reboundx
|
ipython_examples/Radiation_Forces_Circumplanetary_Dust.ipynb
|
gpl-3.0
|
[
"Radiation Forces on Circumplanetary Dust\nThis example shows how to integrate circumplanetary dust particles under the action of radiation forces. We use Saturn's Phoebe ring as an example, a distant ring of debris. \nWe have to make sure we add all quantities in the same units. Here we choose to use SI units. We begin by adding the Sun and Saturn, and use Saturn's orbital plane as the reference plane:",
"import rebound\nimport reboundx\nimport numpy as np\nsim = rebound.Simulation()\nsim.G = 6.674e-11 # SI units\nsim.dt = 1.e4 # Initial timestep in sec.\nsim.N_active = 2 # Make it so dust particles don't interact with one another gravitationally\nsim.add(m=1.99e30, hash=\"Sun\") # add Sun with mass in kg\nsim.add(m=5.68e26, a=1.43e12, e=0.056, pomega = 0., f=0., hash=\"Saturn\") # Add Saturn at pericenter\nps = sim.particles",
"Now let's set up REBOUNDx and add radiation_forces. We also have to set the speed of light in the units we want to use.",
"rebx = reboundx.Extras(sim)\nrf = rebx.load_force(\"radiation_forces\")\nrebx.add_force(rf)\nrf.params[\"c\"] = 3.e8",
"By default, the radiation_forces effect assumes the particle at index 0 is the source of the radiation. If you'd like to use a different one, or it's possible that the radiation source might move to a different index (e.g. with a custom merger routine), you can add a radiation_source flag to the appropriate particle like this:",
"ps[\"Sun\"].params[\"radiation_source\"] = 1",
"Here we show how to add two dust grains to the simulation in different ways. Let's first initialize their orbits. In both cases we use the orbital elements of Saturn's irregular satellite Phoebe, which the dust grains will inherit upon release (Tamayo et al. 2011). Since the dust grains don't interact with one another, putting them on top of each other is OK.",
"a = 1.3e10 # in meters\ne = 0.16\ninc = 175*np.pi/180.\nOmega = 0. # longitude of node\nomega = 0. # argument of pericenter\nf = 0. # true anomaly\n\n# Add two dust grains with the same orbit\nsim.add(primary=ps[\"Saturn\"], a=a, e=e, inc=inc, Omega=Omega, omega=omega, f=f, hash=\"p1\")\nsim.add(primary=ps[\"Saturn\"], a=a, e=e, inc=inc, Omega=Omega, omega=omega, f=f, hash=\"p2\")",
"Now we add the grains' physical properties. In order for particles to feel radiation forces, we have to set their beta parameter. $\\beta$ is tha ratio of the radiation force to the gravitational force from the star (Burns et al. 1979). One can either set it directly:",
"ps[\"p1\"].params[\"beta\"] = 0.01",
"or we can calculate it from more fundamental parameters. REBOUNDx has a convenience function that takes the gravitional constant, speed of light, radiation source's mass and luminosity, and then the grain's physical radius, bulk density, and radiation pressure coefficient Q_pr (Burns et al. 1979, equals 1 in the limit that the grain size is >> the radiation's wavelength).",
"grain_radius = 1.e-5 # grain radius in m\ndensity = 1000. # kg/m^3 = 1g/cc\nQ_pr = 1.\nluminosity = 3.85e26 # Watts\nps[\"p2\"].params[\"beta\"] = rebx.rad_calc_beta(sim.G, rf.params[\"c\"], ps[0].m, luminosity, grain_radius, density, Q_pr)\nprint(\"Particle 2's beta parameter = {0}\".format(ps[\"p2\"].params[\"beta\"]))",
"Now let's run for 100 years (about 3 Saturn orbits), and look at how the eccentricity varies over a Saturn year:",
"yr = 365*24*3600 # s\nNoutput = 1000\ntimes = np.linspace(0,100.*yr, Noutput)\ne1, e2 = np.zeros(Noutput), np.zeros(Noutput)\n\nsim.move_to_com() # move to center of mass frame first\n\nfor i, time in enumerate(times):\n sim.integrate(time)\n e1[i] = ps[\"p1\"].calculate_orbit(primary=ps[\"Saturn\"]).e\n e2[i] = ps[\"p2\"].calculate_orbit(primary=ps[\"Saturn\"]).e\n \n%matplotlib inline\nimport matplotlib.pyplot as plt\nfig, ax = plt.subplots(figsize=(15,5))\n\nax.plot(times/yr, e1, label=r\"$\\beta$={0:.1e}\".format(ps[\"p1\"].params[\"beta\"]))\nax.plot(times/yr, e2, label=r\"$\\beta$={0:.1e}\".format(ps[\"p2\"].params[\"beta\"]))\nax.set_xlabel('Time (yrs)', fontsize=24)\nax.set_ylabel('Eccentricity', fontsize=24)\nplt.legend(fontsize=24)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
ltiao/notebooks
|
exploring-the-google-quickdraw-dataset-with-sketchrnn-part-2.ipynb
|
mit
|
[
"This is the second part in a series of notes on my exploration of the recently released Google QuickDraw dataset, using the concurrently released SketchRNN model. \nIn the previous note, we set up our development environment, downloaded a subset of the data along with some pre-trained models, and developed some utilities for visualizing the data in the notebook. We retain most of the code from previous note and omit the expository code and markdown cells.\n\nThe QuickDraw dataset is curated from the millions of drawings contributed by over 15 million people around the world who participated in the \"Quick, Draw!\" A.I. Experiment, in which they were given the challenge of drawing objects belonging to a particular class (such as \"cat\") in under 20 seconds.\nSketchRNN is a very impressive generative model that was trained to produce vector drawings using this dataset. It was of particular interest to me because it cleverly combines many of the latest tools and techniques recently developed in machine learning, such as Variational Autoencoders, HyperLSTMs (a HyperNetwork for LSTM), Autoregressive models, Layer Normalization, Recurrent Dropout, the Adam optimizer, and others.",
"%matplotlib inline\n%config InlineBackend.figure_format = 'svg'\n%load_ext autoreload\n%autoreload 2\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom matplotlib.animation import FuncAnimation\nfrom matplotlib.path import Path\nfrom matplotlib import rc\n\nfrom six.moves import map\n\nfrom magenta.models.sketch_rnn.sketch_rnn_train import \\\n (load_env,\n load_checkpoint,\n reset_graph,\n download_pretrained_models,\n PRETRAINED_MODELS_URL)\nfrom magenta.models.sketch_rnn.model import Model, sample\nfrom magenta.models.sketch_rnn.utils import (lerp,\n slerp,\n get_bounds, \n to_big_strokes,\n to_normal_strokes)\n\n# For inine display of animation\n# equivalent to rcParams['animation.html'] = 'html5'\nrc('animation', html='html5')\n\n# set numpy output to something sensible\nnp.set_printoptions(precision=8, \n edgeitems=6, \n linewidth=200, \n suppress=True)\n\ntf.logging.info(\"TensorFlow Version: {}\".format(tf.__version__))",
"Getting the Pre-Trained Models and Data",
"DATA_DIR = ('http://github.com/hardmaru/sketch-rnn-datasets/'\n 'raw/master/aaron_sheep/')\nMODELS_ROOT_DIR = '/tmp/sketch_rnn/models'\n\nDATA_DIR\n\nPRETRAINED_MODELS_URL\n\ndownload_pretrained_models(\n models_root_dir=MODELS_ROOT_DIR,\n pretrained_models_url=PRETRAINED_MODELS_URL)",
"We look at the layer normalized model trained on the aaron_sheep dataset for now.",
"MODEL_DIR = MODELS_ROOT_DIR + '/aaron_sheep/layer_norm'\n\n(train_set, \n valid_set, \n test_set, \n hps_model, \n eval_hps_model, \n sample_hps_model) = load_env(DATA_DIR, MODEL_DIR)\n\nclass SketchPath(Path):\n \n def __init__(self, data, factor=.2, *args, **kwargs):\n \n vertices = np.cumsum(data[::, :-1], axis=0) / factor\n codes = np.roll(self.to_code(data[::,-1].astype(int)), \n shift=1)\n codes[0] = Path.MOVETO\n\n super(SketchPath, self).__init__(vertices, \n codes, \n *args, \n **kwargs)\n \n @staticmethod\n def to_code(cmd):\n # if cmd == 0, the code is LINETO\n # if cmd == 1, the code is MOVETO (which is LINETO - 1)\n return Path.LINETO - cmd\n\ndef draw(sketch_data, factor=.2, pad=(10, 10), ax=None):\n\n if ax is None:\n ax = plt.gca()\n\n x_pad, y_pad = pad\n \n x_pad //= 2\n y_pad //= 2\n \n x_min, x_max, y_min, y_max = get_bounds(data=sketch_data,\n factor=factor)\n\n ax.set_xlim(x_min-x_pad, x_max+x_pad)\n ax.set_ylim(y_max+y_pad, y_min-y_pad)\n\n sketch = SketchPath(sketch_data)\n\n patch = patches.PathPatch(sketch, facecolor='none')\n ax.add_patch(patch)",
"The real fun begins\nEverything up to here has more or less been copied straight from the previous notebook. Now we load the pre-trained SketchRNN model and use it to begin our exploration of the test dataset.",
"# construct the sketch-rnn model here:\nreset_graph()\nmodel = Model(hps_model)\neval_model = Model(eval_hps_model, reuse=True)\nsample_model = Model(sample_hps_model, reuse=True)\n\nsess = tf.InteractiveSession()\nsess.run(tf.global_variables_initializer())\n\n# loads the weights from checkpoint into our model\nload_checkpoint(sess=sess, checkpoint_path=MODEL_DIR)",
"The helper functions for encoding a sketch to some latent code $z$ and then decoding it back to a sketch were provided in the original notebook. I just made some minor syntactic changes and removed the behaviour of plotting as a side-effect.",
"def encode(input_strokes):\n strokes = to_big_strokes(input_strokes).tolist()\n strokes.insert(0, [0, 0, 1, 0, 0])\n seq_len = [len(input_strokes)]\n z = sess.run(eval_model.batch_z,\n feed_dict={\n eval_model.input_data: [strokes], \n eval_model.sequence_lengths: seq_len})[0]\n return z\n\ndef decode(z_input=None, temperature=.1, factor=.2):\n z = None\n if z_input is not None:\n z = [z_input]\n sample_strokes, m = sample(\n sess, \n sample_model, \n seq_len=eval_model.hps.max_seq_len, \n temperature=temperature, z=z)\n return to_normal_strokes(sample_strokes)",
"Now we get a random sample from the test dataset",
"sketch = test_set.random_sample()\n\nfig, ax = plt.subplots(figsize=(3, 3),\n subplot_kw=dict(xticks=[], \n yticks=[], \n frame_on=False))\n\ndraw(sketch, ax=ax)\n\nplt.show()",
"We project it into the 128-dimensional latent space using the pre-trained encoder",
"z = encode(sketch)\nz.shape",
"Now we can reconstruct the original sketch from the learned latent representation using the pre-trained decoder, with temperature $\\tau=0.8$. The temperature parameter controls the level of randomness in the samples generated by the model, which becomes deterministic as $\\tau \\to 0$, and produces samples that are the most likely point in the probability density function. See pg. 7 of the original paper for further discussion of the effects the temperature parameter has on the sampling process.",
"sketch_reconstructed = decode(z, temperature=.6)\nsketch_reconstructed.shape\n\nfig, ax = plt.subplots(figsize=(3, 3),\n subplot_kw=dict(xticks=[], \n yticks=[], \n frame_on=False))\n\ndraw(sketch_reconstructed, ax=ax)\n\nplt.show()",
"Variance in the Reconstruction\nThe grid of drawings below consists of samples of the reconstructed drawings at various settings of the temperature parameter. The first column is the original drawing, and each of the remaining columns are 5 samples of the reconstructed drawing with $\\tau$ increasing from 0.1 to 0.9.",
"fig, ax_arr = plt.subplots(nrows=5, \n ncols=10, \n figsize=(8, 4),\n subplot_kw=dict(xticks=[],\n yticks=[],\n frame_on=False))\nfig.tight_layout()\n\nfor row_num, ax_row in enumerate(ax_arr): \n for col_num, ax in enumerate(ax_row):\n if not col_num:\n draw(sketch, ax=ax)\n xlabel = 'original'\n else:\n t = col_num / 10.\n draw(decode(z, temperature=t), ax=ax)\n xlabel = r'$\\tau={}$'.format(t)\n if row_num+1 == len(ax_arr):\n ax.set_xlabel(xlabel)\n\nplt.show()",
"At the lowest setting of the temperature at $\\tau=0.1$, we see the samples consistently share a similar appearance - they all look like vertical strokes emanating from a fluffy cloud. However, they are also consistently dissimilar to the original sketch. In this sense, the samples from the models seems to exhibit high bias and low variance. As we increase the variance in the samples by increasing $\\tau$, we start to find some samples that resemble our original sketch. But when we increase $\\tau$ a little too much, beyond say 0.8, we begin to see a little too much randomness in the samples.\nDrawing Comparisons\nHumans typically write and, by extension, draw from left to right, top to bottom. Here, I wanted to animate the process of the original sketch being drawn alongside the decoder's reconstruction of the sketch to compare stroke patterns, typical stroke lengths, etc.",
"fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(6, 3),\n subplot_kw=dict(xticks=[], \n yticks=[]))\nfig.tight_layout()\n\nx_pad, y_pad = 10, 10\n \nx_pad //= 2\ny_pad //= 2\n\n(x_min_1, \n x_max_1,\n y_min_1,\n y_max_1) = get_bounds(data=sketch, factor=.2)\n\n(x_min_2, \n x_max_2, \n y_min_2, \n y_max_2) = get_bounds(data=sketch_reconstructed, factor=.2)\n\nx_min = np.minimum(x_min_1, x_min_2)\ny_min = np.minimum(y_min_1, y_min_2)\n\nx_max = np.maximum(x_max_1, x_max_2)\ny_max = np.maximum(y_max_1, y_max_2)\n\nax1.set_xlim(x_min-x_pad, x_max+x_pad)\nax1.set_ylim(y_max+y_pad, y_min-y_pad)\n\nax1.set_xlabel('Original')\n\nax2.set_xlim(x_min-x_pad, x_max+x_pad)\nax2.set_ylim(y_max+y_pad, y_min-y_pad)\n\nax2.set_xlabel('Reconstruction')\n\ndef animate(i):\n\n original = SketchPath(sketch[:i+1])\n reconstructed = SketchPath(sketch_reconstructed[:i+1])\n\n patch1 = ax1.add_patch(patches.PathPatch(original,\n facecolor='none'))\n\n patch2 = ax2.add_patch(patches.PathPatch(reconstructed, \n facecolor='none'))\n \n return patch1, patch2\n\nframes = np.maximum(sketch.shape[0], \n sketch_reconstructed.shape[0])\nframes\n\nFuncAnimation(fig,\n animate,\n frames=frames-1, \n interval=15,\n repeat_delay=1000*3, \n blit=True)",
"Unfortunately, the strokes that make up a sketch have been normalized with the Ramer–Douglas–Peucker algorithm, which is a simple stroke simplification process. This means the strokes aren't quite the same as the that which the human originally used to construct the sketch. Moreover, the timing of each stroke are also important to understanding patterns in how humans draw quick sketches. While timestamp data is provided in the full QuickDraw dataset, they are not preserved in the modified version of the dataset used by SketchRNN."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
bjshaw/phys202-2015-work
|
days/day11/Interpolation.ipynb
|
mit
|
[
"Interpolation\nLearning Objective: Learn to interpolate 1d and 2d datasets of structured and unstructured points using SciPy.",
"%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns",
"Overview\nWe have already seen how to evaluate a Python function at a set of numerical points:\n$$ f(x) \\rightarrow f_i = f(x_i) $$\nHere is an array of points:",
"x = np.linspace(0,4*np.pi,10)\nx",
"This creates a new array of points that are the values of $\\sin(x_i)$ at each point $x_i$:",
"f = np.sin(x)\nf\n\nplt.plot(x, f, marker='o')\nplt.xlabel('x')\nplt.ylabel('f(x)');",
"This plot shows that the points in this numerical array are an approximation to the actual function as they don't have the function's value at all possible points. In this case we know the actual function ($\\sin(x)$). What if we only know the value of the function at a limited set of points, and don't know the analytical form of the function itself? This is common when the data points come from a set of measurements.\nInterpolation is a numerical technique that enables you to construct an approximation of the actual function from a set of points:\n$$ {x_i,f_i} \\rightarrow f(x) $$\nIt is important to note that unlike curve fitting or regression, interpolation doesn't not allow you to incorporate a statistical model into the approximation. Because of this, interpolation has limitations:\n\nIt cannot accurately construct the function's approximation outside the limits of the original points.\nIt cannot tell you the analytical form of the underlying function.\n\nOnce you have performed interpolation you can:\n\nEvaluate the function at other points not in the original dataset.\nUse the function in other calculations that require an actual function.\nCompute numerical derivatives or integrals.\nPlot the approximate function on a finer grid that the original dataset.\n\nWarning:\nThe different functions in SciPy work with a range of different 1d and 2d arrays. To help you keep all of that straight, I will use lowercase variables for 1d arrays (x, y) and uppercase variables (X,Y) for 2d arrays. \n1d data\nWe begin with a 1d interpolation example with regularly spaced data. The function we will use it interp1d:",
"from scipy.interpolate import interp1d",
"Let's create the numerical data we will use to build our interpolation.",
"x = np.linspace(0,4*np.pi,10) # only use 10 points to emphasize this is an approx\nf = np.sin(x)",
"To create our approximate function, we call interp1d as follows, with the numerical data. Options for the kind argument includes:\n\nlinear: draw a straight line between initial points.\nnearest: return the value of the function of the nearest point.\nslinear, quadratic, cubic: use a spline (particular kinds of piecewise polynomial of a given order.\n\nThe most common case you will want to use is cubic spline (try other options):",
"sin_approx = interp1d(x, f, kind='cubic')",
"The sin_approx variabl that interp1d returns is a callable object that can be used to compute the approximate function at other points. Compute the approximate function on a fine grid:",
"newx = np.linspace(0,4*np.pi,100)\nnewf = sin_approx(newx)",
"Plot the original data points, along with the approximate interpolated values. It is quite amazing to see how the interpolation has done a good job of reconstructing the actual function with relatively few points.",
"plt.plot(x, f, marker='o', linestyle='', label='original data')\nplt.plot(newx, newf, marker='.', label='interpolated');\nplt.legend();\nplt.xlabel('x')\nplt.ylabel('f(x)');",
"Let's look at the absolute error between the actual function and the approximate interpolated function:",
"plt.plot(newx, np.abs(np.sin(newx)-sin_approx(newx)))\nplt.xlabel('x')\nplt.ylabel('Absolute error');",
"1d non-regular data\nIt is also possible to use interp1d when the x data is not regularly spaced. To show this, let's repeat the above analysis with randomly distributed data in the range $[0,4\\pi]$. Everything else is the same.",
"x = 4*np.pi*np.random.rand(15)\nf = np.sin(x)\n\nsin_approx = interp1d(x, f, kind='cubic')\n\n# We have to be careful about not interpolating outside the range\nnewx = np.linspace(np.min(x), np.max(x),100)\nnewf = sin_approx(newx)\n\nplt.plot(x, f, marker='o', linestyle='', label='original data')\nplt.plot(newx, newf, marker='.', label='interpolated');\nplt.legend();\nplt.xlabel('x')\nplt.ylabel('f(x)');\n\nplt.plot(newx, np.abs(np.sin(newx)-sin_approx(newx)))\nplt.xlabel('x')\nplt.ylabel('Absolute error');",
"Notice how the absolute error is larger in the intervals where there are no points.\n2d structured\nFor the 2d case we want to construct a scalar function of two variables, given\n$$ {x_i, y_i, f_i} \\rightarrow f(x,y) $$\nFor now, we will assume that the points ${x_i,y_i}$ are on a structured grid of points. This case is covered by the interp2d function:",
"from scipy.interpolate import interp2d",
"Here is the actual function we will use the generate our original dataset:",
"def wave2d(x, y):\n return np.sin(2*np.pi*x)*np.sin(3*np.pi*y)",
"Build 1d arrays to use as the structured grid:",
"x = np.linspace(0.0, 1.0, 10)\ny = np.linspace(0.0, 1.0, 10)",
"Build 2d arrays to use in computing the function on the grid points:",
"X, Y = np.meshgrid(x, y)\nZ = wave2d(X, Y)\n\nX",
"Here is a scatter plot of the points overlayed with the value of the function at those points:",
"plt.pcolor(X, Y, Z)\nplt.colorbar();\nplt.scatter(X, Y);\nplt.xlim(0,1)\nplt.ylim(0,1)\nplt.xlabel('x')\nplt.ylabel('y');",
"You can see in this plot that the function is not smooth as we don't have its value on a fine grid.\nNow let's compute the interpolated function using interp2d. Notice how we are passing 2d arrays to this function:",
"wave2d_approx = interp2d(X, Y, Z, kind='cubic')",
"Compute the interpolated function on a fine grid:",
"xnew = np.linspace(0.0, 1.0, 40)\nynew = np.linspace(0.0, 1.0, 40)\nXnew, Ynew = np.meshgrid(xnew, ynew) # We will use these in the scatter plot below\nFnew = wave2d_approx(xnew, ynew) # The interpolating function automatically creates the meshgrid!\n\nFnew.shape",
"Plot the original course grid of points, along with the interpolated function values on a fine grid:",
"plt.pcolor(xnew, ynew, Fnew);\nplt.colorbar();\nplt.scatter(X, Y, label='original points')\nplt.scatter(Xnew, Ynew, marker='.', color='green', label='interpolated points')\nplt.xlim(0,1)\nplt.ylim(0,1)\nplt.xlabel('x')\nplt.ylabel('y');\nplt.legend(bbox_to_anchor=(1.2, 1), loc=2, borderaxespad=0.);",
"Notice how the interpolated values (green points) are now smooth and continuous. The amazing thing is that the interpolation algorithm doesn't know anything about the actual function. It creates this nice approximation using only the original course grid (blue points).\n2d unstructured\nIt is also possible to perform interpolation when the original data is not on a regular grid. For this, we will use the griddata function:",
"from scipy.interpolate import griddata",
"There is an important difference between griddata and the interp1d/interp2d:\n\ninterp1d and interp2d return callable Python objects (functions).\ngriddata returns the interpolated function evaluated on a finer grid.\n\nThis means that you have to pass griddata an array that has the finer grid points to be used. Here is the course unstructured grid we will use:",
"x = np.random.rand(100)\ny = np.random.rand(100)",
"Notice how we pass these 1d arrays to our function and don't use meshgrid:",
"f = wave2d(x, y)",
"It is clear that our grid is very unstructured:",
"plt.scatter(x, y);\nplt.xlim(0,1)\nplt.ylim(0,1)\nplt.xlabel('x')\nplt.ylabel('y');",
"To use griddata we need to compute the final (strcutured) grid we want to compute the interpolated function on:",
"xnew = np.linspace(x.min(), x.max(), 40)\nynew = np.linspace(y.min(), y.max(), 40)\nXnew, Ynew = np.meshgrid(xnew, ynew)\n\nXnew.shape, Ynew.shape\n\nFnew = griddata((x,y), f, (Xnew, Ynew), method='cubic', fill_value=0.0)\n\nFnew.shape\n\nplt.pcolor(Xnew, Ynew, Fnew, label=\"points\")\nplt.colorbar()\nplt.scatter(x, y, label='original points')\nplt.scatter(Xnew, Ynew, marker='.', color='green', label='interpolated points')\nplt.xlim(0,1)\nplt.ylim(0,1)\nplt.xlabel('x')\nplt.ylabel('y');\nplt.legend(bbox_to_anchor=(1.2, 1), loc=2, borderaxespad=0.);",
"Notice how the interpolated function is smooth in the interior regions where the original data is defined. However, outside those points, the interpolated function is missing (it returns nan)."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
BinRoot/TensorFlow-Book
|
ch09_cnn/Concept03_cnn.ipynb
|
mit
|
[
"Ch 09: Concept 03\nConvolution Neural Network\nLoad data from CIFAR-10.",
"import numpy as np\nimport matplotlib.pyplot as plt\nimport cifar_tools\nimport tensorflow as tf\n\nlearning_rate = 0.001\n\nnames, data, labels = \\\n cifar_tools.read_data('./cifar-10-batches-py')\n\n",
"Define the placeholders and variables for the CNN model:",
"x = tf.placeholder(tf.float32, [None, 24 * 24])\ny = tf.placeholder(tf.float32, [None, len(names)])\nW1 = tf.Variable(tf.random_normal([5, 5, 1, 64]))\nb1 = tf.Variable(tf.random_normal([64]))\nW2 = tf.Variable(tf.random_normal([5, 5, 64, 64]))\nb2 = tf.Variable(tf.random_normal([64]))\nW3 = tf.Variable(tf.random_normal([6*6*64, 1024]))\nb3 = tf.Variable(tf.random_normal([1024]))\nW_out = tf.Variable(tf.random_normal([1024, len(names)]))\nb_out = tf.Variable(tf.random_normal([len(names)]))",
"Define helper functions for the convolution and maxpool layers:",
"def conv_layer(x, W, b):\n conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n conv_with_b = tf.nn.bias_add(conv, b)\n conv_out = tf.nn.relu(conv_with_b)\n return conv_out\n\n\ndef maxpool_layer(conv, k=2):\n return tf.nn.max_pool(conv, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')",
"The CNN model is defined all within the following method:",
"def model():\n x_reshaped = tf.reshape(x, shape=[-1, 24, 24, 1])\n\n conv_out1 = conv_layer(x_reshaped, W1, b1)\n maxpool_out1 = maxpool_layer(conv_out1)\n norm1 = tf.nn.lrn(maxpool_out1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)\n conv_out2 = conv_layer(norm1, W2, b2)\n norm2 = tf.nn.lrn(conv_out2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)\n maxpool_out2 = maxpool_layer(norm2)\n\n maxpool_reshaped = tf.reshape(maxpool_out2, [-1, W3.get_shape().as_list()[0]])\n local = tf.add(tf.matmul(maxpool_reshaped, W3), b3)\n local_out = tf.nn.relu(local)\n\n out = tf.add(tf.matmul(local_out, W_out), b_out)\n return out",
"Here's the cost function to train the classifier.",
"model_op = model()\n\ncost = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(logits=model_op, labels=y)\n)\ntrain_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\ncorrect_pred = tf.equal(tf.argmax(model_op, 1), tf.argmax(y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))",
"Let's train the classifier on our data:",
"with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n onehot_labels = tf.one_hot(labels, len(names), on_value=1., off_value=0., axis=-1)\n onehot_vals = sess.run(onehot_labels)\n batch_size = len(data) // 200\n print('batch size', batch_size)\n for j in range(0, 1000):\n avg_accuracy_val = 0.\n batch_count = 0.\n for i in range(0, len(data), batch_size):\n batch_data = data[i:i+batch_size, :]\n batch_onehot_vals = onehot_vals[i:i+batch_size, :]\n _, accuracy_val = sess.run([train_op, accuracy], feed_dict={x: batch_data, y: batch_onehot_vals})\n avg_accuracy_val += accuracy_val\n batch_count += 1.\n avg_accuracy_val /= batch_count\n print('Epoch {}. Avg accuracy {}'.format(j, avg_accuracy_val))\n"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
diegocavalca/Studies
|
deep-learnining-specialization/2. improving deep neural networks/week1/Initialization.ipynb
|
cc0-1.0
|
[
"Initialization\nWelcome to the first assignment of \"Improving Deep Neural Networks\". \nTraining your neural network requires specifying an initial value of the weights. A well chosen initialization method will help learning. \nIf you completed the previous course of this specialization, you probably followed our instructions for weight initialization, and it has worked out so far. But how do you choose the initialization for a new neural network? In this notebook, you will see how different initializations lead to different results. \nA well chosen initialization can:\n- Speed up the convergence of gradient descent\n- Increase the odds of gradient descent converging to a lower training (and generalization) error \nTo get started, run the following cell to load the packages and the planar dataset you will try to classify.",
"import numpy as np\nimport matplotlib.pyplot as plt\nimport sklearn\nimport sklearn.datasets\nfrom init_utils import sigmoid, relu, compute_loss, forward_propagation, backward_propagation\nfrom init_utils import update_parameters, predict, load_dataset, plot_decision_boundary, predict_dec\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n# load image dataset: blue/red dots in circles\ntrain_X, train_Y, test_X, test_Y = load_dataset()",
"You would like a classifier to separate the blue dots from the red dots.\n1 - Neural Network model\nYou will use a 3-layer neural network (already implemented for you). Here are the initialization methods you will experiment with:\n- Zeros initialization -- setting initialization = \"zeros\" in the input argument.\n- Random initialization -- setting initialization = \"random\" in the input argument. This initializes the weights to large random values.\n- He initialization -- setting initialization = \"he\" in the input argument. This initializes the weights to random values scaled according to a paper by He et al., 2015. \nInstructions: Please quickly read over the code below, and run it. In the next part you will implement the three initialization methods that this model() calls.",
"def model(X, Y, learning_rate = 0.01, num_iterations = 15000, print_cost = True, initialization = \"he\"):\n \"\"\"\n Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.\n \n Arguments:\n X -- input data, of shape (2, number of examples)\n Y -- true \"label\" vector (containing 0 for red dots; 1 for blue dots), of shape (1, number of examples)\n learning_rate -- learning rate for gradient descent \n num_iterations -- number of iterations to run gradient descent\n print_cost -- if True, print the cost every 1000 iterations\n initialization -- flag to choose which initialization to use (\"zeros\",\"random\" or \"he\")\n \n Returns:\n parameters -- parameters learnt by the model\n \"\"\"\n \n grads = {}\n costs = [] # to keep track of the loss\n m = X.shape[1] # number of examples\n layers_dims = [X.shape[0], 10, 5, 1]\n \n # Initialize parameters dictionary.\n if initialization == \"zeros\":\n parameters = initialize_parameters_zeros(layers_dims)\n elif initialization == \"random\":\n parameters = initialize_parameters_random(layers_dims)\n elif initialization == \"he\":\n parameters = initialize_parameters_he(layers_dims)\n\n # Loop (gradient descent)\n\n for i in range(0, num_iterations):\n\n # Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.\n a3, cache = forward_propagation(X, parameters)\n \n # Loss\n cost = compute_loss(a3, Y)\n\n # Backward propagation.\n grads = backward_propagation(X, Y, cache)\n \n # Update parameters.\n parameters = update_parameters(parameters, grads, learning_rate)\n \n # Print the loss every 1000 iterations\n if print_cost and i % 1000 == 0:\n print(\"Cost after iteration {}: {}\".format(i, cost))\n costs.append(cost)\n \n # plot the loss\n plt.plot(costs)\n plt.ylabel('cost')\n plt.xlabel('iterations (per hundreds)')\n plt.title(\"Learning rate =\" + str(learning_rate))\n plt.show()\n \n return parameters",
"2 - Zero initialization\nThere are two types of parameters to initialize in a neural network:\n- the weight matrices $(W^{[1]}, W^{[2]}, W^{[3]}, ..., W^{[L-1]}, W^{[L]})$\n- the bias vectors $(b^{[1]}, b^{[2]}, b^{[3]}, ..., b^{[L-1]}, b^{[L]})$\nExercise: Implement the following function to initialize all parameters to zeros. You'll see later that this does not work well since it fails to \"break symmetry\", but lets try it anyway and see what happens. Use np.zeros((..,..)) with the correct shapes.",
"# GRADED FUNCTION: initialize_parameters_zeros \n\ndef initialize_parameters_zeros(layers_dims):\n \"\"\"\n Arguments:\n layer_dims -- python array (list) containing the size of each layer.\n \n Returns:\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", ..., \"WL\", \"bL\":\n W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])\n b1 -- bias vector of shape (layers_dims[1], 1)\n ...\n WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])\n bL -- bias vector of shape (layers_dims[L], 1)\n \"\"\"\n \n parameters = {}\n L = len(layers_dims) # number of layers in the network\n \n for l in range(1, L):\n ### START CODE HERE ### (≈ 2 lines of code)\n parameters['W' + str(l)] = np.zeros((layers_dims[l], layers_dims[l-1])) \n parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))\n ### END CODE HERE ###\n return parameters\n\nparameters = initialize_parameters_zeros([3,2,1])\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))",
"Expected Output:\n<table> \n <tr>\n <td>\n **W1**\n </td>\n <td>\n [[ 0. 0. 0.]\n [ 0. 0. 0.]]\n </td>\n </tr>\n <tr>\n <td>\n **b1**\n </td>\n <td>\n [[ 0.]\n [ 0.]]\n </td>\n </tr>\n <tr>\n <td>\n **W2**\n </td>\n <td>\n [[ 0. 0.]]\n </td>\n </tr>\n <tr>\n <td>\n **b2**\n </td>\n <td>\n [[ 0.]]\n </td>\n </tr>\n\n</table>\n\nRun the following code to train your model on 15,000 iterations using zeros initialization.",
"parameters = model(train_X, train_Y, initialization = \"zeros\")\nprint (\"On the train set:\")\npredictions_train = predict(train_X, train_Y, parameters)\nprint (\"On the test set:\")\npredictions_test = predict(test_X, test_Y, parameters)",
"The performance is really bad, and the cost does not really decrease, and the algorithm performs no better than random guessing. Why? Lets look at the details of the predictions and the decision boundary:",
"print (\"predictions_train = \" + str(predictions_train))\nprint (\"predictions_test = \" + str(predictions_test))\n\nplt.title(\"Model with Zeros initialization\")\naxes = plt.gca()\naxes.set_xlim([-1.5,1.5])\naxes.set_ylim([-1.5,1.5])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)",
"The model is predicting 0 for every example. \nIn general, initializing all the weights to zero results in the network failing to break symmetry. This means that every neuron in each layer will learn the same thing, and you might as well be training a neural network with $n^{[l]}=1$ for every layer, and the network is no more powerful than a linear classifier such as logistic regression. \n<font color='blue'>\nWhat you should remember:\n- The weights $W^{[l]}$ should be initialized randomly to break symmetry. \n- It is however okay to initialize the biases $b^{[l]}$ to zeros. Symmetry is still broken so long as $W^{[l]}$ is initialized randomly. \n3 - Random initialization\nTo break symmetry, lets intialize the weights randomly. Following random initialization, each neuron can then proceed to learn a different function of its inputs. In this exercise, you will see what happens if the weights are intialized randomly, but to very large values. \nExercise: Implement the following function to initialize your weights to large random values (scaled by *10) and your biases to zeros. Use np.random.randn(..,..) * 10 for weights and np.zeros((.., ..)) for biases. We are using a fixed np.random.seed(..) to make sure your \"random\" weights match ours, so don't worry if running several times your code gives you always the same initial values for the parameters.",
"# GRADED FUNCTION: initialize_parameters_random\n\ndef initialize_parameters_random(layers_dims):\n \"\"\"\n Arguments:\n layer_dims -- python array (list) containing the size of each layer.\n \n Returns:\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", ..., \"WL\", \"bL\":\n W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])\n b1 -- bias vector of shape (layers_dims[1], 1)\n ...\n WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])\n bL -- bias vector of shape (layers_dims[L], 1)\n \"\"\"\n \n np.random.seed(3) # This seed makes sure your \"random\" numbers will be the as ours\n parameters = {}\n L = len(layers_dims) # integer representing the number of layers\n \n for l in range(1, L):\n ### START CODE HERE ### (≈ 2 lines of code)\n parameters['W' + str(l)] = np.random.randn(layers_dims[l], layers_dims[l-1]) * 10\n parameters['b' + str(l)] = np.zeros((layers_dims[l],1))\n ### END CODE HERE ###\n\n return parameters\n\nparameters = initialize_parameters_random([3, 2, 1])\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))",
"Expected Output:\n<table> \n <tr>\n <td>\n **W1**\n </td>\n <td>\n [[ 17.88628473 4.36509851 0.96497468]\n [-18.63492703 -2.77388203 -3.54758979]]\n </td>\n </tr>\n <tr>\n <td>\n **b1**\n </td>\n <td>\n [[ 0.]\n [ 0.]]\n </td>\n </tr>\n <tr>\n <td>\n **W2**\n </td>\n <td>\n [[-0.82741481 -6.27000677]]\n </td>\n </tr>\n <tr>\n <td>\n **b2**\n </td>\n <td>\n [[ 0.]]\n </td>\n </tr>\n\n</table>\n\nRun the following code to train your model on 15,000 iterations using random initialization.",
"parameters = model(train_X, train_Y, initialization = \"random\")\nprint (\"On the train set:\")\npredictions_train = predict(train_X, train_Y, parameters)\nprint (\"On the test set:\")\npredictions_test = predict(test_X, test_Y, parameters)",
"If you see \"inf\" as the cost after the iteration 0, this is because of numerical roundoff; a more numerically sophisticated implementation would fix this. But this isn't worth worrying about for our purposes. \nAnyway, it looks like you have broken symmetry, and this gives better results. than before. The model is no longer outputting all 0s.",
"print (predictions_train)\nprint (predictions_test)\n\nplt.title(\"Model with large random initialization\")\naxes = plt.gca()\naxes.set_xlim([-1.5,1.5])\naxes.set_ylim([-1.5,1.5])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)",
"Observations:\n- The cost starts very high. This is because with large random-valued weights, the last activation (sigmoid) outputs results that are very close to 0 or 1 for some examples, and when it gets that example wrong it incurs a very high loss for that example. Indeed, when $\\log(a^{[3]}) = \\log(0)$, the loss goes to infinity.\n- Poor initialization can lead to vanishing/exploding gradients, which also slows down the optimization algorithm. \n- If you train this network longer you will see better results, but initializing with overly large random numbers slows down the optimization.\n<font color='blue'>\nIn summary:\n- Initializing weights to very large random values does not work well. \n- Hopefully intializing with small random values does better. The important question is: how small should be these random values be? Lets find out in the next part! \n4 - He initialization\nFinally, try \"He Initialization\"; this is named for the first author of He et al., 2015. (If you have heard of \"Xavier initialization\", this is similar except Xavier initialization uses a scaling factor for the weights $W^{[l]}$ of sqrt(1./layers_dims[l-1]) where He initialization would use sqrt(2./layers_dims[l-1]).)\nExercise: Implement the following function to initialize your parameters with He initialization.\nHint: This function is similar to the previous initialize_parameters_random(...). The only difference is that instead of multiplying np.random.randn(..,..) by 10, you will multiply it by $\\sqrt{\\frac{2}{\\text{dimension of the previous layer}}}$, which is what He initialization recommends for layers with a ReLU activation.",
"# GRADED FUNCTION: initialize_parameters_he\n\ndef initialize_parameters_he(layers_dims):\n \"\"\"\n Arguments:\n layer_dims -- python array (list) containing the size of each layer.\n \n Returns:\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", ..., \"WL\", \"bL\":\n W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])\n b1 -- bias vector of shape (layers_dims[1], 1)\n ...\n WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])\n bL -- bias vector of shape (layers_dims[L], 1)\n \"\"\"\n \n np.random.seed(3)\n parameters = {}\n L = len(layers_dims) - 1 # integer representing the number of layers\n \n for l in range(1, L + 1):\n ### START CODE HERE ### (≈ 2 lines of code)\n parameters['W' + str(l)] = np.random.randn(layers_dims[l], layers_dims[l-1]) * np.sqrt(2/layers_dims[l-1])\n parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))\n ### END CODE HERE ###\n \n return parameters\n\nparameters = initialize_parameters_he([2, 4, 1])\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))",
"Expected Output:\n<table> \n <tr>\n <td>\n **W1**\n </td>\n <td>\n [[ 1.78862847 0.43650985]\n [ 0.09649747 -1.8634927 ]\n [-0.2773882 -0.35475898]\n [-0.08274148 -0.62700068]]\n </td>\n </tr>\n <tr>\n <td>\n **b1**\n </td>\n <td>\n [[ 0.]\n [ 0.]\n [ 0.]\n [ 0.]]\n </td>\n </tr>\n <tr>\n <td>\n **W2**\n </td>\n <td>\n [[-0.03098412 -0.33744411 -0.92904268 0.62552248]]\n </td>\n </tr>\n <tr>\n <td>\n **b2**\n </td>\n <td>\n [[ 0.]]\n </td>\n </tr>\n\n</table>\n\nRun the following code to train your model on 15,000 iterations using He initialization.",
"parameters = model(train_X, train_Y, initialization = \"he\")\nprint (\"On the train set:\")\npredictions_train = predict(train_X, train_Y, parameters)\nprint (\"On the test set:\")\npredictions_test = predict(test_X, test_Y, parameters)\n\nplt.title(\"Model with He initialization\")\naxes = plt.gca()\naxes.set_xlim([-1.5,1.5])\naxes.set_ylim([-1.5,1.5])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)",
"Observations:\n- The model with He initialization separates the blue and the red dots very well in a small number of iterations.\n5 - Conclusions\nYou have seen three different types of initializations. For the same number of iterations and same hyperparameters the comparison is:\n<table> \n <tr>\n <td>\n **Model**\n </td>\n <td>\n **Train accuracy**\n </td>\n <td>\n **Problem/Comment**\n </td>\n\n </tr>\n <td>\n 3-layer NN with zeros initialization\n </td>\n <td>\n 50%\n </td>\n <td>\n fails to break symmetry\n </td>\n <tr>\n <td>\n 3-layer NN with large random initialization\n </td>\n <td>\n 83%\n </td>\n <td>\n too large weights \n </td>\n </tr>\n <tr>\n <td>\n 3-layer NN with He initialization\n </td>\n <td>\n 99%\n </td>\n <td>\n recommended method\n </td>\n </tr>\n</table>\n\n<font color='blue'>\nWhat you should remember from this notebook:\n- Different initializations lead to different results\n- Random initialization is used to break symmetry and make sure different hidden units can learn different things\n- Don't intialize to values that are too large\n- He initialization works well for networks with ReLU activations."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
kubeflow/kfp-tekton-backend
|
components/gcp/dataproc/submit_sparksql_job/sample.ipynb
|
apache-2.0
|
[
"Name\nData preparation using SparkSQL on YARN with Cloud Dataproc\nLabel\nCloud Dataproc, GCP, Cloud Storage, YARN, SparkSQL, Kubeflow, pipelines, components \nSummary\nA Kubeflow Pipeline component to prepare data by submitting a SparkSql job on YARN to Cloud Dataproc.\nDetails\nIntended use\nUse the component to run an Apache SparkSql job as one preprocessing step in a Kubeflow Pipeline.\nRuntime arguments\nArgument| Description | Optional | Data type| Accepted values| Default |\n:--- | :---------- | :--- | :------- | :------ | :------\nproject_id | The ID of the Google Cloud Platform (GCP) project that the cluster belongs to. | No| GCPProjectID | | |\nregion | The Cloud Dataproc region to handle the request. | No | GCPRegion|\ncluster_name | The name of the cluster to run the job. | No | String| | |\nqueries | The queries to execute the SparkSQL job. Specify multiple queries in one string by separating them with semicolons. You do not need to terminate queries with semicolons. | Yes | List | | None | \nquery_file_uri | The HCFS URI of the script that contains the SparkSQL queries.| Yes | GCSPath | | None |\nscript_variables | Mapping of the query’s variable names to their values (equivalent to the SparkSQL command: SET name=\"value\";).| Yes| Dict | | None |\nsparksql_job | The payload of a SparkSqlJob. | Yes | Dict | | None |\njob | The payload of a Dataproc job. | Yes | Dict | | None |\nwait_interval | The number of seconds to pause between polling the operation. | Yes |Integer | | 30 |\nOutput\nName | Description | Type\n:--- | :---------- | :---\njob_id | The ID of the created job. | String\nCautions & requirements\nTo use the component, you must:\n* Set up a GCP project by following this guide.\n* Create a new cluster.\n* The component can authenticate to GCP. 
Refer to Authenticating Pipelines to GCP for details.\n* Grant the Kubeflow user service account the role roles/dataproc.editor on the project.\nDetailed Description\nThis component creates a Pig job from Dataproc submit job REST API.\nFollow these steps to use the component in a pipeline:\n1. Install the Kubeflow Pipeline SDK:",
"%%capture --no-stderr\n\nKFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.14/kfp.tar.gz'\n!pip3 install $KFP_PACKAGE --upgrade",
"Load the component using KFP SDK",
"import kfp.components as comp\n\ndataproc_submit_sparksql_job_op = comp.load_component_from_url(\n 'https://raw.githubusercontent.com/kubeflow/pipelines/01a23ae8672d3b18e88adf3036071496aca3552d/components/gcp/dataproc/submit_sparksql_job/component.yaml')\nhelp(dataproc_submit_sparksql_job_op)",
"Sample\nNote: The following sample code works in an IPython notebook or directly in Python code. See the sample code below to learn how to execute the template.\nSetup a Dataproc cluster\nCreate a new Dataproc cluster (or reuse an existing one) before running the sample code.\nPrepare a SparkSQL job\nEither put your SparkSQL queries in the queires list, or upload your SparkSQL queries into a file to a Cloud Storage bucket and then enter the Cloud Storage bucket’s path in query_file_uri. In this sample, we will use a hard coded query in the queries list to select data from a public CSV file from Cloud Storage.\nFor more details about Spark SQL, see Spark SQL, DataFrames and Datasets Guide\nSet sample parameters",
"PROJECT_ID = '<Please put your project ID here>'\nCLUSTER_NAME = '<Please put your existing cluster name here>'\nREGION = 'us-central1'\nQUERY = '''\nDROP TABLE IF EXISTS natality_csv;\nCREATE EXTERNAL TABLE natality_csv (\n source_year BIGINT, year BIGINT, month BIGINT, day BIGINT, wday BIGINT,\n state STRING, is_male BOOLEAN, child_race BIGINT, weight_pounds FLOAT,\n plurality BIGINT, apgar_1min BIGINT, apgar_5min BIGINT,\n mother_residence_state STRING, mother_race BIGINT, mother_age BIGINT,\n gestation_weeks BIGINT, lmp STRING, mother_married BOOLEAN,\n mother_birth_state STRING, cigarette_use BOOLEAN, cigarettes_per_day BIGINT,\n alcohol_use BOOLEAN, drinks_per_week BIGINT, weight_gain_pounds BIGINT,\n born_alive_alive BIGINT, born_alive_dead BIGINT, born_dead BIGINT,\n ever_born BIGINT, father_race BIGINT, father_age BIGINT,\n record_weight BIGINT\n)\nROW FORMAT DELIMITED FIELDS TERMINATED BY ','\nLOCATION 'gs://public-datasets/natality/csv';\n\nSELECT * FROM natality_csv LIMIT 10;'''\nEXPERIMENT_NAME = 'Dataproc - Submit SparkSQL Job'",
"Example pipeline that uses the component",
"import kfp.dsl as dsl\nimport json\n@dsl.pipeline(\n name='Dataproc submit SparkSQL job pipeline',\n description='Dataproc submit SparkSQL job pipeline'\n)\ndef dataproc_submit_sparksql_job_pipeline(\n project_id = PROJECT_ID, \n region = REGION,\n cluster_name = CLUSTER_NAME,\n queries = json.dumps([QUERY]),\n query_file_uri = '',\n script_variables = '', \n sparksql_job='', \n job='', \n wait_interval='30'\n):\n dataproc_submit_sparksql_job_op(\n project_id=project_id, \n region=region, \n cluster_name=cluster_name, \n queries=queries, \n query_file_uri=query_file_uri,\n script_variables=script_variables, \n sparksql_job=sparksql_job, \n job=job, \n wait_interval=wait_interval)\n ",
"Compile the pipeline",
"pipeline_func = dataproc_submit_sparksql_job_pipeline\npipeline_filename = pipeline_func.__name__ + '.zip'\nimport kfp.compiler as compiler\ncompiler.Compiler().compile(pipeline_func, pipeline_filename)",
"Submit the pipeline for execution",
"#Specify pipeline argument values\narguments = {}\n\n#Get or create an experiment and submit a pipeline run\nimport kfp\nclient = kfp.Client()\nexperiment = client.create_experiment(EXPERIMENT_NAME)\n\n#Submit a pipeline run\nrun_name = pipeline_func.__name__ + ' run'\nrun_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)",
"References\n\nSpark SQL, DataFrames and Datasets Guide\nSparkSqlJob\nCloud Dataproc job\n\nLicense\nBy deploying or using this software you agree to comply with the AI Hub Terms of Service and the Google APIs Terms of Service. To the extent of a direct conflict of terms, the AI Hub Terms of Service will control."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
dmolina/es_intro_python
|
03-example_iris.ipynb
|
gpl-3.0
|
[
"Getting started in scikit-learn with the famous iris dataset\nFrom the video series: Introduction to machine learning with scikit-learn\nAgenda\n\nWhat is the famous iris dataset, and how does it relate to machine learning?\nHow do we load the iris dataset into scikit-learn?\nHow do we describe a dataset using machine learning terminology?\nWhat are scikit-learn's four key requirements for working with data?\n\nIntroducing the iris dataset\n\n\n50 samples of 3 different species of iris (150 samples total)\nMeasurements: sepal length, sepal width, petal length, petal width",
"from IPython.display import IFrame\nIFrame('http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', width=300, height=200)",
"Machine learning on the iris dataset\n\nFramed as a supervised learning problem: Predict the species of an iris using the measurements\nFamous dataset for machine learning because prediction is easy\nLearn more about the iris dataset: UCI Machine Learning Repository\n\nLoading the iris dataset into scikit-learn",
"# import load_iris function from datasets module\nfrom sklearn.datasets import load_iris\n\n# save \"bunch\" object containing iris dataset and its attributes\niris = load_iris()\ntype(iris)\n\n# print the iris data\nprint(iris.data)",
"Machine learning terminology\n\nEach row is an observation (also known as: sample, example, instance, record)\nEach column is a feature (also known as: predictor, attribute, independent variable, input, regressor, covariate)",
"# print the names of the four features\nprint(iris.feature_names)\n\n# print integers representing the species of each observation\nprint(iris.target)\n\n# print the encoding scheme for species: 0 = setosa, 1 = versicolor, 2 = virginica\nprint(iris.target_names)",
"Each value we are predicting is the response (also known as: target, outcome, label, dependent variable)\nClassification is supervised learning in which the response is categorical\nRegression is supervised learning in which the response is ordered and continuous\n\nRequirements for working with data in scikit-learn\n\nFeatures and response are separate objects\nFeatures and response should be numeric\nFeatures and response should be NumPy arrays\nFeatures and response should have specific shapes",
"# check the types of the features and response\nprint(type(iris.data))\nprint(type(iris.target))\n\n# check the shape of the features (first dimension = number of observations, second dimensions = number of features)\nprint(iris.data.shape)\n\n# check the shape of the response (single dimension matching the number of observations)\nprint(iris.target.shape)\n\n# store feature matrix in \"X\"\nX = iris.data\n\n# store response vector in \"y\"\ny = iris.target",
"Resources\n\nscikit-learn documentation: Dataset loading utilities\nJake VanderPlas: Fast Numerical Computing with NumPy (slides, video)\nScott Shell: An Introduction to NumPy (PDF)\n\nComments or Questions?\n\nEmail: kevin@dataschool.io\nWebsite: http://dataschool.io\nTwitter: @justmarkham"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
philippgrafendorfe/stackedautoencoders
|
IONOSPHERE_Autoencoder.ipynb
|
mit
|
[
"from keras.layers import Input, Dense, Dropout, Concatenate, Add\nfrom keras.models import Model\nfrom keras.datasets import mnist\nfrom keras.models import Sequential, load_model\nfrom keras.optimizers import RMSprop\nfrom keras.callbacks import TensorBoard\nfrom __future__ import print_function\nfrom keras.utils import plot_model\nfrom IPython.display import SVG\nfrom keras.utils.vis_utils import model_to_dot\nfrom sklearn import preprocessing\nfrom keras import layers\nfrom keras import initializers\nfrom matplotlib import axes\nfrom matplotlib import rc\n\nimport keras\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport pydot\nimport graphviz\nimport pandas as pd\nimport IPython\n\n%matplotlib inline\nfont = {'family' : 'monospace',\n 'weight' : 'bold',\n 'size' : 20}\n\nrc('font', **font)\n\nseed=42",
"Data Set Information\nThis radar data was collected by a system in Goose Bay, Labrador. This system consists of a phased array of 16 high-frequency antennas with a total transmitted power on the order of 6.4 kilowatts. See the paper for more details. The targets were free electrons in the ionosphere. \"Good\" radar returns are those showing evidence of some type of structure in the ionosphere. \"Bad\" returns are those that do not; their signals pass through the ionosphere. \nReceived signals were processed using an autocorrelation function whose arguments are the time of a pulse and the pulse number. There were 17 pulse numbers for the Goose Bay system. Instances in this databse are described by 2 attributes per pulse number, corresponding to the complex values returned by the function resulting from the complex electromagnetic signal.\nAttribute Information\n\nAll 34 are continuous \nThe 35th attribute is either \"good\" or \"bad\" according to the definition summarized above. This is a binary classification task. \nhttps://archive.ics.uci.edu/ml/machine-learning-databases/ionosphere/ionosphere.names\n\nData Import and preprocessing",
"# data = np.genfromtxt(\"data/ionosphere.data\")\n\ndata = pd.read_csv('data/ionosphere.data', sep=\",\", header=None)\ndata.head()\n\ndata.describe()",
"This is a very small dataset.",
"df_tab = data\ndf_tab[34] = df_tab[34].astype('category')\ntab = pd.crosstab(index=df_tab[34], columns=\"frequency\")\ntab.index.name = 'Class/Direction'\ntab/tab.sum()\n\ndata.drop(data.columns[1], inplace=True, axis=1)\ndata[34] = [1 if e is \"g\" else 0 for e in data[34]]\n\n# sample the dataframe\ndata_train = data.sample(frac=0.9, random_state=seed)\ndata_valid = data.drop(data_train.index)\n\ndf_x_train = data_train.iloc[:,:-1]\ndf_x_train = df_x_train.transform(lambda x: (x - x.min()) / (x.max() - x.min()))\ndf_y_train = data_train.iloc[:,-1]\n\ndf_x_valid = data_valid.iloc[:,:-1]\ndf_x_valid = df_x_valid.transform(lambda x: (x - x.min()) / (x.max() - x.min()))\ndf_y_valid = data_valid.iloc[:,-1]\n\ndf_x_train.describe()\n\ndf_y_train.sum()/len(df_y_train)",
"About 63% of all observations are good.\npreprocessing",
"x_train = np.array(df_x_train.as_matrix())\ny_train = np.array(pd.DataFrame(df_y_train).as_matrix())\n\nx_val = np.array(df_x_valid.as_matrix())\ny_val = np.array(pd.DataFrame(df_y_valid).as_matrix())\ny_eval = y_val\n\ny_train = keras.utils.to_categorical(y_train, 2)\ny_val = keras.utils.to_categorical(y_val, 2)",
"Set Global Parameters",
"epochsize = 60\nbatchsize = 4\nshuffle = False\ndropout = 0.1\nnum_classes = 2\ninput_dim = x_train.shape[1]\nhidden1_dim = 40\nhidden2_dim = 40\n# weights = keras.initializers.RandomNormal(mean=0.0, stddev=0.05, seed=42)",
"Train Classifier\nThe goal is to get a very accurate classifier.",
"input_data = Input(shape=(input_dim,), dtype='float32', name='main_input')\nhidden_layer1 = Dense(hidden1_dim\n , activation='relu'\n , input_shape=(input_dim,)\n# , kernel_initializer=weights\n )(input_data)\ndropout1 = Dropout(dropout)(hidden_layer1)\nhidden_layer2 = Dense(hidden2_dim\n , activation='relu'\n , input_shape=(input_dim,)\n# , kernel_initializer=weights\n )(dropout1)\ndropout2 = Dropout(dropout)(hidden_layer2)\noutput_layer = Dense(num_classes\n , activation='sigmoid'\n# , kernel_initializer=weights\n )(dropout2)\n\nmodel = Model(inputs=input_data, outputs=output_layer)\n\nmodel.compile(loss='binary_crossentropy',\n optimizer=RMSprop(),\n metrics=['accuracy'])\n\nplot_model(model, to_file='images/ionosphere_nn.png', show_shapes=True, show_layer_names=True)\n\nIPython.display.Image(\"images/ionosphere_nn.png\")\n\nmodel.fit(x_train, y_train, \n batch_size=batchsize,\n epochs=epochsize,\n verbose=0,\n shuffle=shuffle,\n validation_split=0.05)\nnn_score = model.evaluate(x_val, y_val)[1]\nprint(nn_score)\n\nfig = plt.figure(figsize=(20,10))\nplt.plot(model.history.history['val_acc'])\nplt.plot(model.history.history['acc'])\nplt.axhline(y=nn_score, c=\"red\")\nplt.text(0, nn_score, \"test: \" + str(round(nn_score, 4)), fontdict=font)\nplt.title('model accuracy for neural net with 2 hidden layers')\nplt.ylabel('accuracy')\nplt.xlabel('epochs')\nplt.legend(['valid', 'train'], loc='lower right')\nplt.show()\n\nimport itertools\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n 
plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n# Compute confusion matrix\ncnf_matrix = confusion_matrix(y_eval, model.predict(x_val).argmax(axis=-1))\nnp.set_printoptions(precision=2)\n\n# Plot non-normalized confusion matrix\nplt.figure(figsize=(16,8))\nplot_confusion_matrix(cnf_matrix, classes=['bad', 'good'],\n title='Confusion matrix, without normalization')\n\n# Plot normalized confusion matrix\nplt.figure(figsize=(16,8))\nplot_confusion_matrix(cnf_matrix, classes=['bad', 'good'], normalize=True,\n title='Normalized confusion matrix')",
"Whats the best dimensionality reduction with single autoencoder?",
"# the initial coding dimension s.t. there is no dim reduction at the beginning\nencoding_dim = input_dim\nresult = {'encoding_dim': [], 'auto_classifier_acc': []}\n\nwhile encoding_dim > 0:\n main_input = Input(shape=(input_dim,), dtype='float32', name='main_input')\n\n encoding_layer = Dense(encoding_dim\n , activation='relu'\n , name='encoder'\n# , kernel_initializer='normal'\n )\n encoding_layer_output = encoding_layer(main_input)\n decoding_layer_output = Dense(input_dim, activation='sigmoid'\n ,name='decoder_output'\n# ,kernel_initializer='normal'\n )(encoding_layer_output)\n\n x = Dense(hidden1_dim\n , activation='relu'\n# , kernel_initializer=weights\n )(encoding_layer_output)\n x = Dropout(dropout)(x)\n x = Dense(hidden2_dim\n , activation='relu'\n# , kernel_initializer=weights\n )(x)\n x = Dropout(dropout)(x)\n\n classifier_output = Dense(num_classes\n , activation='sigmoid'\n , name='main_output'\n# , kernel_initializer=weights\n )(x)\n\n auto_classifier = Model(inputs=main_input, outputs=[classifier_output, decoding_layer_output])\n\n auto_classifier.compile(optimizer=RMSprop(),\n loss={'main_output': 'binary_crossentropy', 'decoder_output': 'mean_squared_error'},\n loss_weights={'main_output': .2, 'decoder_output': .8},\n metrics=['accuracy'])\n\n auto_classifier.fit({'main_input': x_train},\n {'main_output': y_train, 'decoder_output': x_train},\n epochs=epochsize, \n batch_size=batchsize,\n shuffle=shuffle,\n validation_split=0.05,\n verbose=0)\n\n accuracy = auto_classifier.evaluate(x=x_val, y=[y_val, x_val], verbose=1)[3]\n result['encoding_dim'].append(encoding_dim)\n result['auto_classifier_acc'].append(accuracy)\n \n encoding_dim -=1\n\nresult_df = pd.DataFrame(result)\nresult_df['neural_net_acc'] = nn_score\nresult_df.head()\n\nfig = plt.figure(figsize=(20,10))\nplt.bar(result_df['encoding_dim'], result_df['auto_classifier_acc'])\nplt.axhline(y=result_df['neural_net_acc'][0], c=\"red\")\nplt.text(0, result_df['neural_net_acc'][0], \"best neural 
net: \" + str(round(result_df['neural_net_acc'][0], 4))\n ,fontdict=font)\nplt.title('model accuracy for different encoding dimensions')\nplt.ylabel('accuracy')\nplt.xlabel('dimension')\nplt.ylim(0.6, 1)",
"The result is implausible. This might be due to a very small number ob observations.",
"result_df.to_csv('results/ionosphere_results.csv')"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.