repo_name
stringlengths 6
77
| path
stringlengths 8
215
| license
stringclasses 15
values | cells
list | types
list |
|---|---|---|---|---|
amirziai/learning
|
algorithms/Merge-Sort.ipynb
|
mit
|
[
"Merge Sort\nKnown to John von Neumann in 1945, 70+ years ago\nStep 0- Testing utilities\nTake a look at resources/utils.py if you're curious.",
"import random\nrandom.seed(0)\nfrom resources.utils import run_tests",
"Step 1- split\nGiven a list let's split it into two lists right down the middle",
"def split(input_list):\n \"\"\"\n Splits a list into two pieces\n :param input_list: list\n :return: left and right lists (list, list)\n \"\"\"\n input_list_len = len(input_list)\n midpoint = input_list_len // 2\n return input_list[:midpoint], input_list[midpoint:]\n\ntests_split = [\n ({'input_list': [1, 2, 3]}, ([1], [2, 3])),\n ({'input_list': [1, 2, 3, 4]}, ([1, 2], [3, 4])),\n ({'input_list': [1, 2, 3, 4, 5]}, ([1, 2], [3, 4, 5])),\n ({'input_list': [1]}, ([], [1])),\n ({'input_list': []}, ([], []))\n]\n\nrun_tests(tests_split, split)",
"Step 2- merge sorted lists\nGiven two sorted lists we should be able to \"merge\" them into a single list as a linear operation",
"def merge_sorted_lists(list_left, list_right):\n \"\"\"\n Merge two sorted lists\n This is a linear operation\n O(len(list_right) + len(list_right))\n :param left_list: list\n :param right_list: list\n :return merged list\n \"\"\"\n # Special case: one or both of lists are empty\n if len(list_left) == 0:\n return list_right\n elif len(list_right) == 0:\n return list_left\n \n # General case\n index_left = index_right = 0\n list_merged = [] # list to build and return\n list_len_target = len(list_left) + len(list_right)\n while len(list_merged) < list_len_target:\n if list_left[index_left] <= list_right[index_right]:\n # Value on the left list is smaller (or equal so it should be selected)\n list_merged.append(list_left[index_left])\n index_left += 1\n else:\n # Right value bigger\n list_merged.append(list_right[index_right])\n index_right += 1\n \n # If we are at the end of one of the lists we can take a shortcut\n if index_right == len(list_right):\n # Reached the end of right\n # Append the remainder of left and break\n list_merged += list_left[index_left:]\n break\n elif index_left == len(list_left):\n # Reached the end of left\n # Append the remainder of right and break\n list_merged += list_right[index_right:]\n break\n \n return list_merged\n\ntests_merged_sorted_lists = [\n ({'list_left': [1, 5], 'list_right': [3, 4]}, [1, 3, 4, 5]),\n ({'list_left': [5], 'list_right': [1]}, [1, 5]),\n ({'list_left': [], 'list_right': []}, []),\n ({'list_left': [1, 2, 3, 5], 'list_right': [4]}, [1, 2, 3, 4, 5]),\n ({'list_left': [1, 2, 3], 'list_right': []}, [1, 2, 3]),\n ({'list_left': [1], 'list_right': [1, 2, 3]}, [1, 1, 2, 3]),\n ({'list_left': [1, 1], 'list_right': [1, 1]}, [1, 1, 1, 1]),\n ({'list_left': [1, 1], 'list_right': [1, 2]}, [1, 1, 1, 2]),\n ({'list_left': [3, 3], 'list_right': [1, 4]}, [1, 3, 3, 4]),\n]\n\nrun_tests(tests_merged_sorted_lists, merge_sorted_lists)",
"Step 3- merge sort\n\nMerge sort only needs to utilize the previous 2 functions\nWe need to split the lists until they have a single element\nA list with a single element is sorted (duh)\nNow we can merge these single-element (or empty) lists",
"def merge_sort(input_list):\n if len(input_list) <= 1:\n return input_list\n else:\n left, right = split(input_list)\n # The following line is the most important piece in this whole thing\n return merge_sorted_lists(merge_sort(left), merge_sort(right))\n\nrandom_list = [random.randint(1, 1000) for _ in range(100)]\ntests_merge_sort = [\n ({'input_list': [1, 2]}, [1, 2]),\n ({'input_list': [2, 1]}, [1, 2]),\n ({'input_list': []}, []),\n ({'input_list': [1]}, [1]),\n ({'input_list': [5, 1, 1]}, [1, 1, 5]),\n ({'input_list': [9, 1, 10, 2]}, [1, 2, 9, 10]),\n ({'input_list': range(10)[::-1]}, list(range(10))),\n ({'input_list': random_list}, sorted(random_list))\n]\n\nrun_tests(tests_merge_sort, merge_sort)",
"Example walk through\nmerge_sort keeps splitting until we get to single-element lists. Once we're there (the base case of recursion) the callers can start applying merge_sorted_list. For the following example here's what's going on:\n- input_list=[9, 1, 10, 2]\n- left=[9, 1] and right=[10, 2]\n- merge_sort([9, 1]) is responsible for sorting [9, 1], let's call it L1.\n- merge_sort([10, 2]) is reponsible for sorting [10, 2], let's call it R1.\nFor L1:\n- left=[9] and right=[1]\n- merge_sort([9]) returns [9] since it's the base case and merge_sort([1]) returns [1]\n- merge_sorted_lists([9], [1]) returns [1, 9] which is sorted\nSame thing happens for R1 and the result is [2, 10]. Now merge_sorted_lists(L1, R1) returns the final answer.\n<img src=\"resources/mergesort.png\">"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
anilcs13m/MachineLearning_Mastering
|
Overfitting_Ridge_Lasso.ipynb
|
gpl-2.0
|
[
"Overfitting\nCreate a dataset based on a true sinusoidal relationship\nLet's look at a synthetic dataset consisting of 30 points drawn from the sinusoid $y = \\sin(4x)$:",
"import graphlab\nimport math\nimport random\nimport numpy\nfrom matplotlib import pyplot as plt\n%matplotlib inline",
"Create random values for x in interval [0,1)",
"random.seed(98103)\nn = 30\nx = graphlab.SArray([random.random() for i in range(n)]).sort()",
"Compute y",
"y = x.apply(lambda x: math.sin(4*x))",
"Add random Gaussian noise to y",
"random.seed(1)\ne = graphlab.SArray([random.gauss(0,1.0/3.0) for i in range(n)])\ny = y + e",
"Put data into an SFrame to manipulate later",
"data = graphlab.SFrame({'X1':x,'Y':y})\ndata",
"Create a function to plot the data, since we'll do it many times",
"def plot_data(data): \n plt.plot(data['X1'],data['Y'],'k.')\n plt.xlabel('x')\n plt.ylabel('y')\n\nplot_data(data)",
"Define some useful polynomial regression functions\nDefine a function to create our features for a polynomial regression model of any degree:",
"def polynomial_features(data, deg):\n data_copy=data.copy()\n for i in range(1,deg):\n data_copy['X'+str(i+1)]=data_copy['X'+str(i)]*data_copy['X1']\n return data_copy",
"Define a function to fit a polynomial linear regression model of degree \"deg\" to the data in \"data\":",
"def polynomial_regression(data, deg):\n model = graphlab.linear_regression.create(polynomial_features(data,deg), \n target='Y', l2_penalty=0.,l1_penalty=0.,\n validation_set=None,verbose=False)\n return model",
"Define function to plot data and predictions made, since we are going to use it many times.",
"def plot_poly_predictions(data, model):\n plot_data(data)\n\n # Get the degree of the polynomial\n deg = len(model.coefficients['value'])-1\n \n # Create 200 points in the x axis and compute the predicted value for each point\n x_pred = graphlab.SFrame({'X1':[i/200.0 for i in range(200)]})\n y_pred = model.predict(polynomial_features(x_pred,deg))\n \n # plot predictions\n plt.plot(x_pred['X1'], y_pred, 'g-', label='degree ' + str(deg) + ' fit')\n plt.legend(loc='upper left')\n plt.axis([0,1,-1.5,2])",
"Create a function that prints the polynomial coefficients in a pretty way :)",
"def print_coefficients(model): \n # Get the degree of the polynomial\n deg = len(model.coefficients['value'])-1\n\n # Get learned parameters as a list\n w = list(model.coefficients['value'])\n\n # Numpy has a nifty function to print out polynomials in a pretty way\n # (We'll use it, but it needs the parameters in the reverse order)\n print 'Learned polynomial for degree ' + str(deg) + ':'\n w.reverse()\n print numpy.poly1d(w)",
"Fit a degree-2 polynomial\nFit our degree-2 polynomial to the data generated above:",
"model = polynomial_regression(data, deg=2)",
"Inspect learned parameters",
"print_coefficients(model)",
"Form and plot our predictions along a grid of x values:",
"plot_poly_predictions(data,model)",
"Fit a degree-4 polynomial",
"model = polynomial_regression(data, deg=4)\nprint_coefficients(model)\nplot_poly_predictions(data,model)",
"Fit a degree-16 polynomial",
"model = polynomial_regression(data, deg=16)\nprint_coefficients(model)",
"Woah!!!! Those coefficients are crazy! On the order of 10^6.",
"plot_poly_predictions(data,model)",
"Above: Fit looks pretty wild, too. Here's a clear example of how overfitting is associated with very large magnitude estimated coefficients.\nRidge Regression\nRidge regression aims to avoid overfitting by adding a cost to the RSS term of standard least squares that depends on the 2-norm of the coefficients $\\|w\\|$. The result is penalizing fits with large coefficients. The strength of this penalty, and thus the fit vs. model complexity balance, is controled by a parameter lambda (here called \"L2_penalty\").\nDefine our function to solve the ridge objective for a polynomial regression model of any degree:",
"def polynomial_ridge_regression(data, deg, l2_penalty):\n model = graphlab.linear_regression.create(polynomial_features(data,deg), \n target='Y', l2_penalty=l2_penalty,\n validation_set=None,verbose=False)\n return model",
"Perform a ridge fit of a degree-16 polynomial using a very small penalty strength",
"model = polynomial_ridge_regression(data, deg=16, l2_penalty=1e-25)\nprint_coefficients(model)\n\nplot_poly_predictions(data,model)",
"Perform a ridge fit of a degree-16 polynomial using a very large penalty strength",
"model = polynomial_ridge_regression(data, deg=16, l2_penalty=100)\nprint_coefficients(model)\n\nplot_poly_predictions(data,model)",
"Let's look at fits for a sequence of increasing lambda values",
"for l2_penalty in [1e-25, 1e-10, 1e-6, 1e-3, 1e2]:\n model = polynomial_ridge_regression(data, deg=16, l2_penalty=l2_penalty)\n print 'lambda = %.2e' % l2_penalty\n print_coefficients(model)\n print '\\n'\n plt.figure()\n plot_poly_predictions(data,model)\n plt.title('Ridge, lambda = %.2e' % l2_penalty)",
"Perform a ridge fit of a degree-16 polynomial using a \"good\" penalty strength\nWe will learn about cross validation later in this course as a way to select a good value of the tuning parameter (penalty strength) lambda. Here, we consider \"leave one out\" (LOO) cross validation, which one can show approximates average mean square error (MSE). As a result, choosing lambda to minimize the LOO error is equivalent to choosing lambda to minimize an approximation to average MSE.",
"# LOO cross validation -- return the average MSE\ndef loo(data, deg, l2_penalty_values):\n # Create polynomial features\n polynomial_features(data, deg)\n \n # Create as many folds for cross validatation as number of data points\n num_folds = len(data)\n folds = graphlab.cross_validation.KFold(data,num_folds)\n \n # for each value of l2_penalty, fit a model for each fold and compute average MSE\n l2_penalty_mse = []\n min_mse = None\n best_l2_penalty = None\n for l2_penalty in l2_penalty_values:\n next_mse = 0.0\n for train_set, validation_set in folds:\n # train model\n model = graphlab.linear_regression.create(train_set,target='Y', \n l2_penalty=l2_penalty,\n validation_set=None,verbose=False)\n \n # predict on validation set \n y_test_predicted = model.predict(validation_set)\n # compute squared error\n next_mse += ((y_test_predicted-validation_set['Y'])**2).sum()\n \n # save squared error in list of MSE for each l2_penalty\n next_mse = next_mse/num_folds\n l2_penalty_mse.append(next_mse)\n if min_mse is None or next_mse < min_mse:\n min_mse = next_mse\n best_l2_penalty = l2_penalty\n \n return l2_penalty_mse,best_l2_penalty",
"Run LOO cross validation for \"num\" values of lambda, on a log scale",
"l2_penalty_values = numpy.logspace(-4, 10, num=10)\nl2_penalty_mse,best_l2_penalty = loo(data, 16, l2_penalty_values)",
"Plot results of estimating LOO for each value of lambda",
"plt.plot(l2_penalty_values,l2_penalty_mse,'k-')\nplt.xlabel('$\\L2_penalty$')\nplt.ylabel('LOO cross validation error')\nplt.xscale('log')\nplt.yscale('log')",
"Find the value of lambda, $\\lambda_{\\mathrm{CV}}$, that minimizes the LOO cross validation error, and plot resulting fit",
"best_l2_penalty\n\nmodel = polynomial_ridge_regression(data, deg=16, l2_penalty=best_l2_penalty)\nprint_coefficients(model)\n\nplot_poly_predictions(data,model)",
"Lasso Regression\nLasso regression jointly shrinks coefficients to avoid overfitting, and implicitly performs feature selection by setting some coefficients exactly to 0 for sufficiently large penalty strength lambda (here called \"L1_penalty\"). In particular, lasso takes the RSS term of standard least squares and adds a 1-norm cost of the coefficients $\\|w\\|$.\nDefine our function to solve the lasso objective for a polynomial regression model of any degree:",
"def polynomial_lasso_regression(data, deg, l1_penalty):\n model = graphlab.linear_regression.create(polynomial_features(data,deg), \n target='Y', l2_penalty=0.,\n l1_penalty=l1_penalty,\n validation_set=None, \n solver='fista', verbose=False,\n max_iterations=3000, convergence_threshold=1e-10)\n return model",
"Explore the lasso solution as a function of a few different penalty strengths\nWe refer to lambda in the lasso case below as \"l1_penalty\"",
"for l1_penalty in [0.0001, 0.01, 0.1, 10]:\n model = polynomial_lasso_regression(data, deg=16, l1_penalty=l1_penalty)\n print 'l1_penalty = %e' % l1_penalty\n print 'number of nonzeros = %d' % (model.coefficients['value']).nnz()\n print_coefficients(model)\n print '\\n'\n plt.figure()\n plot_poly_predictions(data,model)\n plt.title('LASSO, lambda = %.2e, # nonzeros = %d' % (l1_penalty, (model.coefficients['value']).nnz()))",
"Above: We see that as lambda increases, we get sparser and sparser solutions. However, even for our non-sparse case for lambda=0.0001, the fit of our high-order polynomial is not too wild. This is because, like in ridge, coefficients included in the lasso solution are shrunk relative to those of the least squares (unregularized) solution. This leads to better behavior even without sparsity. Of course, as lambda goes to 0, the amount of this shrinkage decreases and the lasso solution approaches the (wild) least squares solution."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
hyperion-rt/paper-2017-sed-models
|
notebook/using_the_models.ipynb
|
bsd-2-clause
|
[
"Using the Robitaille (2017) YSO SED models\nThis notebook demonstrates how to use the YSO SED models published in Robitaille (2017).\nFormat\nThe published models include a tar file for each set of models. The name of each\nmodel set is composed of several characters that indicate which component is present. The characters, in order, are:\n\ns (star)\np (passive disk)\np (power-law envelope) or u (Ulrich envelope)\nb (bipolar cavities)\nh (inner hole)\nm (ambient medium)\ni (interstellar dust).\n\nIf a component is absent, a hyphen (-) is given instead.\nEach tar file expands to give a directory with the same model set name. The format for each directory is described here. \nThe easiest way to access and fit these models in Python is to make use of the astropy and sedfitter packages.\nAccessing Parameters\nEach model directory contains a parameters.fits file that includes the parameters for all the models. To read this, you can use for example the astropy.table package:",
"from astropy.table import Table\n\nt = Table.read('sp--s-i/parameters.fits')",
"We can take a look at the first 15 rows of the table:",
"t[:15]",
"The model name is a unique name that identifies each model and the viewing angle is indicated in the suffix (e.g. _01). The value of the inclination is also given in the inclination column. The remaining columns give the parameters for the models (which columns are present depends on the model set). The scattering column indicates whether scattered light is included in the SEDs (for some very optically thick models, scattering was disabled).\nAccessing SEDs\nThe easiest way to access the SEDs in Python is to use the SEDCube class from the sedfitter package to read in the flux.fits file for the model set you are interested in:",
"from sedfitter.sed import SEDCube\n\nseds = SEDCube.read('sp--s-i/flux.fits')",
"This 'SED cube' is an efficient way to store the models fluxes in a single 3D array, where the three dimensions are the model, the aperture, and the wavelength.\nThe model names can be accessed with:",
"print(seds.names)",
"while the apertures, wavelengths, and frequencies can be accessed with:",
"print(seds.apertures)\n\nprint(seds.wav)\n\nprint(seds.nu)",
"A valid flag is used to indicate models that do not have complete/valid SEDs (for example because the model run did not complete):",
"print(seds.valid)",
"The fluxes and errors can be obtained using the val and\nerror attributes. We can check the shape of these arrays to check that they are indeed 3D arrays:",
"seds.val.shape\n\nseds.val.shape",
"For this model set, there are 90000 models (10000 physical models times 9 inclinations), 20 apertures, and 200 wavelengths.\nTo access a specific SED, you can call seds.get_sed using a particular\nmodel name:",
"sed = seds.get_sed('00p13Elr_03')",
"The wavelength, flux, and error can then be accessed with:",
"print(sed.wav)\n\nprint(sed.flux)\n\nprint(sed.error)",
"The SED is a 2D array with dimensions the number of apertures (20) and the number of wavelengths (200):",
"sed.flux.shape",
"We can use this to visualize the SED:",
"%matplotlib inline\nimport matplotlib.pyplot as plt\n\n_ = plt.loglog(sed.wav, sed.flux.transpose(), 'k-', alpha=0.5)\n_ = plt.ylim(1e-2, 1e8)",
"Fitting SEDs to data\nTo fit SEDs to observed data, you can also make use of the sedfitter package. What follows is a very short example - for more information on using the sedfitter package, be sure to read over the documentation.\nTo demonstrate this, we will fit the above models to the data for the NGC2264 source modelled in Robitaille (2017):",
"%cat data_ngc2264_20",
"We start off by setting up the list of filters/wavelengths and approximate aperture radii used:",
"from astropy import units as u\n\nfilters = ['BU', 'BB', 'BV', 'BR', 'BI', '2J', '2H', '2K', 'I1', 'I2',\n 5.580 * u.micron, 7.650 * u.micron, 9.95 * u.micron,\n 12.93 * u.micron, 17.72 * u.micron, 24.28 * u.micron,\n 29.95 * u.micron, 35.06 * u.micron,\n 'M2', 'M3', 'W1', 'W2']\n\napertures = [3., 3., 3., 3., 3., 3., 3., 3., 3., 3., 3., 3., 3., 3., 3., 3.,\n 3., 3., 20., 30., 30., 30.] * u.arcsec",
"We also set up the extinction law used in Robitaille (2017):",
"from sedfitter.extinction import Extinction\n\nextinction = Extinction.from_file('whitney.r550.par')",
"Finally, we run the fitting:",
"import sedfitter\n\nsedfitter.fit('data_ngc2264_20', filters, apertures, 'sp--s-i',\n 'output_ngc2264_sp--s-i.fitinfo',\n extinction_law=extinction,\n distance_range=[0.869, 0.961] * u.kpc,\n av_range=[0., 40.],\n output_format=('F', 3.),\n output_convolved=False, remove_resolved=True)",
"We now generate the SED plots with the data to examine the fit:",
"sedfitter.plot('output_ngc2264_sp--s-i.fitinfo',\n output_dir='plots_sed', format='png',\n plot_mode='A',\n select_format=('F', 3.),\n show_convolved=False, show_sed=True,\n x_mode='M', x_range=(0.1, 2000),\n y_mode='M', y_range=(1.e-14, 2e-8))\n\nfrom IPython.display import Image\nImage('plots_sed/20.png')"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
amorgun/shad-ml-notebooks
|
notebooks/s2-1/Ensembles.ipynb
|
unlicense
|
[
"%pylab inline\nimport pandas as pd\nimport xgboost as xgb\nfrom xgboost.sklearn import XGBRegressor\nfrom sklearn import metrics\nfrom sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\nrcParams['figure.figsize'] = 8, 5",
"Dataset",
"def ground_truth(x):\n return x * np.sin(x) + np.sin(2 * x)\n\ndef gen_data(n_samples=200):\n np.random.seed(13)\n x = np.random.uniform(0, 10, size=n_samples)\n x.sort()\n y = ground_truth(x) + 0.75 * np.random.normal(size=n_samples)\n train_mask = np.random.randint(0, 2, size=n_samples).astype(np.bool)\n x_train, y_train = x[train_mask, np.newaxis], y[train_mask]\n x_test, y_test = x[~train_mask, np.newaxis], y[~train_mask]\n return x_train, x_test, y_train, y_test\n\nX_train, X_test, y_train, y_test = gen_data(200)\n\nx_plot = np.linspace(0, 10, 500)\n\ndef plot_data(figsize=(8, 5)):\n fig = plt.figure(figsize=figsize)\n gt = plt.plot(x_plot, ground_truth(x_plot), alpha=0.4, label='ground truth')\n\n plt.scatter(X_train, y_train, s=10, alpha=0.4)\n plt.scatter(X_test, y_test, s=10, alpha=0.4, color='red')\n plt.xlim((0, 10))\n plt.ylabel('y')\n plt.xlabel('x')\n\nplot_data(figsize=(8, 5));",
"RF",
"n_estimators = 1000\nrf = RandomForestRegressor(n_estimators=n_estimators, random_state=30)\nloss = metrics.mean_squared_error\nrf.fit(X_train, y_train)\nrf_errors = []\nrf_estimators = rf.estimators_\nfor n in range(1, n_estimators):\n rf.estimators_ = rf_estimators[:n]\n rf_errors.append(loss(y_test, rf.predict(X_test)))\n\ndef plot_errors(*args, fig=None, **kwargs):\n ax = gca()\n ax.plot(*args, **kwargs)\n ax.set_ylim((0, 1.5))\n ylabel('MSE')\n xlabel('n_estimators')\n\nplot_errors(rf_errors, c='r')",
"GBT\n$$\\tilde{x}^m = \\tilde{x}^{m-1} - \\lambda_m \\nabla f(\\tilde{x}^{m-1})$$\n$$\\tilde{y}^m = \\tilde{y}^{m-1} - \\lambda_m \\nabla Q(\\tilde{y}^{m-1}, y)$$\n\n$$b_i = learn(X, -\\nabla Q(\\tilde{y}^{m-1}, y))$$\nExample\n$$ Q(\\tilde{y}^m, y) = \\frac12 \\sum_{i=1}^L (\\tilde{y}i^m - y_i)^2 $$\n$$ -\\nabla Q(\\tilde{y}^m, y) \n= -\\nabla \\left( \\frac12 \\sum{i=1}^L (\\tilde{y}i^m - y_i)^2 \\right)\n= \\sum{i=1}^L (y_i - \\tilde{y}_i^m)\n$$\nAnyBoost\n$$ Q(\\tilde{y}^m, y) = \\frac1L \\sum_{i=1}^L \\mathcal{L}(\\tilde{y}^m y) $$\n$$ -\\nabla Q(\\tilde{y}^m, y) \n= -\\frac1L \\left [\\frac{\\partial \\mathcal{L}(\\tilde{y}i^m y_i)}{\\partial \\tilde{y}_i^m} \\right]{i=1}^L \n= -\\frac1L \\left[ \\frac{\\partial \\mathcal{L}(M_i^m)}{\\partial M_i^m} y_i \\right]_{i=1}^L$$\nAdaBoost\n$$w_i^m \n= - \\frac{\\partial \\mathcal{L}(M_i^m)}{\\partial M_i^m} y_i\n= - \\frac{\\partial \\exp(-M_i^m)}{\\partial M_i^m} y_i\n= \\exp(-M_i^m)\n= \\exp \\left( -y_i \\sum_{t=1}^{m} \\lambda_t b_t(x_i) \\right)\n= \\prod_{t=1}^m \\exp(-y_i \\lambda_t b_t(x_i))\n= w_i^{m-1} \\exp(-y_i \\lambda_m b_m(x_i))\n$$",
"def get_ensemble_errors(clf):\n clf.fit(X_train, y_train)\n train_loss , test_loss= [], []\n estimators = clf.estimators_\n for n in range(1, n_estimators):\n clf.estimators_ = estimators[:n]\n train_loss.append(loss(y_train, clf.predict(X_train)))\n test_loss.append(loss(y_test, clf.predict(X_test)))\n return train_loss , test_loss\n \nsklearn_gbt = GradientBoostingRegressor(\n n_estimators=n_estimators,\n max_depth=1,\n learning_rate=1.0,\n random_state=50)\n\ngbt_train_loss , gbt_test_loss = get_ensemble_errors(sklearn_gbt)\n\nplot_errors(rf_errors, c='r', label='RF')\nplot_errors(gbt_train_loss, c='b', label='GBT train')\nplot_errors(gbt_test_loss, c='g', label='GBT test')\nlegend();\n\nfor clf_args in [\n {'subsample': 1, 'learning_rate': 1},\n {'subsample': 1, 'learning_rate': 0.1},\n {'subsample': 1, 'learning_rate': 0.01},\n {'subsample': 0.5, 'learning_rate': 1},\n {'subsample': 0.5, 'learning_rate': 0.1},\n {'subsample': 0.5, 'learning_rate': 0.01},\n ]:\n clf = GradientBoostingRegressor(\n n_estimators=n_estimators,\n random_state=50,\n **clf_args)\n suffix = \"subsample: {subsample}; rate: {learning_rate}\".format(**clf_args)\n train_loss , test_loss = get_ensemble_errors(clf)\n # plot_errors(train_loss, label='Train ' + suffix)\n plot_errors(test_loss, label='Test ' + suffix + \"; Best: {:.3}\".format(min(test_loss)))\nplot_errors(rf_errors, label='RF test')\n\nax = gca()\nax.set_ylim((0, 2.5))\nlegend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.);",
"Links\n\nhttps://xgboost.readthedocs.io/en/latest/model.html\nhttps://github.com/dmlc/xgboost/tree/master/demo\nhttps://www.datarobot.com/blog/gradient-boosted-regression-trees/\nhttps://www.analyticsvidhya.com/blog/2016/03/complete-guide-parameter-tuning-xgboost-with-codes-python/\nhttps://zhanpengfang.github.io/418home.html"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
reata/ProbabilityAndStatistics
|
Random Variable and its Distribution.ipynb
|
mit
|
[
"随机变量及其分布 Random Variable and its Distribution\n包括以下内容:\n 1. 随机变量 Random Variable\n 2. 伯努利分布 Bernoulli Distribution\n 3. 二项分布 Binomial Distribution\n 4. 泊松分布 Poisson Distribution\n 5. 均匀分布 Uniform Distribution\n 6. 指数分布 Exponential Distribution\n 7. 正态分布 Normal Distribution\n引入科学计算及绘图相关包",
"import math\nimport numpy as np\nimport pandas as pd\nfrom pandas import Series, DataFrame\n# 引入绘图包\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_style('whitegrid')\n%matplotlib inline",
"1. 随机变量 Random Variable\n定义:设随机试验的样本空间为 S = {e}。X = X(e)是定义在样本空间S上的实值单值函数。称 X = X(e)为随机变量。\n例:将一枚硬币抛掷三次,观察出现正面和反面的情况,样本空间是\nS = {HHH, HHT, HTH, THH, HTT, THT, TTH, TTT}。\n以X记三次投掷得到正面H的总数,那么,对于样本空间 S = {e}(用 e 代表样本空间的元素,而将样本空间记成{e})中的每一个样本点 e,X 都有一个数与之对应。X 是定义在样本空间 S 上的一个实值单值函数。它的定义域是样本空间 S,值域是实数集合{0, 1, 2, 3}。使用函数记号可将X写成\n$$ X = X(e) =\\left{\n\\begin{aligned}\n3 & , e = HHH, \\\n2 & , e = HHT, HTH, THH, \\\n1 & , e = HTT, THT, TTH, \\\n0 & , e = TTT.\n\\end{aligned}\n\\right.\n$$\n有许多随机试验,它们的结果本身是一个数,即样本点 e 本身是一个数。我们令 X = X(e) = e,那么 X 就是一个随机变量。例如,用 Y 记某车间一天的缺勤人数,以 W 记某地区第一季度的降雨量,以 Z 记某工厂一天的耗电量,以 N 记某医院一天的挂号人数。那么 Y, W, Z, N 都是随机变量。\n一般用大写的字母如 X, Y, Z, W, ... 表示随机变量,而以小写字母 x, y, z, w, ... 表示实数。\n随机变量的取值随试验的结果而定,而试验的各个结果出现有一定的概率,因为随机变量的取值有一定的概率。例如,在上述例子中 X 取值为2,记成{X = 2},对应样本点的集合 A = {HHT, HTH, THH},这是一个时间,当且仅当事件 A 发生时有{X = 2}。我们称概率P(A) = P{HHT, HTH, THH}为{X = 2}的概率,即P{X = 2} = P(A) = 3 / 8。以后,还将事件 A = {HHT, HTH, THH}说成是事件{X = 2}。类似地有\n$$ P{X \\leq 1} = P{HTT, THT, TTH, TTT} = \\frac{1}{2} $$\n一般,若 L 是一个实数集合,将 X 在 L 上的取值写成{X ∈ L}。它表示事件 B = {e | X(e) ∈ L},即 B 是由 S 中使得 X(e) ∈ L 的所有样本点 e 所组成的事件,此时有\n$$ P{X \\in L } = P(B) = P{ e | X(e) \\in L} $$\n1.1 离散型随机变量 Discrete Random Variable\n有些随机变量,它全部可能取到的值是有限个或可列无限多个,这种随机变量称为离散型随机变量。\n容易知道,要掌握一个离散型随机变量 X 的统计规律,必须且只需知道 X 的所有可能取值以及取每一个可能值的概率。\n设离散型随机变量 X 的所有可能取的值为 $x_k$(k = 1, 2, ...),X 取各个可能值的概率,即事件{X = $x_k$}的概率,为\n$$ P{X = X_k } = p_k,k = 1,2, ... $$\n由概率的定义,p<sub>k</sub>满足如下两个条件:\n$$ p_k \\geq 0, k = 1,2,...; $$\n$$ \\begin{equation}\n\\sum_{k=1}^\\infty p_k = 1\n\\end{equation}\n$$\n其中,条件二是由于 ${X = x_1} \\cup {X = x_2} \\cup ... $ 是必然事件,且 ${X = x_1} \\cap {X = x_2} \\cap ... = \\emptyset $,$ k \\neq j $,故$ 1 = P[\\bigcup_{k=1}^\\infty {X = x_k}] = \\sum_{k=1}^\\infty P{X = x_k} $,即$ \\sum_{k=1}^\\infty p_k = 1 $。\n我们称$ P{X = X_k } = p_k,k = 1,2, ... $为离散型随机变量 X 的分布律。分布律也可以用表格的形式来表示\n$$\\begin{array}{rr} \\hline\nX &x_1 &x_2 &... &x_n &... \\ \\hline\nP_k &p_1 &p_2 &... &p_n &... 
\\ \\hline\n\\end{array}$$\n将分布律写成函数的形式,表示离散型随机变量在各特定取值上的概率,该函数称为概率质量函数 Probability Mass Function, pmf。\n1.2 随机变量的分布函数 Distribution Function of Random Variable\n对于非离散型随机变量 X,由于其可能取的值不能一一列举出来,因而就不能像离散型随机变量那样可以用分布律来描述它。另外,我们通常所遇到的非离散型随机变量取任一指定的实数值的概率都等于0。再者,在实际中,对于这样的随机变量,我们并不会对取某一特定值的概率感兴趣,而是考虑在某个区间$(x_1, x_2]$内的概率:$P{x_1 < X \\leq x_2 }$。但由于$ P{x_1 < X \\leq x_2 } = P{X \\leq x_2} - P{X \\leq x_1} $,所以我们只需要知道$ P{X \\leq x_2 } $和$ P{X \\leq x_1 } $就可以了。下面引入随机变量的分布函数的概念。\n定义:设 X 是一个随机变量,x是任意实数,函数$ F(x) = P{X \\leq x }, -\\infty < x < \\infty $称为X的分布函数。\n对于任意实数$x_1, x_2(x_1 < x_2)$,有$P{x_1 < X \\leq x_2} = P{X \\leq x_2}-P{X \\leq x_1} = F(x_2) - F(x_1)$,因此,若已知 X 的分布函数,我们就知道 X 落在任一区间$(x_1, x_2]$上的概率,从这个意义上说,分布函数完整地描述了随机变量的统计规律性。\n分布函数是一个普通的函数,正是通过它,我们将能用数学分析的方法来研究随机变量。\n如果将 X 看成是数轴上的随机点的坐标,那么,分布函数F(x)在x处的函数值就表示X落在区间$(-\\infty, x_2]$上的概率。\n分布函数F(x)具有以下的基本性质:\n\nF(x)是一个不减函数。事实上,对于任意实数$x_1, x_2(x_1 < x_2)$,有\n$$F(x_2) - F(x_1) = P{x_1 < X \\leq x_2} \\geq 0$$\n$0 \\leq F(x) \\leq 1$,且\n$$F(-\\infty) = \\lim_{x \\to -\\infty} = 0, F(\\infty) = \\lim_{x \\to \\infty} = 1$$\n$F(x+0)=F(x)$,即F(x)是右连续的。\n\n反之,可证具备上述性质的函数F(x)必是某个随机变量的分布函数。\n1.3 连续型随机变量及其概率密度 Continuous Random Variable and its Probability Density\n如果对于随机变量 X 的分布函数F(x),存在非负可积函数f(x),使对于任意实数 x 有\n$$ F(x) = \\int_{-\\infty}^x f(t)dt $$\n则称 X 为连续型随机变量,f(x)称为 X 的概率密度函数,简称概率密度。\n据数学分析的知识知连续型随机变量的分布函数是连续函数。\n由定义知道,概率密度f(x)具有以下性质:\n\n$f(x) \\geq 0$;\n$\\int_{-\\infty}^{\\infty} f(x)dx = 1$;\n对于任意实数$x_1, x_2(x_1 \\leq x_2)$,\n$$ P{x_1 < X \\leq x_2} = F(x_2) - F(x_1) = \\int_{x_1}^{x_2} f(x)dx $$\n若f(x)在点 x 处连续,则有$F'(x) = f(x)$\n\n反之,若f(x)具备性质1,2,引入$G(x) = \\int_{-\\infty}^x f(t)dt$,它是某一随机变量 X 的分布函数,f(x)是 X 的概率密度。\n由性质2知道介于曲线y=f(x)与Ox轴之间的面积等于1,由3知道 X 落在区间$(x_1, x_2]$的概率$P{x_1 < X \\leq x_2}$等于区间$(x_1, x_2]$上曲线y=f(x)之下的曲边梯形的面积。\n2. 
伯努利分布 Bernoulli Distribution\n伯努利分布又称(0 - 1)分布\n设随机变量X只可能取 0 与 1 两个值,它的分布律是\n$$ P{X=k} = p^k(1-p)^{1-k}, k=0,1 (0 < p < 1) $$\n则称X服从以p为参数的(0 - 1)分布或两点分布。\n(0 - 1)分布的分布律也可写成\n$$\\begin{array}{rr} \\hline\nX &0 &1 \\ \\hline\nP_k &1-p &p \\ \\hline\n\\end{array}$$\n3. 二项分布 Binomial Distribution\n设试验 E 只有两个可能结果:$A$及$\\overline{A}$,则称 E 为伯努利试验,设$P(A)=p(0<p<1)$,此时$P(\\overline{A})=1-p$。将 E 独立重复n次,则称这一串重复的独立试验为n重伯努利试验。\n这里“重复”是指在每次试验中$P(A)=p$保持不变;“独立”是指各次试验的结果互不影响,若以$C_i$记第 i 次试验的结果,$C_i$为$A$或$\\overline{A}$, i=1,2,...,n。“独立”是指\n$$ P(C_{1}C_{2}...C{n}) = P(C_1)P(C_2)...P(C_n) $$\n以 X 表示n重伯努利试验中事件 A 发生的次数,X 是一个随机变量,X 所有可能取的值为0, 1, 2, ..., n。由于各次试验是相互独立的,因为事件 A 在指定的$k(0\\leq k \\leq n)$次试验中发生,在其他n - k次试验中 A 不发生的概率为\n$$ \\underbrace{\\left({p \\cdot p \\cdot ... \\cdot p}\\right)}k \\cdot \\underbrace{\\left({(1-p) \\cdot (1-p) \\cdot ... \\cdot (1-p)}\\right)}{n-k} = p^{k}(1-p)^{n-k}$$\n这种指定的方式共有$\\binom{n}{k}$种,它们是两两互不相容的,故在 n 次试验中 A 发生 k 次的概率为$\\binom{n}{k}p^{k}(1-p)^{n-k}$,记$q=1-p$,即有\n$$ P{X=k} = \\binom{n}{k}p^{k}q^{n-k}, k=0,1,2,..,n $$\n我们称随机变量 X 服从参数为n, p的二项分布,并记为$X \\sim b(n, p)$。\n特别,当n=1时,二项分布化为$P{X=k}=p^{k}q^{1-k}, k=0,1$,这就是(0 - 1)分布。\nnumpy.random.binomial函数可以根据二项分布进行抽样:",
"# 投掷硬币10次,正面朝上的次数;重复100次\nn, p = 10, .5\nnp.random.binomial(n, p, 100)",
"一个现实生活中的例子。一家钻井公司探索九个矿井,预计每个开采成功率为0.1;九个矿井全部开采失败的概率是多少?\n根据公式,$n = 9, p = 0.1, P{X = 0} = \\binom{9}{0} \\cdot 0.1^{0} \\cdot 0.9^{9} \\approx 0.3874$\n我们对该模型进行20000次试验,计算其中得到0的概率:",
"sum(np.random.binomial(9, 0.1, 20000) == 0) / 20000",
"将试验次数增加,可以模拟出更加逼近准确值的结果。\n4. 泊松分布 Poisson Distribution\n设随机变量 X 所有可能取的值为0, 1, 2, ..., 而取各个值的概率为\n$$P{X=k} = \\frac{\\lambda^ke^{-\\lambda}}{k!}, k=0,1,2,...,$$\n其中 $\\lambda > 0$ 是常数,则称 $X$ 服从参数为 $\\lambda$ 的泊松分布,记为 $X \\sim \\pi(\\lambda)$。\n易知,$P{X=k}\\geq0,k=0,1,2,...$,且有\n$$ \\sum_{k=0}^\\infty P{X=k} = \\sum_{k=0}^\\infty \\frac{\\lambda^{k}e^{-\\lambda}}{k!} = e^{-\\lambda}\\sum_{k=0}^\\infty \\frac{\\lambda^k}{k!} = e^{-\\lambda} \\cdot e^{\\lambda} = 1 $$\n具有泊松分布的随机变量在实际应用中是非常多的。例如,一本书一页中的印刷错误数、某地区在一天内邮递遗失的信件数、某一医院在一天内的急诊病人数、某一地区一个时间间隔内发生交通事故的次数、在一个时间间隔内某种放射性物质发出的、经过计数器的 $\\alpha$ 粒子数等都服从泊松分布。\nnumpy.random.poisson函数可以根据泊松分布进行抽样:",
"lb = 5\ns = np.random.poisson(lb, 10000)\ncount, bins, ignored = plt.hist(s, 14, normed=True)",
"5. 均匀分布 Uniform Distribution\n若连续型随机变量 X 具有概率密度\n$$ f(x) =\\left{\n\\begin{aligned}\n& \\frac{1}{b-a}, & a < x < b, \\\n& 0, & 其它 \\\n\\end{aligned}\n\\right.\n$$\n则称 X 在区间(a, b)上服从均匀分布,记为$X \\sim U(a, b)$\nnumpy.random.uniform函数可以根据均匀分布进行抽样:",
"# 取a = -1, b = 0, 样本数10000\na, b = -1, 0\ns = np.random.uniform(a, b, 10000)\n\n# 所有样本的值均大于a\nnp.all(s >= a)\n\n# 所有样本的值均小于b\nnp.all(s < b)\n\n# 绘制样本直方图及密度函数\ncount, bins, ignored = plt.hist(s, 15, normed=True)\nplt.plot(bins, np.ones_like(bins) / (b - a), linewidth=2, color='r')\nplt.show()",
"6. 指数分布 Exponential Distribution\n若连续型随机变量 X 具有概率密度\n$$ f(x) =\\left{\n\\begin{aligned}\n& \\frac{1}{\\theta}e^{-\\frac{x}{\\theta}}, & x > 0, \\\n& 0, & 其它 \\\n\\end{aligned}\n\\right.\n$$\n其中$\\theta > 0$为常数,则称 X 服从参数为$\\theta$的指数分布。\nnumpy.random.exponential函数可以根据均匀分布进行抽样:",
"# 取theta = 1,绘制样本直方图及密度函数\ntheta = 1\nf = lambda x: math.e ** (-x / theta) / theta\n\ns = np.random.exponential(theta, 10000)\ncount, bins, ignored = plt.hist(s, 100, normed=True)\nplt.plot(bins, f(bins), linewidth=2, color='r')\nplt.show()",
"7. 正态分布 Normal Distribution\n若连续型随机变量 X 的概率密度为\n$$ f(x) = \\frac{1}{\\sqrt{2\\pi}\\sigma}e^{-\\frac{(x-\\mu)^2}{2\\sigma^2}}, -\\infty < x < \\infty $$\n其中$\\mu, \\sigma(\\sigma > 0)$为常数,则称 X 服从参数为$\\mu, \\sigma$的正态分布或高斯(Gauss)分布,记为$X \\sim N(\\mu, \\sigma^2)$。\nf(x)的图形具有以下性质:\n\n\n曲线关于$x = \\mu$对称。这表明对于任意$h > 0$有\n$$ P{\\mu - h < X \\leq \\mu } = P{\\mu < X \\leq \\mu + h} $$\n\n\n当$x = \\mu$时取到最大值\n$$ f(\\mu) = \\frac{1}{\\sqrt{2\\pi}\\sigma} $$\nx离$\\mu$越远,f(x)的值越小。这表明对于同样长度的区间,当区间离$\\mu$越远,X落在这个区间上的概率越小。\n\n\n在$x = \\mu \\pm \\sigma$处曲线有拐点。曲线以Ox轴为渐近线。\n如果固定$\\sigma$,改变$\\mu$的值,则图形沿着Ox轴平移,而不改变其形状,可见正太分布的概率密度曲线y=f(x)的位置完全由参数$\\mu$所确定。$\\mu$称为位置参数。\n如果固定$\\mu$,改变$\\sigma$,由于最大值$f(\\mu)=\\frac{1}{\\sqrt{2\\pi}\\sigma}$,可知当$\\sigma$越小时图形变得越尖,因为 X 落在$\\mu$附近的概率越大。\n特别,当$\\mu = 0, \\sigma = 1$时称随机变量 X 服从标准正态分布。其概率密度和分布函数分别用$\\varphi(x), \\Phi(x)$表示,即有\n$$ \\varphi(x) = \\frac{1}{\\sqrt{2\\pi}}e^{-\\frac{x^2}{2}} $$\n$$ \\Phi(x) = \\frac{1}{\\sqrt{2\\pi}}\\int_{-\\infty}^{x} e^{-\\frac{x^2}{2}}dx $$\n易知\n$$ \\Phi(-x) = 1 - \\Phi(x) $$\nnumpy.random.normal函数可以根据正态分布进行抽样:",
"# 取均值0,标准差0.1\nmu, sigma = 0, 0.1\ns = np.random.normal(mu, sigma, 1000)\n\n# 验证均值\nabs(mu - np.mean(s)) < 0.01\n\n# 验证标准差\nabs(sigma - np.std(s, ddof=1)) < 0.01\n\n# 绘制样本直方图及密度函数\ncount, bins, ignored = plt.hist(s, 30, normed=True)\nplt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) * np.exp( - (bins - mu)**2 / (2 * sigma**2) ), linewidth=2, color='r')\nplt.show()",
"附: 参考资料\n1) Developers, Numpy. \"NumPy Documentation. 2013.\" (2014).\n2) 盛骤, and 谢式千. \"概率论与数理统计及其应用.\" (2004)."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
dsiufl/2015-Fall-Hadoop
|
notes/2-pyspark-rdd-examples.ipynb
|
mit
|
[
"Spark RDD Transformation and Action Examples\nBorrowed materials from http://nbviewer.ipython.org/github/jkthompson/pyspark-pictures/blob/master/pyspark-pictures.ipynb\nFor official documents, please refer to Spark Programming Guide.",
"import findspark\nimport os\nfindspark.init('/home/ubuntu/shortcourse/spark-1.5.1-bin-hadoop2.6')\n\nfrom pyspark import SparkContext, SparkConf\nconf = SparkConf().setAppName(\"pyspark-example\").setMaster(\"local[2]\")\nsc = SparkContext(conf=conf)",
"<img align=left src=\"files/images/pyspark-page2.svg\" width=500 height=500 />",
"# print Spark version\nprint(\"pyspark version:\" + str(sc.version))",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.map\">\n<img align=left src=\"files/images/pyspark-page3.svg\" width=500 height=500 />\n</a>",
"# map\nx = sc.parallelize([1,2,3]) # sc = spark context, parallelize creates an RDD from the passed object\ny = x.map(lambda x: (x,x**2))\nprint(x.collect()) # collect copies RDD elements to a list on the driver\nprint(y.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.flatMap\">\n<img align=left src=\"files/images/pyspark-page4.svg\" width=500 height=500 />\n</a>",
"# flatMap\nx = sc.parallelize([1,2,3])\ny = x.flatMap(lambda x: (x, 100*x, x**2))\nprint(x.collect())\nprint(y.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.mapPartitions\">\n<img align=left src=\"files/images/pyspark-page5.svg\" width=500 height=500 />\n</a>",
"# mapPartitions\nx = sc.parallelize([1,2,3], 2)\ndef f(iterator): yield sum(iterator)\ny = x.mapPartitions(f)\nprint(x.glom().collect()) # glom() flattens elements on the same partition\nprint(y.glom().collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.mapPartitionsWithIndex\">\n<img align=left src=\"files/images/pyspark-page6.svg\" width=500 height=500 />\n</a>",
"# mapPartitionsWithIndex\nx = sc.parallelize([1,2,3], 2)\ndef f(partitionIndex, iterator): yield (partitionIndex,sum(iterator))\ny = x.mapPartitionsWithIndex(f)\nprint(x.glom().collect()) # glom() flattens elements on the same partition\nprint(y.glom().collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.getNumPartitions\">\n<img align=left src=\"files/images/pyspark-page7.svg\" width=500 height=500 />\n</a>",
"# getNumPartitions\nx = sc.parallelize([1,2,3], 2)\ny = x.getNumPartitions()\nprint(x.glom().collect())\nprint(y)",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.filter\">\n<img align=left src=\"files/images/pyspark-page8.svg\" width=500 height=500 />\n</a>",
"# filter\nx = sc.parallelize([1,2,3])\ny = x.filter(lambda x: x%2 == 1) # filters out even elements\nprint(x.collect())\nprint(y.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.distinct\">\n<img align=left src=\"files/images/pyspark-page9.svg\" width=500 height=500 />\n</a>",
"# distinct\nx = sc.parallelize(['A','A','B'])\ny = x.distinct()\nprint(x.collect())\nprint(y.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.sample\">\n<img align=left src=\"files/images/pyspark-page10.svg\" width=500 height=500 />\n</a>",
"# sample\nx = sc.parallelize(range(7))\nylist = [x.sample(withReplacement=False, fraction=0.5) for i in range(5)] # call 'sample' 5 times\nprint('x = ' + str(x.collect()))\nfor cnt,y in zip(range(len(ylist)), ylist):\n print('sample:' + str(cnt) + ' y = ' + str(y.collect()))",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.takeSample\">\n<img align=left src=\"files/images/pyspark-page11.svg\" width=500 height=500 />\n</a>",
"# takeSample\nx = sc.parallelize(range(7))\nylist = [x.takeSample(withReplacement=False, num=3) for i in range(5)] # call 'sample' 5 times\nprint('x = ' + str(x.collect()))\nfor cnt,y in zip(range(len(ylist)), ylist):\n print('sample:' + str(cnt) + ' y = ' + str(y)) # no collect on y",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.union\">\n<img align=left src=\"files/images/pyspark-page12.svg\" width=500 height=500 />\n</a>",
"# union\nx = sc.parallelize(['A','A','B'])\ny = sc.parallelize(['D','C','A'])\nz = x.union(y)\nprint(x.collect())\nprint(y.collect())\nprint(z.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.intersection\">\n<img align=left src=\"files/images/pyspark-page13.svg\" width=500 height=500 />\n</a>",
"# intersection\nx = sc.parallelize(['A','A','B'])\ny = sc.parallelize(['A','C','D'])\nz = x.intersection(y)\nprint(x.collect())\nprint(y.collect())\nprint(z.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.sortByKey\">\n<img align=left src=\"files/images/pyspark-page14.svg\" width=500 height=500 />\n</a>",
"# sortByKey\nx = sc.parallelize([('B',1),('A',2),('C',3)])\ny = x.sortByKey()\nprint(x.collect())\nprint(y.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.sortBy\">\n<img align=left src=\"files/images/pyspark-page15.svg\" width=500 height=500 />\n</a>",
"# sortBy\nx = sc.parallelize(['Cat','Apple','Bat'])\ndef keyGen(val): return val[0]\ny = x.sortBy(keyGen)\nprint(y.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.glom\">\n<img align=left src=\"files/images/pyspark-page16.svg\" width=500 height=500 />\n</a>",
"# glom\nx = sc.parallelize(['C','B','A'], 2)\ny = x.glom()\nprint(x.collect()) \nprint(y.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.cartesian\">\n<img align=left src=\"files/images/pyspark-page17.svg\" width=500 height=500 />\n</a>",
"# cartesian\nx = sc.parallelize(['A','B'])\ny = sc.parallelize(['C','D'])\nz = x.cartesian(y)\nprint(x.collect())\nprint(y.collect())\nprint(z.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.groupBy\">\n<img align=left src=\"files/images/pyspark-page18.svg\" width=500 height=500 />\n<",
"# groupBy\nx = sc.parallelize([1,2,3])\ny = x.groupBy(lambda x: 'A' if (x%2 == 1) else 'B' )\nprint(x.collect())\nprint([(j[0],[i for i in j[1]]) for j in y.collect()]) # y is nested, this iterates through it",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.pipe\">\n<img align=left src=\"files/images/pyspark-page19.svg\" width=500 height=500 />\n</a>",
"# pipe\nx = sc.parallelize(['A', 'Ba', 'C', 'AD'])\ny = x.pipe('grep -i \"A\"') # calls out to grep, may fail under Windows \nprint(x.collect())\nprint(y.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.foreach\">\n<img align=left src=\"files/images/pyspark-page20.svg\" width=500 height=500 />\n</a>",
"# foreach\nfrom __future__ import print_function\nx = sc.parallelize([1,2,3])\ndef f(el):\n '''side effect: append the current RDD elements to a file'''\n f1=open(\"./foreachExample.txt\", 'a+') \n print(el,file=f1)\n\nopen('./foreachExample.txt', 'w').close() # first clear the file contents\n\ny = x.foreach(f) # writes into foreachExample.txt\n\nprint(x.collect())\nprint(y) # foreach returns 'None'\n# print the contents of foreachExample.txt\nwith open(\"./foreachExample.txt\", \"r\") as foreachExample:\n print (foreachExample.read())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.foreachPartition\">\n<img align=left src=\"files/images/pyspark-page21.svg\" width=500 height=500 />\n</a>",
"# foreachPartition\nfrom __future__ import print_function\nx = sc.parallelize([1,2,3],5)\ndef f(parition):\n '''side effect: append the current RDD partition contents to a file'''\n f1=open(\"./foreachPartitionExample.txt\", 'a+') \n print([el for el in parition],file=f1)\n\nopen('./foreachPartitionExample.txt', 'w').close() # first clear the file contents\n\ny = x.foreachPartition(f) # writes into foreachExample.txt\n\nprint(x.glom().collect())\nprint(y) # foreach returns 'None'\n# print the contents of foreachExample.txt\nwith open(\"./foreachPartitionExample.txt\", \"r\") as foreachExample:\n print (foreachExample.read())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.collect\">\n<img align=left src=\"files/images/pyspark-page22.svg\" width=500 height=500 />\n</a>",
"# collect\nx = sc.parallelize([1,2,3])\ny = x.collect()\nprint(x) # distributed\nprint(y) # not distributed",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.reduce\">\n<img align=left src=\"files/images/pyspark-page23.svg\" width=500 height=500 />\n</a>",
"# reduce\nx = sc.parallelize([1,2,3])\ny = x.reduce(lambda obj, accumulated: obj + accumulated) # computes a cumulative sum\nprint(x.collect())\nprint(y)",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.fold\">\n<img align=left src=\"files/images/pyspark-page24.svg\" width=500 height=500 />\n</a>",
"# fold\nx = sc.parallelize([1,2,3])\nneutral_zero_value = 0 # 0 for sum, 1 for multiplication\ny = x.fold(neutral_zero_value,lambda obj, accumulated: accumulated + obj) # computes cumulative sum\nprint(x.collect())\nprint(y)",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.aggregate\">\n<img align=left src=\"files/images/pyspark-page25.svg\" width=500 height=500 />\n</a>",
"# aggregate\nx = sc.parallelize([2,3,4])\nneutral_zero_value = (0,1) # sum: x+0 = x, product: 1*x = x\nseqOp = (lambda aggregated, el: (aggregated[0] + el, aggregated[1] * el)) \ncombOp = (lambda aggregated, el: (aggregated[0] + el[0], aggregated[1] * el[1]))\ny = x.aggregate(neutral_zero_value,seqOp,combOp) # computes (cumulative sum, cumulative product)\nprint(x.collect())\nprint(y)",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.max\">\n<img align=left src=\"files/images/pyspark-page26.svg\" width=500 height=500 />\n</a>",
"# max\nx = sc.parallelize([1,3,2])\ny = x.max()\nprint(x.collect())\nprint(y)",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.min\">\n<img align=left src=\"files/images/pyspark-page27.svg\" width=500 height=500 />\n</a>",
"# min\nx = sc.parallelize([1,3,2])\ny = x.min()\nprint(x.collect())\nprint(y)",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.sum\">\n<img align=left src=\"files/images/pyspark-page28.svg\" width=500 height=500 />\n</a>",
"# sum\nx = sc.parallelize([1,3,2])\ny = x.sum()\nprint(x.collect())\nprint(y)",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.count\">\n<img align=left src=\"files/images/pyspark-page29.svg\" width=500 height=500 />\n</a>",
"# count\nx = sc.parallelize([1,3,2])\ny = x.count()\nprint(x.collect())\nprint(y)",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.histogram\">\n<img align=left src=\"files/images/pyspark-page30.svg\" width=500 height=500 />\n</a>",
"# histogram (example #1)\nx = sc.parallelize([1,3,1,2,3])\ny = x.histogram(buckets = 2)\nprint(x.collect())\nprint(y)\n\n# histogram (example #2)\nx = sc.parallelize([1,3,1,2,3])\ny = x.histogram([0,0.5,1,1.5,2,2.5,3,3.5])\nprint(x.collect())\nprint(y)",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.mean\">\n<img align=left src=\"files/images/pyspark-page31.svg\" width=500 height=500 />\n</a>",
"# mean\nx = sc.parallelize([1,3,2])\ny = x.mean()\nprint(x.collect())\nprint(y)",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.variance\">\n<img align=left src=\"files/images/pyspark-page32.svg\" width=500 height=500 />\n</a>",
"# variance\nx = sc.parallelize([1,3,2])\ny = x.variance() # divides by N\nprint(x.collect())\nprint(y)",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.stdev\">\n<img align=left src=\"files/images/pyspark-page33.svg\" width=500 height=500 />\n</a>",
"# stdev\nx = sc.parallelize([1,3,2])\ny = x.stdev() # divides by N\nprint(x.collect())\nprint(y)",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.sampleStdev\">\n<img align=left src=\"files/images/pyspark-page34.svg\" width=500 height=500 />\n</a>",
"# sampleStdev\nx = sc.parallelize([1,3,2])\ny = x.sampleStdev() # divides by N-1\nprint(x.collect())\nprint(y)",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.sampleVariance\">\n<img align=left src=\"files/images/pyspark-page35.svg\" width=500 height=500 />\n</a>",
"# sampleVariance\nx = sc.parallelize([1,3,2])\ny = x.sampleVariance() # divides by N-1\nprint(x.collect())\nprint(y)",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.countByValue\">\n<img align=left src=\"files/images/pyspark-page36.svg\" width=500 height=500 />\n</a>",
"# countByValue\nx = sc.parallelize([1,3,1,2,3])\ny = x.countByValue()\nprint(x.collect())\nprint(y)",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.top\">\n<img align=left src=\"files/images/pyspark-page37.svg\" width=500 height=500 />\n</a>",
"# top\nx = sc.parallelize([1,3,1,2,3])\ny = x.top(num = 3)\nprint(x.collect())\nprint(y)",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.takeOrdered\">\n<img align=left src=\"files/images/pyspark-page38.svg\" width=500 height=500 />\n</a>",
"# takeOrdered\nx = sc.parallelize([1,3,1,2,3])\ny = x.takeOrdered(num = 3)\nprint(x.collect())\nprint(y)",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.take\">\n<img align=left src=\"files/images/pyspark-page39.svg\" width=500 height=500 />\n</a>",
"# take\nx = sc.parallelize([1,3,1,2,3])\ny = x.take(num = 3)\nprint(x.collect())\nprint(y)",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.first\">\n<img align=left src=\"files/images/pyspark-page40.svg\" width=500 height=500 />\n</a>",
"# first\nx = sc.parallelize([1,3,1,2,3])\ny = x.first()\nprint(x.collect())\nprint(y)",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.collectAsMap\">\n<img align=left src=\"files/images/pyspark-page41.svg\" width=500 height=500 />\n</a>",
"# collectAsMap\nx = sc.parallelize([('C',3),('A',1),('B',2)])\ny = x.collectAsMap()\nprint(x.collect())\nprint(y)",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.keys\">\n<img align=left src=\"files/images/pyspark-page42.svg\" width=500 height=500 />\n</a>",
"# keys\nx = sc.parallelize([('C',3),('A',1),('B',2)])\ny = x.keys()\nprint(x.collect())\nprint(y.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.values\">\n<img align=left src=\"files/images/pyspark-page43.svg\" width=500 height=500 />\n</a>",
"# values\nx = sc.parallelize([('C',3),('A',1),('B',2)])\ny = x.values()\nprint(x.collect())\nprint(y.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.reduceByKey\">\n<img align=left src=\"files/images/pyspark-page44.svg\" width=500 height=500 />\n</a>",
"# reduceByKey\nx = sc.parallelize([('B',1),('B',2),('A',3),('A',4),('A',5)])\ny = x.reduceByKey(lambda agg, obj: agg + obj)\nprint(x.collect())\nprint(y.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.reduceByKeyLocally\">\n<img align=left src=\"files/images/pyspark-page45.svg\" width=500 height=500 />\n</a>",
"# reduceByKeyLocally\nx = sc.parallelize([('B',1),('B',2),('A',3),('A',4),('A',5)])\ny = x.reduceByKeyLocally(lambda agg, obj: agg + obj)\nprint(x.collect())\nprint(y)",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.countByKey\">\n<img align=left src=\"files/images/pyspark-page46.svg\" width=500 height=500 />\n</a>",
"# countByKey\nx = sc.parallelize([('B',1),('B',2),('A',3),('A',4),('A',5)])\ny = x.countByKey()\nprint(x.collect())\nprint(y)",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.join\">\n<img align=left src=\"files/images/pyspark-page47.svg\" width=500 height=500 />\n</a>",
"# join\nx = sc.parallelize([('C',4),('B',3),('A',2),('A',1)])\ny = sc.parallelize([('A',8),('B',7),('A',6),('D',5)])\nz = x.join(y)\nprint(x.collect())\nprint(y.collect())\nprint(z.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.leftOuterJoin\">\n<img align=left src=\"files/images/pyspark-page48.svg\" width=500 height=500 />\n</a>",
"# leftOuterJoin\nx = sc.parallelize([('C',4),('B',3),('A',2),('A',1)])\ny = sc.parallelize([('A',8),('B',7),('A',6),('D',5)])\nz = x.leftOuterJoin(y)\nprint(x.collect())\nprint(y.collect())\nprint(z.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.rightOuterJoin\">\n<img align=left src=\"files/images/pyspark-page49.svg\" width=500 height=500 />\n</a>",
"# rightOuterJoin\nx = sc.parallelize([('C',4),('B',3),('A',2),('A',1)])\ny = sc.parallelize([('A',8),('B',7),('A',6),('D',5)])\nz = x.rightOuterJoin(y)\nprint(x.collect())\nprint(y.collect())\nprint(z.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.partitionBy\">\n<img align=left src=\"files/images/pyspark-page50.svg\" width=500 height=500 />\n</a>",
"# partitionBy\nx = sc.parallelize([(0,1),(1,2),(2,3)],2)\ny = x.partitionBy(numPartitions = 3, partitionFunc = lambda x: x) # only key is passed to paritionFunc\nprint(x.glom().collect())\nprint(y.glom().collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.combineByKey\">\n<img align=left src=\"files/images/pyspark-page51.svg\" width=500 height=500 />\n</a>",
"# combineByKey\nx = sc.parallelize([('B',1),('B',2),('A',3),('A',4),('A',5)])\ncreateCombiner = (lambda el: [(el,el**2)]) \nmergeVal = (lambda aggregated, el: aggregated + [(el,el**2)]) # append to aggregated\nmergeComb = (lambda agg1,agg2: agg1 + agg2 ) # append agg1 with agg2\ny = x.combineByKey(createCombiner,mergeVal,mergeComb)\nprint(x.collect())\nprint(y.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.aggregateByKey\">\n<img align=left src=\"files/images/pyspark-page52.svg\" width=500 height=500 />\n</a>",
"# aggregateByKey\nx = sc.parallelize([('B',1),('B',2),('A',3),('A',4),('A',5)])\nzeroValue = [] # empty list is 'zero value' for append operation\nmergeVal = (lambda aggregated, el: aggregated + [(el,el**2)])\nmergeComb = (lambda agg1,agg2: agg1 + agg2 )\ny = x.aggregateByKey(zeroValue,mergeVal,mergeComb)\nprint(x.collect())\nprint(y.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.foldByKey\">\n<img align=left src=\"files/images/pyspark-page53.svg\" width=500 height=500 />\n</a>",
"# foldByKey\nx = sc.parallelize([('B',1),('B',2),('A',3),('A',4),('A',5)])\nzeroValue = 1 # one is 'zero value' for multiplication\ny = x.foldByKey(zeroValue,lambda agg,x: agg*x ) # computes cumulative product within each key\nprint(x.collect())\nprint(y.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.groupByKey\">\n<img align=left src=\"files/images/pyspark-page54.svg\" width=500 height=500 />\n</a>",
"# groupByKey\nx = sc.parallelize([('B',5),('B',4),('A',3),('A',2),('A',1)])\ny = x.groupByKey()\nprint(x.collect())\nprint([(j[0],[i for i in j[1]]) for j in y.collect()])",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.flatMapValues\">\n<img align=left src=\"files/images/pyspark-page55.svg\" width=500 height=500 />\n</a>",
"# flatMapValues\nx = sc.parallelize([('A',(1,2,3)),('B',(4,5))])\ny = x.flatMapValues(lambda x: [i**2 for i in x]) # function is applied to entire value, then result is flattened\nprint(x.collect())\nprint(y.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.mapValues\">\n<img align=left src=\"files/images/pyspark-page56.svg\" width=500 height=500 />\n</a>",
"# mapValues\nx = sc.parallelize([('A',(1,2,3)),('B',(4,5))])\ny = x.mapValues(lambda x: [i**2 for i in x]) # function is applied to entire value\nprint(x.collect())\nprint(y.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.groupWith\">\n<img align=left src=\"files/images/pyspark-page57.svg\" width=500 height=500 />\n</a>",
"# groupWith\nx = sc.parallelize([('C',4),('B',(3,3)),('A',2),('A',(1,1))])\ny = sc.parallelize([('B',(7,7)),('A',6),('D',(5,5))])\nz = sc.parallelize([('D',9),('B',(8,8))])\na = x.groupWith(y,z)\nprint(x.collect())\nprint(y.collect())\nprint(z.collect())\nprint(\"Result:\")\nfor key,val in list(a.collect()): \n print(key, [list(i) for i in val])",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.cogroup\">\n<img align=left src=\"files/images/pyspark-page58.svg\" width=500 height=500 />\n</a>",
"# cogroup\nx = sc.parallelize([('C',4),('B',(3,3)),('A',2),('A',(1,1))])\ny = sc.parallelize([('A',8),('B',7),('A',6),('D',(5,5))])\nz = x.cogroup(y)\nprint(x.collect())\nprint(y.collect())\nfor key,val in list(z.collect()):\n print(key, [list(i) for i in val])",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.sampleByKey\">\n<img align=left src=\"files/images/pyspark-page59.svg\" width=500 height=500 />\n</a>",
"# sampleByKey\nx = sc.parallelize([('A',1),('B',2),('C',3),('B',4),('A',5)])\ny = x.sampleByKey(withReplacement=False, fractions={'A':0.5, 'B':1, 'C':0.2})\nprint(x.collect())\nprint(y.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.subtractByKey\">\n<img align=left src=\"files/images/pyspark-page60.svg\" width=500 height=500 />\n</a>",
"# subtractByKey\nx = sc.parallelize([('C',1),('B',2),('A',3),('A',4)])\ny = sc.parallelize([('A',5),('D',6),('A',7),('D',8)])\nz = x.subtractByKey(y)\nprint(x.collect())\nprint(y.collect())\nprint(z.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.subtract\">\n<img align=left src=\"files/images/pyspark-page61.svg\" width=500 height=500 />\n</a>",
"# subtract\nx = sc.parallelize([('C',4),('B',3),('A',2),('A',1)])\ny = sc.parallelize([('C',8),('A',2),('D',1)])\nz = x.subtract(y)\nprint(x.collect())\nprint(y.collect())\nprint(z.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.keyBy\">\n<img align=left src=\"files/images/pyspark-page62.svg\" width=500 height=500 />\n</a>",
"# keyBy\nx = sc.parallelize([1,2,3])\ny = x.keyBy(lambda x: x**2)\nprint(x.collect())\nprint(y.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.repartition\">\n<img align=left src=\"files/images/pyspark-page63.svg\" width=500 height=500 />\n</a>",
"# repartition\nx = sc.parallelize([1,2,3,4,5],2)\ny = x.repartition(numPartitions=3)\nprint(x.glom().collect())\nprint(y.glom().collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.coalesce\">\n<img align=left src=\"files/images/pyspark-page64.svg\" width=500 height=500 />\n</a>",
"# coalesce\nx = sc.parallelize([1,2,3,4,5],2)\ny = x.coalesce(numPartitions=1)\nprint(x.glom().collect())\nprint(y.glom().collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.zip\">\n<img align=left src=\"files/images/pyspark-page65.svg\" width=500 height=500 />\n</a>",
"# zip\nx = sc.parallelize(['B','A','A'])\ny = x.map(lambda x: ord(x)) # zip expects x and y to have same #partitions and #elements/partition\nz = x.zip(y)\nprint(x.collect())\nprint(y.collect())\nprint(z.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.zipWithIndex\">\n<img align=left src=\"files/images/pyspark-page66.svg\" width=500 height=500 />\n</a>",
"# zipWithIndex\nx = sc.parallelize(['B','A','A'],2)\ny = x.zipWithIndex()\nprint(x.glom().collect())\nprint(y.collect())",
"<a href=\"http://spark.apache.org/docs/1.2.0/api/python/pyspark.html#pyspark.RDD.zipWithUniqueId\">\n<img align=left src=\"files/images/pyspark-page67.svg\" width=500 height=500 />\n</a>",
"# zipWithUniqueId\nx = sc.parallelize(['B','A','A'],2)\ny = x.zipWithUniqueId()\nprint(x.glom().collect())\nprint(y.collect())\n\n# stop the spark context\nsc.stop()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
vravishankar/Jupyter-Books
|
Lists.ipynb
|
mit
|
[
"Lists\nLists are constructed with square brackets with elements separated by a comma.\nLists are mutable, meaning the individual items in the list can be changed.\nExample 1",
"vowels = ['a','e','i','o','u']\nprint(vowels)",
"Example 2\nLists can also hold multiple object types",
"list1 = [1,'a',\"This is a list\",5.25]\nprint(list1)",
"Example 3",
"# Find the length of the list\nlen(list1)",
"Example 4 - Slicing & Indexing",
"# Get the element using the index\nprint(list1[0])\nprint(list1[2])\n\n# Grab index 1 and everything after it\nprint(list1[1:])\n\n# Grab the element from index position 1 to 3 (1 less than given)\nprint(list1[1:3])\n\n# Grab elements upto 3rd item\nprint(list1[:3])\n\n# Grab the last item in the list\nprint(list1[-1])\n\nprint(list1[-1:])\n\n# Third parameter is the jump parameter\nlist2 = ['a','b','c','d','e','f','g']\nlist2[1:4:2]\n\n# Concatenate Elements\nlist2 + [\"added\"]\n\n# unless reassigned the added item is not permanently added\nlist2\n\nlist2 = list2 + [\"added permanently\"]\nlist2\n\nlist3 = list2 * 2\nlist3",
"List Methods",
"list4 = ['a','b','d','e']\nlist4.insert(2,'c')\nlist4\n\nlist4.append(['f','g'])\nlist4\n\npopped_item = list4.pop()\npopped_item\n\nprint(list4)\n\n# sort elements\nlist4.sort()\nlist4\n\n# reverse elements\nlist4.reverse()\nlist4\n\nlist4.remove('a')\nlist4\n\nlist5 = ['a','f']\nlist4.extend(list5)\nlist4\n\ndel list4[2]\nlist4\n\n# count the items\nlist4.count('a')\n\n# check if item exists in a list\nif 'a' in list4:\n print('found')\n\nprint(list4.index('a'))"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
jlawman/jlawman.github.io
|
content/deep-learning/Activation Functions.ipynb
|
mit
|
[
"Deep Learning activation functions examined below: \n1. ReLU\n2. Leaky ReLU\n3. sigmoid\n4. tanh\nActivation plotting pleminaries",
"import matplotlib.pyplot as plt\nimport numpy as np\n%matplotlib inline\n\n#Create array of possible z values\nz = np.linspace(-5,5,num=1000)\n\ndef draw_activation_plot(a,quadrants=2,y_ticks=[0],two_quad_y_lim=[0,5], four_quad_y_lim=[-1,1]): \n \"\"\"\n Draws plot of activation function\n \n Parameters\n ----------\n a : Output of activation function over domain z.\n \n quadrants: The number of quadrants in the plot (options: 2 or 4)\n \n y_ticks: Ticks to show on the y-axis.\n \n two_quad_y_lim: The limit of the y axis for 2 quadrant plots.\n \n four_quad_y_lim: The limit of the y axis for 4 quadrant plots.\n \n \"\"\"\n #Create figure and axis\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n\n #Move left axis \n ax.spines['left'].set_position('center')\n \n #Remove top and right axes\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n\n #Set x and y labels\n plt.xlabel('z')\n plt.ylabel('a')\n \n #Set ticks\n plt.xticks([])\n plt.yticks(y_ticks)\n \n #Set ylim\n plt.ylim(two_quad_y_lim)\n \n #4 Quadrant conditions\n if quadrants==4:\n #Move up bottom axis\n ax.spines['bottom'].set_position('center')\n \n #Move x and y labels for readability\n ax.yaxis.set_label_coords(.48,.75)\n ax.xaxis.set_label_coords(.75,.48)\n \n ##Set y_lim for 4 quadrant graphs\n plt.ylim(four_quad_y_lim)\n\n #Plot z vs. activation function\n plt.plot(z,a);",
"1. ReLU\nA great default choice for hidden layers. It is frequently used in industry and is almost always adequete to solve a problem.\nAlthough this graph is not differentiable at z=0, it is not usually a problem in practice since an exact value of 0 is rare. The derivative at z=0 can usually be set to 0 or 1 without a problem.",
"relu = np.maximum(z,0)\n\ndraw_activation_plot(relu)",
"2. Leaky ReLU\nCan be better than ReLU, but it is used less often in practice. \nIt provides a differentiable point at 0 to address the concern mentioned above.",
"leaky_ReLU = np.maximum(0.01*z,z)\n\ndraw_activation_plot(leaky_ReLU)",
"3. sigmoid\nAlmost never used except in output layer when dealing with binary classification. It's most useful feature is that it guarentees an output between 0 and 1.\nHowever, when z is very small or very large, the derivative of the sigmoid function is very small which can slow down gradient descent.",
"sigmoid = 1/(1+np.exp(-z))\n\ndraw_activation_plot(sigmoid,y_ticks=[0,1], two_quad_y_lim=[0,1])",
"4. tanh\nThis is essentially a shifted version of the sigmoid function which is usually strictly better. The mean of activations is closer to 0 which makes training on centered data easier. tanh is also a great default choice for hidden layers.",
"tanh = (np.exp(z)-np.exp(-z))/(np.exp(z)+np.exp(-z))\n\ndraw_activation_plot(tanh,y_ticks=[-1,0,1],quadrants=4)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
ecell/ecell4-notebooks
|
en/tutorials/tutorial05.ipynb
|
gpl-2.0
|
[
"5. How to Log and Visualize Simulations\nHere we explain how to take a log of simulation results and how to visualize it.",
"%matplotlib inline\nimport math\nfrom ecell4.prelude import *",
"5.1. Logging Simulations with Observers\nE-Cell4 provides special classes for logging, named Observer. Observer class is given when you call the run function of Simulator.",
"def create_simulator(f=gillespie.Factory()):\n m = NetworkModel()\n A, B, C = Species('A', 0.005, 1), Species('B', 0.005, 1), Species('C', 0.005, 1)\n m.add_species_attribute(A)\n m.add_species_attribute(B)\n m.add_species_attribute(C)\n m.add_reaction_rule(create_binding_reaction_rule(A, B, C, 0.01))\n m.add_reaction_rule(create_unbinding_reaction_rule(C, A, B, 0.3))\n \n w = f.world()\n w.bind_to(m)\n w.add_molecules(C, 60)\n \n sim = f.simulator(w)\n sim.initialize()\n return sim",
"One of most popular Observer is FixedIntervalNumberObserver, which logs the number of molecules with the given time interval. FixedIntervalNumberObserver requires an interval and a list of serials of Species for logging.",
"obs1 = FixedIntervalNumberObserver(0.1, ['A', 'B', 'C'])\nsim = create_simulator()\nsim.run(1.0, obs1)",
"data function of FixedIntervalNumberObserver returns the data logged.",
"print(obs1.data())",
"targets() returns a list of Species, which you specified as an argument of the constructor.",
"print([sp.serial() for sp in obs1.targets()])",
"NumberObserver logs the number of molecules after every steps when a reaction occurs. This observer is useful to log all reactions, but not available for ode.",
"obs1 = NumberObserver(['A', 'B', 'C'])\nsim = create_simulator()\nsim.run(1.0, obs1)\nprint(obs1.data())",
"TimingNumberObserver allows you to give the times for logging as an argument of its constructor.",
"obs1 = TimingNumberObserver([0.0, 0.1, 0.2, 0.5, 1.0], ['A', 'B', 'C'])\nsim = create_simulator()\nsim.run(1.0, obs1)\nprint(obs1.data())",
"run function accepts multile Observers at once.",
"obs1 = NumberObserver(['C'])\nobs2 = FixedIntervalNumberObserver(0.1, ['A', 'B'])\nsim = create_simulator()\nsim.run(1.0, [obs1, obs2])\nprint(obs1.data())\nprint(obs2.data())",
"FixedIntervalHDF5Observedr logs the whole data in a World to an output file with the fixed interval. Its second argument is a prefix for output filenames. filename() returns the name of a file scheduled to be saved next. At most one format string like %02d is allowed to use a step count in the file name. When you do not use the format string, it overwrites the latest data to the file.",
"obs1 = FixedIntervalHDF5Observer(0.2, 'test%02d.h5')\nprint(obs1.filename())\nsim = create_simulator()\nsim.run(1.0, obs1) # Now you have steped 5 (1.0/0.2) times\nprint(obs1.filename())\n\nw = load_world('test05.h5')\nprint(w.t(), w.num_molecules(Species('C')))",
"The usage of FixedIntervalCSVObserver is almost same with that of FixedIntervalHDF5Observer. It saves positions (x, y, z) of particles with the radius (r) and serial number of Species (sid) to a CSV file.",
"obs1 = FixedIntervalCSVObserver(0.2, \"test%02d.csv\")\nprint(obs1.filename())\nsim = create_simulator()\nsim.run(1.0, obs1)\nprint(obs1.filename())",
"Here is the first 10 lines in the output CSV file.",
"print(''.join(open(\"test05.csv\").readlines()[: 10]))",
"For particle simulations, E-Cell4 also provides Observer to trace a trajectory of a molecule, named FixedIntervalTrajectoryObserver. When no ParticleID is specified, it logs all the trajectories. Once some ParticleID is lost for the reaction during a simulation, it just stop to trace the particle any more.",
"sim = create_simulator(spatiocyte.Factory(0.005))\nobs1 = FixedIntervalTrajectoryObserver(0.01)\nsim.run(0.1, obs1)\n\nprint([tuple(pos) for pos in obs1.data()[0]])",
"Generally, World assumes a periodic boundary for each plane. To avoid the big jump of a particle at the edge due to the boundary condition, FixedIntervalTrajectoryObserver tries to keep the shift of positions. Thus, the positions stored in the Observer are not necessarily limited in the cuboid given for the World. To track the diffusion over the boundary condition accurately, the step interval for logging must be small enough. Of course, you can disable this option. See help(FixedIntervalTrajectoryObserver).\n5.2. Visualization of Data Logged\nIn this section, we explain the visualization tools for data logged by Observer.\nFirstly, for time course data, plotting.plot_number_observer plots the data provided by NumberObserver, FixedIntervalNumberObserver and TimingNumberObserver. For the detailed usage of plotting.plot_number_observer, see help(plotting.plot_number_observer).",
"obs1 = NumberObserver(['C'])\nobs2 = FixedIntervalNumberObserver(0.1, ['A', 'B'])\nsim = create_simulator()\nsim.run(10.0, [obs1, obs2])\n\nplotting.plot_number_observer(obs1, obs2, step=True)",
"You can set the style for plotting, and even add an arbitrary function to plot.",
"plotting.plot_number_observer(obs1, '-', obs2, ':', lambda t: 60 * (1 + 2 * math.exp(-0.9 * t)) / (2 + math.exp(-0.9 * t)), '--', step=True)",
"Plotting in the phase plane is also available by specifing the x-axis and y-axis.",
"plotting.plot_number_observer(obs2, 'o', x='A', y='B')",
"For spatial simulations, to visualize the state of World, plotting.plot_world is available. This function plots the points of particles in three-dimensional volume in the interactive way. You can save the image by clicking a right button on the drawing region.",
"sim = create_simulator(spatiocyte.Factory(0.005))\nplotting.plot_world(sim.world())",
"You can also make a movie from a series of HDF5 files, given as a FixedIntervalHDF5Observer. plotting.plot_movie requires an extra library, ffmpeg.",
"sim = create_simulator(spatiocyte.Factory(0.005))\nobs1 = FixedIntervalHDF5Observer(0.02, 'test%02d.h5')\nsim.run(1.0, obs1)\nplotting.plot_movie(obs1)",
"Finally, corresponding to FixedIntervalTrajectoryObserver, plotting.plot_trajectory provides a visualization of particle trajectories.",
"sim = create_simulator(spatiocyte.Factory(0.005))\nobs1 = FixedIntervalTrajectoryObserver(1e-3)\nsim.run(1, obs1)\nplotting.plot_trajectory(obs1)",
"show internally calls these plotting functions corresponding to the given observer. Thus, you can do simply as follows:",
"show(obs1)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
bspalding/research_public
|
lectures/drafts/Measures of Dispersion.ipynb
|
apache-2.0
|
[
"Dispersion\nBy Evgenia \"Jenny\" Nitishinskaya and Delaney Granizo-Mackenzie\nNotebook released under the Creative Commons Attribution 4.0 License.\n\nDispersion measures how spread out a set of data is. This corresponds to risk when our data set is returns over time. Data with low dispersion is heavily clustered around the mean, while high dispersion a indicates many very large and very small values.\nLet's generate an array of random integers to work with.",
"import numpy as np\nimport math\n\nnp.random.seed(121)\nX = np.sort(np.random.randint(100, size=20))\nprint 'X:', X\nmu = np.mean(X)\nprint 'Mean of X:', mu",
"Range\nRange is simply the difference between the maximum and minimum values in a dataset. Not surprisingly, it is very sensitive to outliers.",
"print 'Range of X:', np.ptp(X)",
"Mean absolute deviation\nThe mean absolute deviation is the average of the distances of observations from the arithmetic mean. We use the absolute value of the deviation, so that 5 above the mean and 5 below the mean both contribute 5, because otherwise the deviations always sum to 0.\n$$ MAD = \\frac{\\sum_{i=1}^n |X_i - \\mu|}{n} $$\nwhere $n$ is the number of observations and $\\mu$ is their mean.",
"abs_dispersion = [abs(mu - x) for x in X]\nMAD = sum(abs_dispersion)/len(abs_dispersion)\nprint 'Mean absolute deviation of X:', MAD",
"Variance and standard deviation\nThe variance $\\sigma^2$ is defined as the average of the squared deviations around the mean:\n$$ \\sigma^2 = \\frac{\\sum_{i=1}^n (X_i - \\mu)^2}{n} $$\nThis is sometimes more convenient than the mean absolute deviation because absolute value is not differentiable, while squaring is smooth, and some optimization algorithms rely on differentiability.\nStandard deviation is defined as the square root of the variance, $\\sigma$, and it is the easier of the two to interpret because it is in the same units as the observations.",
"print 'Variance of X:', np.var(X)\nprint 'Standard deviation of X:', np.std(X)",
"One way to interpret standard deviation is by referring to Chebyshev's inequality. This tells us that the proportion of samples within $k$ standard deviations (that is, within a distance of $k \\cdot$ standard deviation) of the mean is at least $1 - 1/k^2$ for all $k>1$.\nLet's check that this is true for our data set.",
"k = 1.25\ndist = k*np.std(X)\nl = [x for x in X if abs(x - mu) <= dist]\nprint 'Observations within', k, 'stds of mean:', l\nprint 'Confirming that', float(len(l))/len(X), '>', 1 - 1/k**2",
"The bound given by Chebyshev's inequality seems fairly loose in this case. This bound is rarely strict, but it is useful because it holds for all data sets and distributions.\nSemivariance and semideviation\nAlthough variance and standard deviation tell us how volatile a quantity is, they do not differentiate between deviations upward and deviations downward. Often, such as in the case of returns on an asset, we are more worried about deviations downward. This is addressed by semivariance and semideviation, which only count the observations that fall below the mean. Semivariance is defined as\n$$ \\frac{\\sum_{X_i < \\mu} (X_i - \\mu)^2}{n_<} $$\nwhere $n_<$ is the number of observations which are smaller than the mean. Semideviation is the square root of the semivariance.",
"# Because there is no built-in semideviation, we'll compute it ourselves\nlows = [e for e in X if e <= mu]\nsemivar = sum(map(lambda x: (x - mu)**2,lows))/len(lows)\n\nprint 'Semivariance of X:', semivar\nprint 'Semideviation of X:', math.sqrt(semivar)",
"A related notion is target semivariance (and target semideviation), where we average the distance from a target of values which fall below that target:\n$$ \\frac{\\sum_{X_i < B} (X_i - B)^2}{n_{<B}} $$",
"B = 19\nlows_B = [e for e in X if e <= B]\nsemivar_B = sum(map(lambda x: (x - B)**2,lows_B))/len(lows_B)\n\nprint 'Target semivariance of X:', semivar_B\nprint 'Target semideviation of X:', math.sqrt(semivar_B)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
danijel3/ASRDemos
|
notebooks/Theano_HMM.ipynb
|
apache-2.0
|
[
"Theano implementation of HMM algorithms\nThis notebook implements some of the HMM algorithms using Theano. These may be helpful for incorporating them in the graphs of some other models in Theano. These are implemented in the same manner as in the other notebook about HMMs.\nDiscrete HMM\nWe start with a toy HMM example from Rabiner's paper.\nMany improvements can be added to this (and may be added in the future):\n\n\neverything should be moved to the log-domain - for computation stability. This is especially important for GPUs which usually have a lower floating-point precision. This example here works only for the smallest examples (a few observations).\n\n\ncontinous density models - this discrete example cannot be applied to most practical examples.\n\n\ncustom topologies - this is a simple ergodic example. In practice, we'd want to be able to design a specific transition graph for a particular use.\n\n\ncombining models - real applications rely on being able to combine several models to be able to train them for specific problems. E.g. combining many tri-phone models for speech recognition. This should also include the ability to do state-tying.\n\n\nbetter training algorithms - only Baum-Welch and the simplest gradient descent are demonstrated here. There are other methods that oculd work better/faster.\n\n\nThe example below will demonstrate the code using a similar notation and equations from Rabiner's paper. First we import the required stuff:",
"import numpy as np\nimport theano\nimport theano.tensor as T\nfrom theano.ifelse import ifelse\nimport pickle\nfrom collections import OrderedDict",
"HMM class code\nHere we create the HMM class. Everything is compiled in the constructor. We also provide methods for all the individual algorithms:",
"class DiscreteHMM:\n \n def __init__(self, N=3, M=4): \n \n updates={}\n \n pi = theano.shared((np.ones(N)/N).astype(theano.config.floatX))\n a = theano.shared((np.ones((N,N))/(N*np.ones(N))).astype(theano.config.floatX))\n b = theano.shared((np.ones((N,M))/(N*np.ones(M))).astype(theano.config.floatX))\n N = theano.shared(N)\n M = theano.shared(M)\n \n self.pi=pi\n self.a=a\n self.b=b\n self.N=N\n self.M=M\n \n O = T.ivector()\n TT = O.shape[0]\n \n \n #forward algorithm:\n \n alpha0=pi*b[:,O[0]]\n \n alpha_scan,upd = theano.scan(fn=lambda O,alpha_p: T.dot(alpha_p,a)*b[:,O],\n sequences=O[1:],\n outputs_info=alpha0)\n \n updates.update(upd)\n \n alpha=T.concatenate((alpha0.reshape((1,N)),alpha_scan)) \n \n #backward algorithm:\n \n beta0=T.ones(N).astype(theano.config.floatX)\n \n beta_scan,upd = theano.scan(fn=lambda O,beta_p: T.dot(beta_p*b[:,O],a.T),\n sequences=O[1:],\n outputs_info=beta0,\n go_backwards=True)\n updates.update(upd)\n \n beta=T.concatenate((beta_scan[::-1],beta0.reshape((1,N)))) \n \n #full model probability:\n \n full_prob = alpha_scan[-1].sum() \n \n #forward-backward probabilities:\n \n gamma=alpha*beta/full_prob \n \n #viterbi algorithm:\n \n def viterbi_rec_step(O, delta_p, phi_p):\n m=delta_p*a.T\n phi=m.argmax(axis=1)\n delta=m[T.arange(N),phi]*b[:,O]\n return delta,phi\n \n phi0=T.zeros(N).astype('int64')\n\n [delta_scan, phi_scan], upd = theano.scan(fn=viterbi_rec_step,\n sequences=O[1:],\n outputs_info=[alpha0,phi0]) \n \n updates.update(upd)\n \n QT=phi_scan[-1].argmax() \n vite_prob = delta_scan[-1,QT]\n \n Q_scan, upd = theano.scan(fn=lambda phi, Q: phi[Q],\n sequences=phi_scan,\n outputs_info=QT,\n go_backwards=True)\n \n updates.update(upd)\n \n Q=T.concatenate((Q_scan[::-1],QT.reshape((1,))))\n \n #transition probabilities\n \n xi=alpha[:-1].reshape((TT-1,N,1))*a.reshape((1,N,N))*b[:,O[1:]].T.reshape((TT-1,1,N))*beta[1:].reshape((TT-1,1,N))/full_prob\n \n #expected values\n \n exp_pi=gamma[0]\n \n 
exp_a=xi.sum(axis=0)/gamma[:-1].sum(axis=0).reshape((N,1))\n \n exp_b_map, upd = theano.map(fn=lambda k: T.sum(gamma[T.eq(O,k).nonzero()],axis=0)/T.sum(gamma,axis=0), \n sequences=T.arange(M))\n \n updates.update(upd)\n \n exp_b = exp_b_map.T\n \n exp_err = T.concatenate(((pi-exp_pi).ravel(),(a-exp_a).ravel(),(b-exp_b).ravel()))\n \n exp_mean_err = T.mean(exp_err**2)\n \n #Baum-Welch updates:\n \n baum_welch_updates=OrderedDict()\n exp_updates={pi:exp_pi,a:exp_a,b:exp_b}\n baum_welch_updates.update(updates)\n baum_welch_updates.update(exp_updates)\n \n #Gradient descent:\n \n pi_grad=T.grad(cost=full_prob,wrt=pi)\n a_grad=T.grad(cost=full_prob,wrt=a)\n b_grad=T.grad(cost=full_prob,wrt=b)\n \n lr=T.scalar()\n \n pi_upd=pi*(pi_grad**lr)\n norm_pi_upd=pi_upd/pi_upd.sum()\n \n a_upd=a*(a_grad**lr)\n norm_a_upd=(a_upd.T/a_upd.sum(axis=1)).T\n \n b_upd=b*(b_grad**lr)\n norm_b_upd=b_upd/b_upd.sum(axis=0)\n \n gd_updates=OrderedDict()\n grad_updates={pi:norm_pi_upd,\n a:norm_a_upd,\n b:norm_b_upd}\n gd_updates.update(updates)\n gd_updates.update(grad_updates) \n \n #function definitions\n \n self.forward_fun = theano.function(inputs=[O], outputs=alpha, updates=updates)\n \n self.backward_fun = theano.function(inputs=[O], outputs=beta, updates=updates)\n \n self.full_prob_fun = theano.function(inputs=[O], outputs=full_prob, updates=updates)\n \n self.gamma_fun = theano.function(inputs=[O], outputs=gamma, updates=updates)\n \n self.viterbi_fun = theano.function(inputs=[O], outputs=[Q,vite_prob], updates=updates)\n \n self.xi_fun = theano.function(inputs=[O], outputs=xi, updates=updates) \n \n self.exp_fun = theano.function(inputs=[O], outputs=[exp_pi,exp_a,exp_b], updates=updates)\n \n self.baum_welch_fun = theano.function(inputs=[O], outputs=[full_prob,exp_mean_err], updates=baum_welch_updates)\n \n self.gd_fun = theano.function(inputs=[O,lr], outputs=full_prob, updates=gd_updates) \n \n def setModel(self,pi,a,b,N,M):\n \n 
self.pi.set_value(pi.astype(theano.config.floatX))\n self.a.set_value(a.astype(theano.config.floatX))\n self.b.set_value(b.astype(theano.config.floatX))\n self.N.set_value(N)\n self.M.set_value(M)\n \n def getModel(self):\n \n return self.pi.get_value(),self.a.get_value(),self.b.get_value(),self.N.get_value(),self.M.get_value()\n \n def forward(self, O):\n \n return self.forward_fun(O.astype('int32')) \n \n \n def backward(self, O):\n \n return self.backward_fun(O.astype('int32')) \n \n def full_prob(self, O):\n \n return self.full_prob_fun(O.astype('int32'))\n \n def gamma(self, O):\n \n return self.gamma_fun(O.astype('int32'))\n \n def viterbi(self, O):\n \n return self.viterbi_fun(O.astype('int32'))\n \n def xi(self, O):\n \n return self.xi_fun(O.astype('int32'))\n \n def exp_values(self, O):\n \n return self.exp_fun(O.astype('int32'))\n \n def baum_welch(self,O):\n \n return self.baum_welch_fun(O.astype('int32'))\n \n def gradient_descent(self,O,lr=0.01):\n \n return self.gd_fun(O.astype('int32'),lr)\n \n \n ",
"Model creation\nWe can either use the default (all equally probable) or some other random values to begin with. Here we will read the model parameters from a file created in my other notebook. It will allow us to make sure all the calculations match the ones there:",
"with open('../data/hmm.pkl') as f:\n O,pi,a,b,N,M,Time=pickle.load(f)\n \nprint 'Number of states: {}'.format(N)\nprint 'Number of observation classes: {}'.format(M)\nprint 'Number of time steps: {}'.format(Time) #T is taken by theano.tensor\nprint 'Observation sequence: {}'.format(O)\nprint 'Priors: {}'.format(pi)\nprint 'Transition matrix:\\n{}'.format(a)\nprint 'Observation probability matrix:\\n{}'.format(b)",
"Here we will contruct the HMM object. The constructor needs to compile everything and since we have a few functions, it may take a little while:",
"%time hmm=DiscreteHMM()\n\n#we can also set the model parameters\nhmm.setModel(pi,a,b,N,M)",
"Algorithms\nLet's test the methots now. You can compare the values with the ones from my other notebook:",
"print 'Forward probabilities:\\n{}'.format(hmm.forward(O))\nprint 'Backward probabilities:\\n{}'.format(hmm.backward(O))\nprint 'Full model probability: {}'.format(hmm.full_prob(O))\nprint 'Complete state probability:\\n{}'.format(hmm.gamma(O))\nseq,vite_prob=hmm.viterbi(O)\nprint 'Viterbi sequence: {} its probability {}'.format(seq,vite_prob)\nprint 'State transition probability:\\n{}'.format(hmm.xi(O))",
"Expected values",
"exp_pi,exp_a,exp_b=hmm.exp_values(O)\nprint 'Expected priors: {}'.format(exp_pi)\nprint 'Expected transitions:\\n{}'.format(exp_a)\nprint 'Expected observations:\\n{}'.format(exp_b)",
"Baum-Welch\nWe will run 15 iterations of the Baum-Welch EM reestimation here. We will also output the model probability (which should increase with each iteration) and also the mean difference between the model parameters and their expected values (which will decrease to 0 as the model converges on the optimum).",
"hmm.setModel(pi,a,b,N,M)\nfor i in range(15): \n prob,exp_err=hmm.baum_welch(O)\n print 'Iteration #{} P={} delta_exp={}'.format(i+1,prob,exp_err)",
"Gradient Descent\nSince this is Theano, we can easily implement GD using the built-in grad method. The parameters are updated by multiplying them with their gradients. The updated values have to also be renormalized to keep the stochasticity of the parameters.",
"hmm.setModel(pi,a,b,N,M)\n\nfor i in range(20):\n prob=hmm.gradient_descent(O,0.2)\n print 'Iteration #{} P={}'.format(i+1,prob)\n\nprint hmm.full_prob(O)\n\npi_n,a_n,b_n,N_n,M_n=hmm.getModel()\n\nnp.set_printoptions(suppress=True)\nprint 'PI: {}'.format(pi_n)\nprint 'A:\\n{}'.format(a_n)\nprint 'B:\\n{}'.format(b_n)\nnp.set_printoptions(suppress=False)",
"This method quickly converges to the optimum, although in this example the optimum is not a very useful model because it stays mostly in one state all the time. Having several different sequences would probably serve as a better test for this method...\nLog model\nThis is the same class above, but moved into the log domain. All the paramaters and calculations are done in the log domain.\nSome log arithmetic hints can be found here.\nIn log domain, things like multiplication and division are trivial, but simple addition, subtraction and sum become a nuisance. That is why they need to be reimplemented by pulling the values back into the normal linear domain and then taking them back after the operation is completed. So add becomes LogAddExp and sum becomes LogSumExp and so on...",
"from pylearn2.expr.basic import log_sum_exp\n\n\ndef LogDot(a,b):\n return log_sum_exp(a + b.T, axis=1)\n\ndef LogSum(a,axis=None):\n return log_sum_exp(a,axis)\n\ndef LogAdd(a,b):\n return T.log(T.exp(a)+T.exp(b))\n\ndef LogSub(a,b):\n return T.log(T.exp(a)-T.exp(b))",
"Here is the actual class in the LogDomain:",
"class LogDiscreteHMM:\n \n def __init__(self, N=3, M=4): \n \n updates={}\n \n pi = theano.shared((np.zeros(N)/N).astype(theano.config.floatX))\n a = theano.shared((np.zeros((N,N))/(N*np.ones(N))).astype(theano.config.floatX))\n b = theano.shared((np.zeros((N,M))/(N*np.ones(M))).astype(theano.config.floatX))\n N = theano.shared(N)\n M = theano.shared(M)\n \n self.pi=pi\n self.a=a\n self.b=b\n self.N=N\n self.M=M\n \n O = T.ivector()\n TT = O.shape[0]\n \n \n #forward algorithm:\n \n alpha0=pi+b[:,O[0]]\n \n alpha_scan,upd = theano.scan(fn=lambda O,alpha_p: LogDot(alpha_p,a)+b[:,O],\n sequences=O[1:],\n outputs_info=alpha0)\n \n updates.update(upd)\n \n alpha=T.concatenate((alpha0.reshape((1,N)),alpha_scan)) \n \n #backward algorithm:\n \n beta0=T.zeros(N).astype(theano.config.floatX)\n \n beta_scan,upd = theano.scan(fn=lambda O,beta_p: LogDot(beta_p+b[:,O],a.T),\n sequences=O[1:],\n outputs_info=beta0,\n go_backwards=True)\n updates.update(upd)\n \n beta=T.concatenate((beta_scan[::-1],beta0.reshape((1,N)))) \n \n #full model probability:\n \n full_prob = LogSum(alpha_scan[-1])\n \n #forward-backward probabilities:\n \n gamma=alpha+beta-full_prob \n \n #viterbi algorithm:\n \n def viterbi_rec_step(O, delta_p, phi_p):\n m=delta_p+a.T\n phi=m.argmax(axis=1)\n delta=m[T.arange(N),phi]+b[:,O]\n return delta,phi\n \n phi0=T.zeros(N).astype('int64')\n\n [delta_scan, phi_scan], upd = theano.scan(fn=viterbi_rec_step,\n sequences=O[1:],\n outputs_info=[alpha0,phi0]) \n \n updates.update(upd)\n \n QT=phi_scan[-1].argmax() \n vite_prob = delta_scan[-1,QT]\n \n Q_scan, upd = theano.scan(fn=lambda phi, Q: phi[Q],\n sequences=phi_scan,\n outputs_info=QT,\n go_backwards=True)\n \n updates.update(upd)\n \n Q=T.concatenate((Q_scan[::-1],QT.reshape((1,))))\n \n #transition probabilities\n \n xi=alpha[:-1].reshape((TT-1,N,1))+a.reshape((1,N,N))+b[:,O[1:]].T.reshape((TT-1,1,N))+beta[1:].reshape((TT-1,1,N))-full_prob\n \n #expected values\n \n exp_pi=gamma[0]\n \n 
exp_a=LogSum(xi,axis=0)-LogSum(gamma[:-1],axis=0).reshape((N,1))\n \n def exp_b_fun(k):\n return ifelse(T.eq(gamma[T.eq(O,k).nonzero()].shape[0],0),\n T.ones((a.shape[1],))*(-99),\n LogSum(gamma[T.eq(O,k).nonzero()],axis=0)-LogSum(gamma,axis=0))\n \n exp_b_map, upd = theano.map(fn=exp_b_fun, sequences=T.arange(M))\n \n updates.update(upd)\n \n exp_b = exp_b_map.T\n \n exp_err = T.concatenate(((np.exp(pi)-np.exp(exp_pi)).ravel(),\n (np.exp(a)-np.exp(exp_a)).ravel(),\n (np.exp(b)-np.exp(exp_b)).ravel()))\n \n exp_mean_err = T.mean(exp_err**2)\n \n #Baum-Welch updates:\n \n baum_welch_updates=OrderedDict()\n exp_updates={pi:exp_pi,a:exp_a,b:exp_b}\n baum_welch_updates.update(updates)\n baum_welch_updates.update(exp_updates)\n \n #Gradient descent:\n \n pi_grad=T.grad(cost=full_prob,wrt=pi)\n a_grad=T.grad(cost=full_prob,wrt=a)\n b_grad=T.grad(cost=full_prob,wrt=b)\n \n lr=T.scalar()\n \n pi_upd=pi+(pi_grad*lr)\n norm_pi_upd=pi_upd-LogSum(pi_upd)\n \n a_upd=a+(a_grad*lr)\n norm_a_upd=(a_upd.T-LogSum(a_upd,axis=1)).T\n \n b_upd=b+(b_grad*lr)\n norm_b_upd=b_upd-LogSum(b_upd,axis=0)\n \n gd_updates=OrderedDict()\n grad_updates={pi:norm_pi_upd,\n a:norm_a_upd,\n b:norm_b_upd}\n gd_updates.update(updates)\n gd_updates.update(grad_updates) \n \n #function definitions\n \n self.forward_fun = theano.function(inputs=[O], outputs=alpha, updates=updates)\n \n self.backward_fun = theano.function(inputs=[O], outputs=beta, updates=updates)\n \n self.full_prob_fun = theano.function(inputs=[O], outputs=full_prob, updates=updates)\n \n self.gamma_fun = theano.function(inputs=[O], outputs=gamma, updates=updates)\n \n self.viterbi_fun = theano.function(inputs=[O], outputs=[Q,vite_prob], updates=updates)\n \n self.xi_fun = theano.function(inputs=[O], outputs=xi, updates=updates) \n \n self.exp_fun = theano.function(inputs=[O], outputs=[exp_pi,exp_a,exp_b], updates=updates)\n \n self.baum_welch_fun = theano.function(inputs=[O], outputs=[full_prob,exp_mean_err], 
updates=baum_welch_updates)\n \n self.gd_fun = theano.function(inputs=[O,lr], outputs=full_prob, updates=gd_updates)\n \n def setModel(self,pi,a,b,N,M):\n \n self.pi.set_value(pi.astype(theano.config.floatX))\n self.a.set_value(a.astype(theano.config.floatX))\n self.b.set_value(b.astype(theano.config.floatX))\n self.N.set_value(N)\n self.M.set_value(M)\n \n def getModel(self):\n \n return self.pi.get_value(),self.a.get_value(),self.b.get_value(),self.N.get_value(),self.M.get_value()\n \n \n def forward(self, O):\n \n return self.forward_fun(O.astype('int32')) \n \n \n def backward(self, O):\n \n return self.backward_fun(O.astype('int32')) \n \n def full_prob(self, O):\n \n return self.full_prob_fun(O.astype('int32'))\n \n def gamma(self, O):\n \n return self.gamma_fun(O.astype('int32'))\n \n def viterbi(self, O):\n \n return self.viterbi_fun(O.astype('int32'))\n \n def xi(self, O):\n \n return self.xi_fun(O.astype('int32'))\n \n def exp_values(self, O):\n \n return self.exp_fun(O.astype('int32'))\n \n def baum_welch(self,O):\n \n return self.baum_welch_fun(O.astype('int32'))\n \n def gradient_descent(self,O,lr=0.01):\n \n return self.gd_fun(O.astype('int32'),lr) \n ",
"Here we construct the object. It is not much more complicated than the one above:",
"%time loghmm=LogDiscreteHMM()",
"Since all the parameters are in the log domain, we have to take logarithms of all the values that were used above:",
"loghmm.setModel(np.log(pi),np.log(a),np.log(b),N,M) ",
"And we have to compute the exponential of the results to get back into the normal domain. Nevertheless, the results are the same as above:",
"print 'Forward probabilities:\\n{}'.format(np.exp(loghmm.forward(O)))\nprint 'Backward probabilities:\\n{}'.format(np.exp(loghmm.backward(O)))\nprint 'Full model probability: {}'.format(np.exp(loghmm.full_prob(O)))\nprint 'Complete state probability:\\n{}'.format(np.exp(loghmm.gamma(O)))\nseq,vite_prob=loghmm.viterbi(O)\nprint 'Viterbi sequence: {} its probability {}'.format(seq,np.exp(vite_prob))\nprint 'State transition probability:\\n{}'.format(np.exp(loghmm.xi(O)))",
"The expected values for Baum-Welch are also correct:",
"exp_pi,exp_a,exp_b=loghmm.exp_values(O)\nprint 'Expected priors: {}'.format(np.exp(exp_pi))\nprint 'Expected transitions:\\n{}'.format(np.exp(exp_a))\nprint 'Expected observations:\\n{}'.format(np.exp(exp_b))",
"And the Baum-Welch procedure works the same as well. The only exception here is that the exp_err value is not retrieved in the log domain, since it's more convinient this way:",
"loghmm.setModel(np.log(pi),np.log(a),np.log(b),N,M)\nfor i in range(15): \n prob,exp_err=loghmm.baum_welch(O)\n print 'Iteration #{} P={} delta_exp={}'.format(i+1,np.exp(prob),exp_err)",
"Finally, gradient descent works similarly to the one above:",
"loghmm.setModel(np.log(pi),np.log(a),np.log(b),N,M)\n\nfor i in range(20):\n prob=loghmm.gradient_descent(O,0.2)\n print 'Iteration #{} P={}'.format(i+1,np.exp(prob))"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
letsgoexploring/teaching
|
winter2017/econ129/python/Econ129_Class_17.ipynb
|
mit
|
[
"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport linearsolve as ls\n%matplotlib inline",
"Class 17: A Centralized Real Business Cycle Model without Labor\nThe Model\nSetup\nA representative household lives for an infinite number of periods. The expected present value of lifetime utility to the household from consuming $C_0, C_1, C_2, \\ldots $ is denoted by $U_0$:\n\\begin{align}\nU_0 & = \\log (C_0) + \\beta E_0 \\log (C_1) + \\beta^2 E_0 \\log (C_2) + \\cdots\\\n& = E_0\\sum_{t = 0}^{\\infty} \\beta^t \\log (C_t),\n\\end{align}\nwhere $0<\\beta<1$ is the household's subjective discount factor. $E_0$ denotes the expectation with respect to all information available as of date 0.\nThe household enters period 0 with capital $K_0>0$. Production in period $t$:\n\\begin{align}\nF(A_t,K_t) & = A_t K_t^{\\alpha}\n\\end{align}\nwhere TFP $A_t$ is stochastic:\n\\begin{align}\n\\log A_{t+1} & = \\rho \\log A_t + \\epsilon_{t+1}\n\\end{align}\nCapital depreciates at the constant rate $\\delta$ per period and so the household's resource constraint in each period $t$ is:\n\\begin{align}\nC_t + K_{t+1} & = A_t K_{t}^{\\alpha} + (1-\\delta)K_t\n\\end{align}\nOptimization problem\nIn period 0, the household solves:\n\\begin{align}\n& \\max_{C_0,K_1} \\; E_0\\sum_{t=0}^{\\infty}\\beta^t\\log (C_t) \\\n& \\; \\; \\; \\; \\; \\; \\; \\; \\text{s.t.} \\; \\; \\; \\; C_t + K_{t+1} = A_t K_{t}^{\\alpha} + (1-\\delta)K_t\n\\end{align}\nwhich can be written as a choice of $K_1$ only:\n\\begin{align}\n\\max_{K_1} \\; E_0\\sum_{t=0}^{\\infty}\\beta^t\\log \\left( A_t K_{t}^{\\alpha} + (1-\\delta)K_t - K_{t+1}\\right)\n\\end{align}\nEquilibrium\nSo given $K_0>0$ and $A_0$, the equilibrium paths for consumption, capital, and TFP are described described by:\n\\begin{align}\n\\frac{1}{C_t} & = \\beta E_t \\left[\\frac{\\alpha A_{t+1}K_{t+1}^{\\alpha - 1} + 1 - \\delta}{C_{t+1}}\\right]\\\nC_t + K_{t+1} & = A_{t} K_t^{\\alpha} + (1-\\delta) K_t\\\n\\log A_{t+1} & = \\rho \\log A_t + \\epsilon_{t+1}\n\\end{align}\nCalibration\nFor computation purposes, assume the following values 
for the parameters of the model:\n\\begin{align}\n\\beta & = 0.99\\\n\\rho & = .75\\\n\\sigma & = 0.006\\\n\\alpha & = 0.35\\\n\\delta & = 0.025\n\\end{align}\nSteady State\nThe steady state:\n\\begin{align}\nA & = 1\\\nK & = \\left(\\frac{\\alpha A}{\\beta^{-1} - 1 + \\delta} \\right)^{\\frac{1}{1-\\alpha}}\\\nC & = AK^{\\alpha} - \\delta K\n\\end{align}",
"# 1. Input model parameters and print\n\n\n# 2. Compute the steady state of the model directly\n\n\n# 3. Define a function that evaluates the equilibrium conditions\ndef equilibrium_equations(variables_forward,variables_current,parameters):\n \n # Parameters \n p = parameters\n \n # Variables\n fwd = variables_forward\n cur = variables_current\n \n # Resource constraint\n resource = cur.a*cur.k**p.alpha + (1-p.delta)* cur.k - fwd.k - cur.c\n \n # Exogenous tfp\n\n \n # Euler equation\n\n \n # Stack equilibrium conditions into a numpy array\n return np.array([\n\n ])\n\n# 4. Initialize the model\nmodel = ls.model(equations = equilibrium_equations,\n nstates=,\n varNames=[], # Any order as long as the state variables are named first\n shockNames=[], # Name a shock for each state variable *even if there is no corresponding shock in the model*\n parameters = parameters)\n\n# 5. Set the steady state of the model directly. \nmodel.set_ss([])\n\n# 6. Find the log-linear approximation around the non-stochastic steady state and solve\nmodel.approximate_and_solve()\n\n# 7. 
Print the approximated model in terms of log-deviations from the stady state\nprint(model.approximated())\n\n# 8(a) Compute impulse responses and print the computed impulse responses\nmodel.impulse(T=41,t0=5,shock=None)\nprint(model.irs['eA'].head(10))\n\n# 8(b) Plot the computed impulse responses to a TFP shock\nfig = plt.figure(figsize=(12,4))\n\nax1 = fig.add_subplot(1,2,1)\nmodel.irs['eA'][['a','k','c']].plot(lw='5',alpha=0.5,grid=True,ax = ax1).legend(loc='upper right',ncol=3)\n\nax2 = fig.add_subplot(1,2,2)\nmodel.irs['eA'][['eA','a']].plot(lw='5',alpha=0.5,grid=True,ax = ax2).legend(loc='upper right',ncol=2)\n\n# 9(a) Compute stochastic simulation and print the simulated values\nmodel.stoch_sim(seed=192,covMat= [[parameters['sigma']**2,0],[0,0]])\nprint(model.simulated.head(10))\n\n# 9(b) Plot the computed stochastic simulation\nfig = plt.figure(figsize=(12,4))\n\nax1 = fig.add_subplot(1,2,1)\nmodel.simulated[['a','c','k']].plot(lw=5,alpha=0.5,grid=True,ax = ax1).legend(loc='upper right',ncol=3)\n\nax2 = fig.add_subplot(1,2,2)\nmodel.simulated[['eA','a']].plot(lw=5,alpha=0.5,grid=True,ax = ax2).legend(loc='upper right',ncol=2)",
"Add Output and Investment\nRecall the three equilibrium conditions fo the model:\n\\begin{align}\n\\frac{1}{C_t} & = \\beta E_t \\left[\\frac{\\alpha A_{t+1}K_{t+1}^{\\alpha - 1} + 1 - \\delta}{C_{t+1}}\\right]\\\nC_t + K_{t+1} & = A_{t} K_t^{\\alpha} + (1-\\delta) K_t\\\n\\log A_{t+1} & = \\rho \\log A_t + \\epsilon_{t+1}\n\\end{align}\nAppend two more equations that determine output and investment:\n\\begin{align}\nY_t & = A_t K_t^{\\alpha}\\\nI_t & = K_{t+1} - (1-\\delta)K_t\n\\end{align}\nRecompute the model using the same parameters from above",
"# 1. Compute the steady state values of Y and I\n\n\n# 2. Define a function that evaluates the equilibrium conditions\n\n\n# 3. Initialize the model\n\n\n# 4. Set the steady state of the model directly. \n\n\n# 5. Find the log-linear approximation around the non-stochastic steady state and solve\n\n\n# 6(a) Compute stochastic simulation and print the simulated values\nmodel.stoch_sim(seed=192,covMat= [[parameters['sigma'],0],[0,0]])\nprint(model.simulated.head(10))\n\n# 6(b) Plot the computed stochastic simulation",
"Evaluation",
"# Compute the standard deviations of A, Y, C, and I\n\n\n# Compute the coefficients of correlation for A, Y, C, and I\n"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
hpi-epic/pricewars-merchant
|
docs/Working with Kafka data.ipynb
|
mit
|
[
"Working with Kafka data\nDuring a simulation, the producer and the marketplace are constantly logging sales and the activity on the market to Kafka. These information are organised in topics. In order to estimate customer demand and predict good prices, merchants can use the Kafka API to access this data.\nThe merchants gets the data in form of a pandas DataFrame.\nIf you want to try the following examples, make sure that the Pricewars plattform is running.\nEither by deploying them individually or by using the docker setup.\nThe following step is specific for this notebook.\nIt is not necessary if your merchant is in the repository root.",
"import sys\nsys.path.append('../')",
"Initialize Kafka API\nYou need a merchant token to use the Kafka API.\nTo get one, register the merchant at the marketplace.",
"from api import Marketplace\nmarketplace = Marketplace()\nregistration = marketplace.register(\n 'http://nobody:55000/',\n merchant_name='kafka_notebook_merchant',\n algorithm_name='human')\n\nregistration",
"It was not possible to connect to the marketplace if you got the following error:\nConnectionError: HTTPConnectionPool(host='marketplace', port=8080)\nIn that case, make sure that the marketplace is running and host and port are correct.\nIf host or port are wrong, you can change it by creating a marketplace object with the host argument:\nmarketplace = Marketplace(host='www.another_host.com:1234')\nSame is true for the upcoming Kafka API \nNext, initialize the Kafka API:",
"from api import Kafka\nkafka = Kafka(token=registration.merchant_token)",
"Request topic\nYou can request data for specific topics. The most important topics are buyOffer which contains your own sales and marketSituation which contains a history of market situations.\nThe call will return the data in form of a pandas DataFrame.\nDepending on how active the simulation is and how much data is logged, this can take some time.",
"sales_data = kafka.download_topic_data('buyOffer')\nsales_data.head()",
"This method may return None if it was not possible to obtain the data. For example, this happens if the merchant doesn't have any sales.",
"len(sales_data)\n\nmarket_situations = kafka.download_topic_data('marketSituation')\nprint(len(market_situations))\nmarket_situations.head()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
nteract/papermill
|
papermill/tests/notebooks/gcs/gcs_in/gcs-simple_notebook.ipynb
|
bsd-3-clause
|
[
"Running Code\nFirst and foremost, the Jupyter Notebook is an interactive environment for writing and running code. The notebook is capable of running code in a wide range of languages. However, each notebook is associated with a single kernel. This notebook is associated with the IPython kernel, therefore runs Python code.\nCode cells allow you to enter and run code\nRun a code cell using Shift-Enter or pressing the <button class='btn btn-default btn-xs'><i class=\"icon-step-forward fa fa-step-forward\"></i></button> button in the toolbar above:",
"a = 10\n\nprint(a)",
"There are two other keyboard shortcuts for running code:\n\nAlt-Enter runs the current cell and inserts a new one below.\nCtrl-Enter run the current cell and enters command mode.\n\nManaging the Kernel\nCode is run in a separate process called the Kernel. The Kernel can be interrupted or restarted. Try running the following cell and then hit the <button class='btn btn-default btn-xs'><i class='icon-stop fa fa-stop'></i></button> button in the toolbar above.",
"import time\ntime.sleep(10)",
"If the Kernel dies you will be prompted to restart it. Here we call the low-level system libc.time routine with the wrong argument via\nctypes to segfault the Python interpreter:",
"import sys\nfrom ctypes import CDLL\n# This will crash a Linux or Mac system\n# equivalent calls can be made on Windows\n\n# Uncomment these lines if you would like to see the segfault\n\n# dll = 'dylib' if sys.platform == 'darwin' else 'so.6'\n# libc = CDLL(\"libc.%s\" % dll) \n# libc.time(-1) # BOOM!!",
"Cell menu\nThe \"Cell\" menu has a number of menu items for running code in different ways. These includes:\n\nRun and Select Below\nRun and Insert Below\nRun All\nRun All Above\nRun All Below\n\nRestarting the kernels\nThe kernel maintains the state of a notebook's computations. You can reset this state by restarting the kernel. This is done by clicking on the <button class='btn btn-default btn-xs'><i class='fa fa-repeat icon-repeat'></i></button> in the toolbar above.\nsys.stdout and sys.stderr\nThe stdout and stderr streams are displayed as text in the output area.",
"print(\"hi, stdout\")\n\nprint('hi, stderr', file=sys.stderr)",
"Output is asynchronous\nAll output is displayed asynchronously as it is generated in the Kernel. If you execute the next cell, you will see the output one piece at a time, not all at the end.",
"import time, sys\nfor i in range(8):\n print(i)\n time.sleep(0.5)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
apark263/tensorflow
|
tensorflow/contrib/eager/python/examples/generative_examples/image_captioning_with_attention.ipynb
|
apache-2.0
|
[
"Copyright 2018 The TensorFlow Authors.\nLicensed under the Apache License, Version 2.0 (the \"License\").\nImage Captioning with Attention\n<table class=\"tfo-notebook-buttons\" align=\"left\"><td>\n<a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/generative_examples/image_captioning_with_attention.ipynb\">\n <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a> \n</td><td>\n<a target=\"_blank\" href=\"https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/examples/generative_examples/image_captioning_with_attention.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a></td></table>\n\nImage captioning is the task of generating a caption for an image. Given an image like this:\n \nImage Source, License: Public Domain\nOur goal is to generate a caption, such as \"a surfer riding on a wave\". Here, we'll use an attention-based model. This enables us to see which parts of the image the model focuses on as it generates a caption.\n\nThis model architecture below is similar to Show, Attend and Tell: Neural Image Caption Generation with Visual Attention. \nThe code uses tf.keras and eager execution, which you can learn more about in the linked guides.\nThis notebook is an end-to-end example. If you run it, it will download the MS-COCO dataset, preprocess and cache a subset of the images using Inception V3, train an encoder-decoder model, and use it to generate captions on new images.\nThe code requires TensorFlow version >=1.9. If you're running this in Colab\nIn this example, we're training on a relatively small amount of data as an example. On a single P100 GPU, this example will take about ~2 hours to train. 
We train on the first 30,000 captions (corresponding to about ~20,000 images depending on shuffling, as there are multiple captions per image in the dataset)",
"# Import TensorFlow and enable eager execution\n# This code requires TensorFlow version >=1.9\nimport tensorflow as tf\ntf.enable_eager_execution()\n\n# We'll generate plots of attention in order to see which parts of an image\n# our model focuses on during captioning\nimport matplotlib.pyplot as plt\n\n# Scikit-learn includes many helpful utilities\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\n\nimport re\nimport numpy as np\nimport os\nimport time\nimport json\nfrom glob import glob\nfrom PIL import Image\nimport pickle",
"Download and prepare the MS-COCO dataset\nWe will use the MS-COCO dataset to train our model. This dataset contains >82,000 images, each of which has been annotated with at least 5 different captions. The code below will download and extract the dataset automatically. \nCaution: large download ahead. We'll use the training set, it's a 13GB file.",
"annotation_zip = tf.keras.utils.get_file('captions.zip', \n cache_subdir=os.path.abspath('.'),\n origin = 'http://images.cocodataset.org/annotations/annotations_trainval2014.zip',\n extract = True)\nannotation_file = os.path.dirname(annotation_zip)+'/annotations/captions_train2014.json'\n\nname_of_zip = 'train2014.zip'\nif not os.path.exists(os.path.abspath('.') + '/' + name_of_zip):\n image_zip = tf.keras.utils.get_file(name_of_zip, \n cache_subdir=os.path.abspath('.'),\n origin = 'http://images.cocodataset.org/zips/train2014.zip',\n extract = True)\n PATH = os.path.dirname(image_zip)+'/train2014/'\nelse:\n PATH = os.path.abspath('.')+'/train2014/'",
"Optionally, limit the size of the training set for faster training\nFor this example, we'll select a subset of 30,000 captions and use these and the corresponding images to train our model. As always, captioning quality will improve if you choose to use more data.",
"# read the json file\nwith open(annotation_file, 'r') as f:\n annotations = json.load(f)\n\n# storing the captions and the image name in vectors\nall_captions = []\nall_img_name_vector = []\n\nfor annot in annotations['annotations']:\n caption = '<start> ' + annot['caption'] + ' <end>'\n image_id = annot['image_id']\n full_coco_image_path = PATH + 'COCO_train2014_' + '%012d.jpg' % (image_id)\n \n all_img_name_vector.append(full_coco_image_path)\n all_captions.append(caption)\n\n# shuffling the captions and image_names together\n# setting a random state\ntrain_captions, img_name_vector = shuffle(all_captions,\n all_img_name_vector,\n random_state=1)\n\n# selecting the first 30000 captions from the shuffled set\nnum_examples = 30000\ntrain_captions = train_captions[:num_examples]\nimg_name_vector = img_name_vector[:num_examples]\n\nlen(train_captions), len(all_captions)",
"Preprocess the images using InceptionV3\nNext, we will use InceptionV3 (pretrained on Imagenet) to classify each image. We will extract features from the last convolutional layer. \nFirst, we will need to convert the images into the format inceptionV3 expects by:\n* Resizing the image to (299, 299)\n* Using the preprocess_input method to place the pixels in the range of -1 to 1 (to match the format of the images used to train InceptionV3).",
"def load_image(image_path):\n img = tf.read_file(image_path)\n img = tf.image.decode_jpeg(img, channels=3)\n img = tf.image.resize_images(img, (299, 299))\n img = tf.keras.applications.inception_v3.preprocess_input(img)\n return img, image_path",
"Initialize InceptionV3 and load the pretrained Imagenet weights\nTo do so, we'll create a tf.keras model where the output layer is the last convolutional layer in the InceptionV3 architecture. \n* Each image is forwarded through the network and the vector that we get at the end is stored in a dictionary (image_name --> feature_vector). \n* We use the last convolutional layer because we are using attention in this example. The shape of the output of this layer is 8x8x2048. \n* We avoid doing this during training so it does not become a bottleneck. \n* After all the images are passed through the network, we pickle the dictionary and save it to disk.",
"image_model = tf.keras.applications.InceptionV3(include_top=False, \n weights='imagenet')\nnew_input = image_model.input\nhidden_layer = image_model.layers[-1].output\n\nimage_features_extract_model = tf.keras.Model(new_input, hidden_layer)",
"Caching the features extracted from InceptionV3\nWe will pre-process each image with InceptionV3 and cache the output to disk. Caching the output in RAM would be faster but memory intensive, requiring 8 * 8 * 2048 floats per image. At the time of writing, this would exceed the memory limitations of Colab (although these may change, an instance appears to have about 12GB of memory currently). \nPerformance could be improved with a more sophisticated caching strategy (e.g., by sharding the images to reduce random access disk I/O) at the cost of more code.\nThis will take about 10 minutes to run in Colab with a GPU. If you'd like to see a progress bar, you could: install tqdm (!pip install tqdm), then change this line: \nfor img, path in image_dataset: \nto:\nfor img, path in tqdm(image_dataset):.",
"# getting the unique images\nencode_train = sorted(set(img_name_vector))\n\n# feel free to change the batch_size according to your system configuration\nimage_dataset = tf.data.Dataset.from_tensor_slices(\n encode_train).map(load_image).batch(16)\n\nfor img, path in image_dataset:\n batch_features = image_features_extract_model(img)\n batch_features = tf.reshape(batch_features, \n (batch_features.shape[0], -1, batch_features.shape[3]))\n\n for bf, p in zip(batch_features, path):\n path_of_feature = p.numpy().decode(\"utf-8\")\n np.save(path_of_feature, bf.numpy())",
"Preprocess and tokenize the captions\n\nFirst, we'll tokenize the captions (e.g., by splitting on spaces). This will give us a vocabulary of all the unique words in the data (e.g., \"surfing\", \"football\", etc).\nNext, we'll limit the vocabulary size to the top 5,000 words to save memory. We'll replace all other words with the token \"UNK\" (for unknown).\nFinally, we create a word --> index mapping and vice-versa.\nWe will then pad all sequences to the be same length as the longest one.",
"# This will find the maximum length of any caption in our dataset\ndef calc_max_length(tensor):\n return max(len(t) for t in tensor)\n\n# The steps above is a general process of dealing with text processing\n\n# choosing the top 5000 words from the vocabulary\ntop_k = 5000\ntokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=top_k, \n oov_token=\"<unk>\", \n filters='!\"#$%&()*+.,-/:;=?@[\\]^_`{|}~ ')\ntokenizer.fit_on_texts(train_captions)\ntrain_seqs = tokenizer.texts_to_sequences(train_captions)\n\ntokenizer.word_index['<pad>'] = 0\n\n# creating the tokenized vectors\ntrain_seqs = tokenizer.texts_to_sequences(train_captions)\n\n# padding each vector to the max_length of the captions\n# if the max_length parameter is not provided, pad_sequences calculates that automatically\ncap_vector = tf.keras.preprocessing.sequence.pad_sequences(train_seqs, padding='post')\n\n# calculating the max_length \n# used to store the attention weights\nmax_length = calc_max_length(train_seqs)",
"Split the data into training and testing",
"# Create training and validation sets using 80-20 split\nimg_name_train, img_name_val, cap_train, cap_val = train_test_split(img_name_vector, \n cap_vector, \n test_size=0.2, \n random_state=0)\n\nlen(img_name_train), len(cap_train), len(img_name_val), len(cap_val)",
"Our images and captions are ready! Next, let's create a tf.data dataset to use for training our model.",
"# feel free to change these parameters according to your system's configuration\n\nBATCH_SIZE = 64\nBUFFER_SIZE = 1000\nembedding_dim = 256\nunits = 512\nvocab_size = len(tokenizer.word_index)\n# shape of the vector extracted from InceptionV3 is (64, 2048)\n# these two variables represent that\nfeatures_shape = 2048\nattention_features_shape = 64\n\n# loading the numpy files \ndef map_func(img_name, cap):\n img_tensor = np.load(img_name.decode('utf-8')+'.npy')\n return img_tensor, cap\n\ndataset = tf.data.Dataset.from_tensor_slices((img_name_train, cap_train))\n\n# using map to load the numpy files in parallel\n# NOTE: Be sure to set num_parallel_calls to the number of CPU cores you have\n# https://www.tensorflow.org/api_docs/python/tf/py_func\ndataset = dataset.map(lambda item1, item2: tf.py_func(\n map_func, [item1, item2], [tf.float32, tf.int32]), num_parallel_calls=8)\n\n# shuffling and batching\ndataset = dataset.shuffle(BUFFER_SIZE)\n# https://www.tensorflow.org/api_docs/python/tf/contrib/data/batch_and_drop_remainder\ndataset = dataset.batch(BATCH_SIZE)\ndataset = dataset.prefetch(1)",
"Model\nFun fact, the decoder below is identical to the one in the example for Neural Machine Translation with Attention.\nThe model architecture is inspired by the Show, Attend and Tell paper.\n\nIn this example, we extract the features from the lower convolutional layer of InceptionV3 giving us a vector of shape (8, 8, 2048). \nWe squash that to a shape of (64, 2048).\nThis vector is then passed through the CNN Encoder(which consists of a single Fully connected layer).\nThe RNN(here GRU) attends over the image to predict the next word.",
"def gru(units):\n # If you have a GPU, we recommend using the CuDNNGRU layer (it provides a \n # significant speedup).\n if tf.test.is_gpu_available():\n return tf.keras.layers.CuDNNGRU(units, \n return_sequences=True, \n return_state=True, \n recurrent_initializer='glorot_uniform')\n else:\n return tf.keras.layers.GRU(units, \n return_sequences=True, \n return_state=True, \n recurrent_activation='sigmoid', \n recurrent_initializer='glorot_uniform')\n\nclass BahdanauAttention(tf.keras.Model):\n def __init__(self, units):\n super(BahdanauAttention, self).__init__()\n self.W1 = tf.keras.layers.Dense(units)\n self.W2 = tf.keras.layers.Dense(units)\n self.V = tf.keras.layers.Dense(1)\n \n def call(self, features, hidden):\n # features(CNN_encoder output) shape == (batch_size, 64, embedding_dim)\n \n # hidden shape == (batch_size, hidden_size)\n # hidden_with_time_axis shape == (batch_size, 1, hidden_size)\n hidden_with_time_axis = tf.expand_dims(hidden, 1)\n \n # score shape == (batch_size, 64, hidden_size)\n score = tf.nn.tanh(self.W1(features) + self.W2(hidden_with_time_axis))\n \n # attention_weights shape == (batch_size, 64, 1)\n # we get 1 at the last axis because we are applying score to self.V\n attention_weights = tf.nn.softmax(self.V(score), axis=1)\n \n # context_vector shape after sum == (batch_size, hidden_size)\n context_vector = attention_weights * features\n context_vector = tf.reduce_sum(context_vector, axis=1)\n \n return context_vector, attention_weights\n\nclass CNN_Encoder(tf.keras.Model):\n # Since we have already extracted the features and dumped it using pickle\n # This encoder passes those features through a Fully connected layer\n def __init__(self, embedding_dim):\n super(CNN_Encoder, self).__init__()\n # shape after fc == (batch_size, 64, embedding_dim)\n self.fc = tf.keras.layers.Dense(embedding_dim)\n \n def call(self, x):\n x = self.fc(x)\n x = tf.nn.relu(x)\n return x\n\nclass RNN_Decoder(tf.keras.Model):\n def __init__(self, 
embedding_dim, units, vocab_size):\n super(RNN_Decoder, self).__init__()\n self.units = units\n\n self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)\n self.gru = gru(self.units)\n self.fc1 = tf.keras.layers.Dense(self.units)\n self.fc2 = tf.keras.layers.Dense(vocab_size)\n \n self.attention = BahdanauAttention(self.units)\n \n def call(self, x, features, hidden):\n # defining attention as a separate model\n context_vector, attention_weights = self.attention(features, hidden)\n \n # x shape after passing through embedding == (batch_size, 1, embedding_dim)\n x = self.embedding(x)\n \n # x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size)\n x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)\n \n # passing the concatenated vector to the GRU\n output, state = self.gru(x)\n \n # shape == (batch_size, max_length, hidden_size)\n x = self.fc1(output)\n \n # x shape == (batch_size * max_length, hidden_size)\n x = tf.reshape(x, (-1, x.shape[2]))\n \n # output shape == (batch_size * max_length, vocab)\n x = self.fc2(x)\n\n return x, state, attention_weights\n\n def reset_state(self, batch_size):\n return tf.zeros((batch_size, self.units))\n\nencoder = CNN_Encoder(embedding_dim)\ndecoder = RNN_Decoder(embedding_dim, units, vocab_size)\n\noptimizer = tf.train.AdamOptimizer()\n\n# We are masking the loss calculated for padding\ndef loss_function(real, pred):\n mask = 1 - np.equal(real, 0)\n loss_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=real, logits=pred) * mask\n return tf.reduce_mean(loss_)",
"Training\n\nWe extract the features stored in the respective .npy files and then pass those features through the encoder.\nThe encoder output, hidden state(initialized to 0) and the decoder input (which is the start token) is passed to the decoder.\nThe decoder returns the predictions and the decoder hidden state.\nThe decoder hidden state is then passed back into the model and the predictions are used to calculate the loss.\nUse teacher forcing to decide the next input to the decoder.\nTeacher forcing is the technique where the target word is passed as the next input to the decoder.\nThe final step is to calculate the gradients and apply it to the optimizer and backpropagate.",
"# adding this in a separate cell because if you run the training cell \n# many times, the loss_plot array will be reset\nloss_plot = []\n\nEPOCHS = 20\n\nfor epoch in range(EPOCHS):\n start = time.time()\n total_loss = 0\n \n for (batch, (img_tensor, target)) in enumerate(dataset):\n loss = 0\n \n # initializing the hidden state for each batch\n # because the captions are not related from image to image\n hidden = decoder.reset_state(batch_size=target.shape[0])\n\n dec_input = tf.expand_dims([tokenizer.word_index['<start>']] * BATCH_SIZE, 1)\n \n with tf.GradientTape() as tape:\n features = encoder(img_tensor)\n \n for i in range(1, target.shape[1]):\n # passing the features through the decoder\n predictions, hidden, _ = decoder(dec_input, features, hidden)\n\n loss += loss_function(target[:, i], predictions)\n \n # using teacher forcing\n dec_input = tf.expand_dims(target[:, i], 1)\n \n total_loss += (loss / int(target.shape[1]))\n \n variables = encoder.variables + decoder.variables\n \n gradients = tape.gradient(loss, variables) \n \n optimizer.apply_gradients(zip(gradients, variables), tf.train.get_or_create_global_step())\n \n if batch % 100 == 0:\n print ('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1, \n batch, \n loss.numpy() / int(target.shape[1])))\n # storing the epoch end loss value to plot later\n loss_plot.append(total_loss / len(cap_vector))\n \n print ('Epoch {} Loss {:.6f}'.format(epoch + 1, \n total_loss/len(cap_vector)))\n print ('Time taken for 1 epoch {} sec\\n'.format(time.time() - start))\n\nplt.plot(loss_plot)\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.title('Loss Plot')\nplt.show()",
"Caption!\n\nThe evaluate function is similar to the training loop, except we don't use teacher forcing here. The input to the decoder at each time step is its previous predictions along with the hidden state and the encoder output.\nStop predicting when the model predicts the end token.\nAnd store the attention weights for every time step.",
"def evaluate(image):\n attention_plot = np.zeros((max_length, attention_features_shape))\n\n hidden = decoder.reset_state(batch_size=1)\n\n temp_input = tf.expand_dims(load_image(image)[0], 0)\n img_tensor_val = image_features_extract_model(temp_input)\n img_tensor_val = tf.reshape(img_tensor_val, (img_tensor_val.shape[0], -1, img_tensor_val.shape[3]))\n\n features = encoder(img_tensor_val)\n\n dec_input = tf.expand_dims([tokenizer.word_index['<start>']], 0)\n result = []\n\n for i in range(max_length):\n predictions, hidden, attention_weights = decoder(dec_input, features, hidden)\n\n attention_plot[i] = tf.reshape(attention_weights, (-1, )).numpy()\n\n predicted_id = tf.argmax(predictions[0]).numpy()\n result.append(tokenizer.index_word[predicted_id])\n\n if tokenizer.index_word[predicted_id] == '<end>':\n return result, attention_plot\n\n dec_input = tf.expand_dims([predicted_id], 0)\n\n attention_plot = attention_plot[:len(result), :]\n return result, attention_plot\n\ndef plot_attention(image, result, attention_plot):\n temp_image = np.array(Image.open(image))\n\n fig = plt.figure(figsize=(10, 10))\n \n len_result = len(result)\n for l in range(len_result):\n temp_att = np.resize(attention_plot[l], (8, 8))\n ax = fig.add_subplot(len_result//2, len_result//2, l+1)\n ax.set_title(result[l])\n img = ax.imshow(temp_image)\n ax.imshow(temp_att, cmap='gray', alpha=0.6, extent=img.get_extent())\n\n plt.tight_layout()\n plt.show()\n\n# captions on the validation set\nrid = np.random.randint(0, len(img_name_val))\nimage = img_name_val[rid]\nreal_caption = ' '.join([tokenizer.index_word[i] for i in cap_val[rid] if i not in [0]])\nresult, attention_plot = evaluate(image)\n\nprint ('Real Caption:', real_caption)\nprint ('Prediction Caption:', ' '.join(result))\nplot_attention(image, result, attention_plot)\n# opening the image\nImage.open(img_name_val[rid])",
"Try it on your own images\nFor fun, below we've provided a method you can use to caption your own images with the model we've just trained. Keep in mind, it was trained on a relatively small amount of data, and your images may be different from the training data (so be prepared for weird results!)",
"image_url = 'https://tensorflow.org/images/surf.jpg'\nimage_extension = image_url[-4:]\nimage_path = tf.keras.utils.get_file('image'+image_extension, \n origin=image_url)\n\nresult, attention_plot = evaluate(image_path)\nprint ('Prediction Caption:', ' '.join(result))\nplot_attention(image_path, result, attention_plot)\n# opening the image\nImage.open(image_path)",
"Next steps\nCongrats! You've just trained an image captioning model with attention. Next, we recommend taking a look at this example Neural Machine Translation with Attention. It uses a similar architecture to translate between Spanish and English sentences. You can also experiment with training the code in this notebook on a different dataset."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
TUW-GEO/pygeogrids
|
docs/examples/subsetting_grid_objects_with_shape_files.ipynb
|
mit
|
[
"Subsetting grids using shapefiles.",
"import pygeogrids.grids as grids\nimport pygeogrids.shapefile as shapefile\nimport numpy as np\nimport os\n\ntestgrid = grids.genreg_grid(0.1, 0.1)",
"We can now subset the 0.1x0.1 degree regular grid with the shapefiles from http://biogeo.ucdavis.edu/data/gadm2.8/gadm28_levels.shp.zip which were downloaded/extracted to ~/Downloads/gadm",
"austria = shapefile.get_gad_grid_points(\n testgrid, os.path.join('/home', os.environ['USER'], 'Downloads', 'gadm', 'gadm28_levels.shp.zip'), 0, name='Austria')\n",
"We can the plot the resulting grid using a simple scatterplot.",
"import matplotlib.pyplot as plt\n%matplotlib inline\nplt.scatter(austria.arrlon, austria.arrlat)",
"Behind the scenes this functionality uses the get_shp_grid_points function of the grid object.\nWe can also use this directly using any ogr.Geometry object.",
"ring = ogr.Geometry(ogr.wkbLinearRing)\nring.AddPoint(14, 47)\nring.AddPoint(14, 48)\nring.AddPoint(16, 48)\nring.AddPoint(16, 47)\nring.AddPoint(14, 47)\n\npoly = ogr.Geometry(ogr.wkbPolygon)\npoly.AddGeometry(ring)\nsubgrid = austria.get_shp_grid_points(poly)\nplt.scatter(austria.arrlon, austria.arrlat)\nplt.scatter(subgrid.arrlon, subgrid.arrlat, c='orange')"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
mne-tools/mne-tools.github.io
|
0.18/_downloads/dc0d85321d22190ec4d4c4394d0057f4/plot_opm_data.ipynb
|
bsd-3-clause
|
[
"%matplotlib inline",
"Optically pumped magnetometer (OPM) data\nIn this dataset, electrical median nerve stimulation was delivered to the\nleft wrist of the subject. Somatosensory evoked fields were measured using\nnine QuSpin SERF OPMs placed over the right-hand side somatomotor area.\nHere we demonstrate how to localize these custom OPM data in MNE.",
"# sphinx_gallery_thumbnail_number = 4\n\nimport os.path as op\n\nimport numpy as np\nimport mne\nfrom mayavi import mlab\n\ndata_path = mne.datasets.opm.data_path()\nsubject = 'OPM_sample'\nsubjects_dir = op.join(data_path, 'subjects')\nraw_fname = op.join(data_path, 'MEG', 'OPM', 'OPM_SEF_raw.fif')\nbem_fname = op.join(subjects_dir, subject, 'bem',\n subject + '-5120-5120-5120-bem-sol.fif')\nfwd_fname = op.join(data_path, 'MEG', 'OPM', 'OPM_sample-fwd.fif')\ncoil_def_fname = op.join(data_path, 'MEG', 'OPM', 'coil_def.dat')",
"Prepare data for localization\nFirst we filter and epoch the data:",
"raw = mne.io.read_raw_fif(raw_fname, preload=True)\nraw.filter(None, 90, h_trans_bandwidth=10.)\nraw.notch_filter(50., notch_widths=1)\n\n\n# Set epoch rejection threshold a bit larger than for SQUIDs\nreject = dict(mag=2e-10)\ntmin, tmax = -0.5, 1\n\n# Find Median nerve stimulator trigger\nevent_id = dict(Median=257)\nevents = mne.find_events(raw, stim_channel='STI101', mask=257, mask_type='and')\npicks = mne.pick_types(raw.info, meg=True, eeg=False)\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax,\n reject=reject, picks=picks, proj=False, decim=4)\nevoked = epochs.average()\nevoked.plot()\ncov = mne.compute_covariance(epochs, tmax=0.)",
"Examine our coordinate alignment for source localization and compute a\nforward operator:\n<div class=\"alert alert-info\"><h4>Note</h4><p>The Head<->MRI transform is an identity matrix, as the\n co-registration method used equates the two coordinate\n systems. This mis-defines the head coordinate system\n (which should be based on the LPA, Nasion, and RPA)\n but should be fine for these analyses.</p></div>",
"bem = mne.read_bem_solution(bem_fname)\ntrans = None\n\n# To compute the forward solution, we must\n# provide our temporary/custom coil definitions, which can be done as::\n#\n# with mne.use_coil_def(coil_def_fname):\n# fwd = mne.make_forward_solution(\n# raw.info, trans, src, bem, eeg=False, mindist=5.0,\n# n_jobs=1, verbose=True)\n\nfwd = mne.read_forward_solution(fwd_fname)\n\nwith mne.use_coil_def(coil_def_fname):\n mne.viz.plot_alignment(\n raw.info, trans, subject, subjects_dir, ('head', 'pial'), bem=bem)\n\nmlab.view(45, 60, distance=0.4, focalpoint=(0.02, 0, 0.04))",
"Perform dipole fitting",
"# Fit dipoles on a subset of time points\nwith mne.use_coil_def(coil_def_fname):\n dip_opm, _ = mne.fit_dipole(evoked.copy().crop(0.015, 0.080),\n cov, bem, trans, verbose=True)\nidx = np.argmax(dip_opm.gof)\nprint('Best dipole at t=%0.1f ms with %0.1f%% GOF'\n % (1000 * dip_opm.times[idx], dip_opm.gof[idx]))\n\n# Plot N20m dipole as an example\ndip_opm.plot_locations(trans, subject, subjects_dir,\n mode='orthoview', idx=idx)",
"Perform minimum-norm localization\nDue to the small number of sensors, there will be some leakage of activity\nto areas with low/no sensitivity. Constraining the source space to\nareas we are sensitive to might be a good idea.",
"inverse_operator = mne.minimum_norm.make_inverse_operator(\n evoked.info, fwd, cov)\n\nmethod = \"MNE\"\nsnr = 3.\nlambda2 = 1. / snr ** 2\nstc = mne.minimum_norm.apply_inverse(\n evoked, inverse_operator, lambda2, method=method,\n pick_ori=None, verbose=True)\n\n# Plot source estimate at time of best dipole fit\nbrain = stc.plot(hemi='rh', views='lat', subjects_dir=subjects_dir,\n initial_time=dip_opm.times[idx],\n clim=dict(kind='percent', lims=[99, 99.9, 99.99]))"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
AlertaDengue/InfoDenguePredict
|
Notebooks/Spatial Exploration.ipynb
|
gpl-3.0
|
[
"import geopandas as gpd\nfrom ipywidgets import interact\nimport ipywidgets as widgets\nfrom matplotlib import pyplot as plt\nfrom pathlib import Path\nimport seaborn as sns\nimport geopandas as gpd\nimport geobr\n\nimport pandas as pd\nimport getpass, os\nos.environ['PSQL_USER']='dengueadmin'\nos.environ['PSQL_HOST']='localhost'\nos.environ['PSQL_DB']='dengue'\nos.environ['PSQL_PASSWORD']=getpass.getpass(\"Enter the database password: \")\n\nos.chdir('..')\nfrom infodenguepredict.data.infodengue import get_temperature_data, get_alerta_table, get_tweet_data\n%matplotlib inline",
"Fetching data from Infodengue\nWe can download the data from a full state. Let's pick Goiás.",
"go = get_alerta_table(state='GO', doenca='dengue')\n\ngo\n\nmunicipios = geobr.read_municipality(code_muni='GO')\nmunicipios\n\nmunicipios['code_muni'] = municipios.code_muni.astype('int')\nmunicipios.plot(figsize=(10,10));\n\ngoias = pd.merge(go.reset_index(), municipios,how='left', left_on='municipio_geocodigo', right_on='code_muni')\ngoias\n\ngoias = gpd.GeoDataFrame(goias)\n\nax = goias[goias.SE==202144].plot(figsize=(10,10),\n column='casos_est',\n scheme=\"naturalbreaks\",\n legend=True,\n legend_kwds={'title': \"Casos estimados\"},\n\n );\nax.set_axis_off();",
"Building the dashboard",
"from functools import lru_cache\nfrom IPython.display import display, Markdown\nimport pandas_bokeh\npandas_bokeh.output_notebook()\npd.options.plotting.backend = \"pandas_bokeh\"\n\n@lru_cache(maxsize=27)\ndef get_dados(sigla='PR', doenca='dengue'):\n df = get_alerta_table(state=sigla, doenca=doenca)\n municipios = geobr.read_municipality(code_muni=sigla)\n municipios['code_muni'] = municipios.code_muni.astype('int')\n dados = pd.merge(df.reset_index(), municipios,how='left', left_on='municipio_geocodigo', right_on='code_muni')\n dados = dados.sort_values('SE')\n return gpd.GeoDataFrame(dados)\n\n\ndef gera_SE_seq(anoi, anof):\n ses=[]\n for a in range(anoi,anof+1):\n for w in range(1,52):\n w = str(w).zfill(2)\n ses.append(int(f'{a}{w}'))\n return ses\n\nestado='TO'\n\ndoenca='chik'\ndoenca='dengue'\ngdf = get_dados(estado, doenca)\ntry:\n gdf.set_index('data_iniSE', inplace=True)\nexcept KeyError:\n pass\nmunis = list(set(gdf.name_muni))\ntry: \n munis = sorted(munis)\nexcept: pass\n\n@interact\ndef painel(mun=widgets.Select(options=munis, description='Municipio'), \n week=widgets.SelectionSlider(options=gera_SE_seq(2021,2022), description='SE'), \n ):\n week = gdf.SE.max() if week > gdf.SE.max() else week\n \n umid = pd.DataFrame(gdf.reset_index())[['data_iniSE','umidmin', 'umidmax']].plot_bokeh(kind='line', x='data_iniSE')\n temp = pd.DataFrame(gdf.reset_index())[['data_iniSE','tempmin','tempmax']].plot_bokeh(kind='line', x='data_iniSE')\n mapplot = gdf[gdf.SE==int(week)].plot_bokeh(simplify_shapes=5000,\n dropdown=['casos_est','casos','p_inc100k','nivel'],\n colormap='Viridis', \n hovertool_string=f\"\"\"<h1>@name_muni</h1>\n <h3>Casos: @casos </h3>\"\"\",\n )\n cases = pd.DataFrame(gdf[gdf.name_muni==mun].reset_index())[['data_iniSE','casos','casos_est']].plot_bokeh(kind='line', x='data_iniSE')\n mapplot.width = 900\n umid.width = 450\n temp.width = 450\n cases.width = 900\n\n layout = pandas_bokeh.column(mapplot,\n pandas_bokeh.row(umid, temp),\n cases)\n 
pandas_bokeh.plot_grid(layout, width=1200)\n\nestado='TO'\ndoenca='chik'\ndoenca='dengue'\ngdf = get_dados(estado, doenca)\ntry:\n gdf.set_index('data_iniSE', inplace=True)\nexcept KeyError:\n pass\nmunis = list(set(gdf.name_muni))\ntry: \n munis = sorted(munis)\nexcept: pass\n\npd.options.plotting.backend = \"matplotlib\"\n\n@interact\ndef painel(mun=widgets.Select(options=munis, description='Municipio'), \n week=widgets.SelectionSlider(options=gera_SE_seq(2021,2022), value=202215, description='SE'),\n variable=['casos','casos_est','p_inc100k']\n ):\n week = gdf.SE.max() if week > gdf.SE.max() else week\n display(Markdown(f\"# {doenca}\"))\n fig, axs = plt.subplot_mosaic([['a', 'c'], ['b', 'c'], ['d', 'd']],\n figsize=(20, 20),\n constrained_layout=True)\n \n for label, ax in axs.items():\n if label == 'a':\n gdf[(gdf.name_muni==mun)&(gdf.SE>=202101)].umidmax.plot(kind='area',ax=ax,alpha=0.3, label='máxima')\n gdf[(gdf.name_muni==mun)&(gdf.SE>=202101)].umidmin.plot(kind='area',ax=ax,alpha=0.3, label='mínima')\n ax.set_title('Umidade')\n ax.legend()\n elif label == 'b':\n gdf[(gdf.name_muni==mun)&(gdf.SE>=202101)].tempmin.plot(ax=ax, label='mínima')\n # gdf.tempmax.plot(ax=ax, label='máxima')\n ax.set_title('Temperatura')\n ax.legend()\n elif label == 'c':\n leg = 'Casos estimados' if variable=='casos_est' else 'Casos notificados'\n gdf[gdf.SE==int(week)].plot(ax=ax,column=variable,scheme=\"User_defined\",\n legend=True,\n classification_kwds=dict(bins=[20,50,100,500,2000,5000]),\n legend_kwds={'title': f\"{leg}\",'loc':'lower right'})\n ax.set_axis_off();\n ax.set_title(str(week));\n elif label == 'd':\n gdf[(gdf.name_muni==mun)&(gdf.SE>=202101)].casos.plot(ax=ax,label='casos')\n gdf[(gdf.name_muni==mun)&(gdf.SE>=202101)].casos_est.plot(ax=ax,label='casos_est')\n ax.legend()\n ax.vlines(x=gdf[gdf.SE==int(week)].index,ymin=0,ymax=500)\n ax.set_title(mun)\n \n plt.show();",
"Building Animated films",
"data_path = Path('./data/')\nmap_path = Path(\"./maps/\")\n\nos.makedirs(data_path, exist_ok=True)\nos.makedirs(map_path, exist_ok=True)",
"Downloading data\nWe will start by Downloading the full alerta table for all diseases.",
"from infodenguepredict.data.infodengue import get_full_alerta_table\n\ndiseases = ['dengue','chik','zika']\nfor dis in diseases:\n os.makedirs(data_path/dis, exist_ok=True)\n\nfor dis in diseases:\n get_full_alerta_table(dis, output_dir=data_path/dis, chunksize=50000, start_SE=202140)\n\nbrmunis = geobr.read_municipality(code_muni='all')\nbrmunis.plot();\n\ndef merge(munis, df):\n munis['code_muni'] = munis.code_muni.astype('int')\n dados = pd.merge(df.reset_index(), munis,how='left', left_on='municipio_geocodigo', right_on='code_muni')\n dados = dados.sort_values('SE')\n return gpd.GeoDataFrame(dados)\n\ndef create_frames(dados, doenca='dengue', variable='casos_est'):\n vnames = {'casos_est': 'Casos Estimados', 'casos': 'Casos Notificados', 'p_inc100k': 'Incidência por 100 mil hab.'}\n leg = vnames[variable]\n if doenca == 'dengue':\n bins = {'casos_est':[20,50,100,500,1000],'p_inc100k':[50,500,1000,2500] }\n elif doenca == 'chik':\n bins = {'casos_est':[5,15,50,120,300],'p_inc100k':[50,100,500,1000] }\n elif doenca == 'zika':\n bins = {'casos_est':[5,15,50,120,300],'p_inc100k':[50,100,500,1000] }\n acumulados = 0\n ews = sorted(list(set(dados.SE)))\n for i,se in enumerate(ews):\n fig = dados[dados.SE==se].plot(figsize=(10,10),\n column=variable,\n scheme=\"user_defined\",\n cmap='plasma',\n classification_kwds={'bins':bins[variable]},\n legend=True,\n legend_kwds={'title': f\"{leg}\",'loc':'lower right'}\n );\n acumulados += dados[dados.SE==se].casos_est.sum()\n fig.set_axis_off();\n fig.text(-50,2, f'Casos: {int(acumulados)}', fontsize=24)\n fig.set_title(f'{leg} de {doenca}\\nna semana {str(se)[-2:]} de {str(se)[:-2]}', fontdict={'fontsize': 14});\n opath = map_path/variable\n os.makedirs(opath, exist_ok=True)\n plt.savefig(opath/f'{doenca}_{i:0>3}.png', dpi=200)\n plt.close()\n ",
"loading data from disk\nwe can load all chunks at once,into a single dataframe, since they are parquet files.doenca",
"dengue = pd.read_parquet(data_path/'dengue')\ndengue.sort_values('SE',inplace=True)\nchik = pd.read_parquet(data_path/'chik')\nchik.sort_values('SE',inplace=True)\nzika = pd.read_parquet(data_path/'zika')\nzika.sort_values('SE',inplace=True)\ndengue\n\ndmdf = merge(brmunis,dengue)\ncmdf = merge(brmunis, chik)\nzmdf = merge(brmunis, zika)\n\ndmdf[dmdf.SE==202208].plot(column='casos_est',scheme=\"naturalbreaks\");\n\nos.getcwd()\n\ncreate_frames(dmdf)\ncreate_frames(dmdf, variable='p_inc100k')\n\ncreate_frames(cmdf, doenca='chik')\ncreate_frames(cmdf, doenca='chik', variable='p_inc100k')\n\ncreate_frames(zmdf, doenca='zika')\ncreate_frames(zmdf, doenca='zika', variable='p_inc100k')",
"Now you can convert the images into a movie with the following command:\nbash\nffmpeg -framerate 2 -i dengue_%03d.png -c:v libx264 -r 30 -pix_fmt yuv420p dengue.mp4"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
quantumlib/Cirq
|
docs/interop.ipynb
|
apache-2.0
|
[
"Copyright 2020 The Cirq Developers",
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"Import/export circuits\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://quantumai.google/cirq/interop\"><img src=\"https://quantumai.google/site-assets/images/buttons/quantumai_logo_1x.png\" />View on QuantumAI</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/quantumlib/Cirq/blob/master/docs/interop.ipynb\"><img src=\"https://quantumai.google/site-assets/images/buttons/colab_logo_1x.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/quantumlib/Cirq/blob/master/docs/interop.ipynb\"><img src=\"https://quantumai.google/site-assets/images/buttons/github_logo_1x.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/Cirq/docs/interop.ipynb\"><img src=\"https://quantumai.google/site-assets/images/buttons/download_icon_1x.png\" />Download notebook</a>\n </td>\n</table>",
"try:\n import cirq\nexcept ImportError:\n print(\"installing cirq...\")\n !pip install --quiet cirq\n print(\"installed cirq.\")",
"Cirq has several features that allow the user to import/export from other quantum languages.\nExporting and importing to JSON\nFor storing circuits or for transfering them between collaborators, JSON can be a good choice. Many objects in cirq can be serialized as JSON and then stored as a text file for transfer, storage, or for posterity.\nAny object that can be serialized, which includes circuits, moments, gates, operations, and many other cirq constructs, can be turned into JSON with the protocol cirq.to_json(obj). This will return a string that contains the serialized JSON.\nTo take JSON and turn it back into a cirq object, the protocol cirq.read_json can be used. This can take a python file or string filename as the first argument (file_or_fn) or can take a named json_text parameter to accept a string input.\nThe following shows how to serialize and de-serialize a circuit.",
"import cirq\n\n# Example circuit\ncircuit = cirq.Circuit(cirq.Z(cirq.GridQubit(1,1)))\n\n# Serialize to a JSON string\njson_string = cirq.to_json(circuit)\nprint('JSON string:')\nprint(json_string)\nprint()\n\n# Now, read back the string into a cirq object\n# cirq.read_json can also read from a file\nnew_circuit = cirq.read_json(json_text=json_string)\n\nprint(f'Deserialized object of type: {type(new_circuit)}:')\nprint(new_circuit)",
"Advanced: Adding JSON compatibility for custom objects in cirq\nMost cirq objects come with serialization functionality added already. If you are adding a custom object (such as a custom gate), you can still serialize the object, but you will need to add the magic functions _json_dict_ and _from_json_dict_ to your object to do so.\nWhen de-serializing, in order to instantiate the correct object, you will also have to pass in a custom resolver. This is a function that will take as input the serialized cirq_type string and output a constructable class. See \ncirq.protocols.json_serialization for more details.\nImporting from OpenQASM\nThe QASM importer is in an experimental state and currently only supports a subset of the full OpenQASM spec. Amongst others, classical control, arbitrary gate definitions, and even some of the gates that don't have a one-to-one representation in Cirq, are not yet supported. The functionality should be sufficient to import interesting quantum circuits. Error handling is very simple - on any lexical or syntactical error, a QasmException is raised.\nImporting cirq.Circuit from QASM format\nRequirements: ply",
"!pip install --quiet cirq\n!pip install --quiet ply==3.4",
"The following call will create a circuit defined by the input QASM string:",
"from cirq.contrib.qasm_import import circuit_from_qasm\ncircuit = circuit_from_qasm(\"\"\"\n OPENQASM 2.0;\n include \"qelib1.inc\";\n qreg q[3];\n creg meas[3];\n h q;\n measure q -> meas;\n \"\"\")\nprint(circuit)",
"Supported control statements\n| Statement|Cirq support|Description|Example|\n|----| --------| --------| --------|\n|OPENQASM 2.0;| supported| Denotes a file in Open QASM format| OPENQASM 2.0;|\n|qreg name[size];| supported (see mapping qubits)| Declare a named register of qubits|qreg q[5];|\n|creg name[size];|supported (see mapping classical register to measurement keys)| Declare a named register of bits|creg c[5];|\n|include \"filename\";| supported ONLY to include the standard \"qelib1.inc\" lib for compatibility| Open and parse another source file|include \"qelib1.inc\";|\n|gate name(params) qargs;|NOT supported| Declare a unitary gate||\n|opaque name(params) qargs;| NOT supported| Declare an opaque gate||\n|// comment text| supported|Comment a line of text|// supported!|\n|U(θ,φ,λ) qubit/qreg;| supported| Apply built-in single qubit gate(s)|U(pi/2,2*pi/3,0) q[0];|\n|CX qubit/qreg,qubit/qreg;| supported|Apply built-in CNOT gate(s)|CX q[0],q[1];|\n|measure qubit/qreg|supported|Make measurements in Z basis||\n|reset qubit/qreg;| NOT supported|Prepare qubit(s) in <code>|0></code>|reset q[0];| \n|gatename(params) qargs;| supported for ONLY the supported subset of standard gates defined in \"qelib1.inc\"|Apply a user-defined unitary gate|rz(pi/2) q[0];|\n|if(creg==int) qop;| NOT supported| Conditionally apply quantum operation|if(c==5) CX q[0],q[1];|\n|barrier qargs;| NOT supported| Prevent transformations across this source line|barrier q[0],q[1];|\nGate conversion rules\nNote: The standard Quantum Experience gates (defined in qelib.inc) are \nbased on the U and CX built-in instructions, and we could generate them dynamically. Instead, we chose to map them to native Cirq gates explicitly, which results in a more user-friendly circuit. 
\n| QE gates| Cirq translation| Notes|\n| --------| --------| --------|\n|U(θ,φ,λ) |QasmUGate(θ,φ,λ)|| \n|CX |cirq.CX|| \n|u3(θ,φ,λ)|QasmUGate(θ,φ,λ)||\n|u2(φ,λ) = u3(π/2,φ,λ)|QasmUGate(π/2,φ,λ)|| \n|u1 (λ) = u3(0,0,λ)| NOT supported || \n|id|cirq.Identity| one single-qubit Identity gate is created for each qubit if applied on a register|\n|u0(γ)| NOT supported| this is the \"WAIT gate\" for length γ in QE| \n|x|cirq.X|| \n|y|cirq.Y|| \n|z|cirq.Z|| \n|h|cirq.H|| \n|s|cirq.S|| \n|sdg|cirq.S**-1|| \n|t|cirq.T|| \n|tdg|cirq.T**-1||\n|rx(θ)|cirq.Rx(θ)|| \n|ry(θ)|cirq.Ry(θ)|| \n|rz(θ)|cirq.Rz(θ)|| \n|cx|cirq.CX|| \n|cy|cirq.ControlledGate(cirq.Y)|| \n|cz|cirq.CZ|| \n|ch|cirq.ControlledGate(cirq.H)|| \n|swap|cirq.SWAP|| \n|ccx|cirq.CCX|| \n|cswap|cirq.CSWAP|| \n|crz| NOT supported || \n|cu1| NOT supported|| \n|cu3| NOT supported|| \n|rzz| NOT supported|| \nMapping quantum registers to qubits\nFor a quantum register qreg qfoo[n]; the QASM importer will create cirq.NamedQubits named qfoo_0..qfoo_<n-1>. \nMapping classical registers to measurement keys\nFor a classical register creg cbar[n]; the QASM importer will create measurement keys named cbar_0..cbar_<n-1>. \nImporting from Quirk\nQuirk is a drag-and-drop quantum circuit simulator, great for manipulating and exploring small quantum circuits. Quirk's visual style gives a reasonably intuitive feel of what is happening, state displays update in real time as you change the circuit, and the general experience is fast and interactive.\nAfter constructing a circuit in Quirk, you can easily convert it to cirq using the URL generated. Note that not all gates in Quirk are currently convertible.",
"quirk_url = \"https://algassert.com/quirk#circuit=%7B%22cols%22:[[%22H%22,%22H%22],[%22%E2%80%A2%22,%22X%22],[%22H%22,%22H%22]]}\"\nc= cirq.quirk_url_to_circuit(quirk_url)\n\nprint(c)",
"You can also convert the JSON from the \"export\" button on the top bar of Quirk. Note that you must parse the JSON string into a dictionary before passing it to the function:",
"import json\n\nquirk_str=\"\"\"{\n \"cols\": [\n [\n \"H\",\n \"H\"\n ],\n [\n \"•\",\n \"X\"\n ],\n [\n \"H\",\n \"H\"\n ]\n ]\n}\"\"\"\nquirk_json=json.loads(quirk_str)\nc= cirq.quirk_json_to_circuit(quirk_json)\n\nprint(c)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
drjordy66/data-512-a1
|
src/hcds-a1-data-curation.ipynb
|
mit
|
[
"A1: Data curation\nDane Jordan\nImport necessary libraries that will be used",
"import json\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport requests\n\nfrom datetime import datetime\n\n%matplotlib inline",
"Step 1: Data acquisition\nBelow is a function that takes two inputs, the API endpoint (either 'pagecounts' or 'pageviews') and the access parameter. For pagecounts the access parameter can be 'all-sites', 'desktop-site', or 'mobile-site'. For pageviews the access parameter can be 'desktop', 'mobile-app', or 'mobile-web'. The function fills in all other parameters for an API call (thanks Jonathan and Oliver!), and returns the API response.",
"# since we will be performing api calls at least five times, we will functionalize it\ndef data_acquisition(api_endpoint, access):\n\n '''\n call the wikimedia api and return a json format data set\n :param api_endpoint: legacy (pagecounts)\n current (pageviews)\n :param access: legacy (all-sites, desktop-site, mobile-site)\n current (desktop, mobile-app, mobile-web)\n '''\n \n # define the parameters for the api call (params listed below are universal across all api calls)\n params = {'project' : 'en.wikipedia.org',\n 'agent' : 'user',\n 'granularity' : 'monthly',\n 'start' : '2008010100',\n 'end' : '2017100100'\n }\n\n # check api_endpoint variable entered is valid for pagecounts\n if api_endpoint == 'pagecounts':\n endpoint = 'https://wikimedia.org/api/rest_v1/metrics/legacy/pagecounts/aggregate/{project}/{access-site}/{granularity}/{start}/{end}'\n params['access-site'] = access\n \n # check access variable entered is valid for pagecounts, print error otherwise\n if access != 'all-sites' and access != 'desktop-site' and access != 'mobile-site':\n print('error: invalid access (all-sites, desktop-site, mobile-site)')\n else:\n pass\n \n # check api_endpoint variable entered is valid for pageviews\n elif api_endpoint == 'pageviews':\n endpoint = 'https://wikimedia.org/api/rest_v1/metrics/pageviews/aggregate/{project}/{access}/{agent}/{granularity}/{start}/{end}'\n params['access'] = access\n \n # check access variable entered is valid for pageviews, print error otherwise\n if access != 'desktop' and access != 'mobile-app' and access != 'mobile-web':\n print('error: invalid access (desktop, mobile-app, mobile-web)')\n else:\n pass\n \n # print error message if invalid api_endpoint variable\n else:\n print('error: invalid api_endpoint (pagecounts, pageviews)')\n \n# headers={'User-Agent' : 'https://github.com/drjordy66', 'From' : 'drjordan@uw.edu'}\n \n api_call = requests.get(endpoint.format(**params))\n \n return api_call.json()\n",
"Run the above function to call the API and assign the responses to variables",
"response_pageview_desktop = data_acquisition('pageviews', 'desktop')\nresponse_pageview_mobileweb = data_acquisition('pageviews', 'mobile-web')\nresponse_pageview_mobileapp = data_acquisition('pageviews', 'mobile-app')\nresponse_pagecount_desktop = data_acquisition('pagecounts', 'desktop-site')\nresponse_pagecount_mobile = data_acquisition('pagecounts', 'mobile-site')",
"Export the API raw data files. This section has been commented out in order to not continuously overwrite the raw data files. The raw data files have already been created and will be imported in the next step.",
"# json.dump(response_pageview_desktop, open('../data_raw/pageviews_desktop_' + response_pageview_desktop['items'][0]['timestamp'][:-4] + '-' + response_pageview_desktop['items'][-1]['timestamp'][:-4] + '.json', 'w'), indent=4)\n# json.dump(response_pageview_mobileweb, open('../data_raw/pageviews_mobile-web_' + response_pageview_mobileweb['items'][0]['timestamp'][:-4] + '-' + response_pageview_mobileweb['items'][-1]['timestamp'][:-4] + '.json', 'w'), indent=4)\n# json.dump(response_pageview_mobileapp, open('../data_raw/pageviews_mobile-app_' + response_pageview_mobileapp['items'][0]['timestamp'][:-4] + '-' + response_pageview_mobileapp['items'][-1]['timestamp'][:-4] + '.json', 'w'), indent=4)\n# json.dump(response_pagecount_desktop, open('../data_raw/pagecounts_desktop-site_' + response_pagecount_desktop['items'][0]['timestamp'][:-4] + '-' + response_pagecount_desktop['items'][-1]['timestamp'][:-4] + '.json', 'w'), indent=4)\n# json.dump(response_pagecount_mobile, open('../data_raw/pagecounts_mobile-site_' + response_pagecount_mobile['items'][0]['timestamp'][:-4] + '-' + response_pagecount_mobile['items'][-1]['timestamp'][:-4] + '.json', 'w'), indent=4)",
"Step 2: Data processing\nImport the raw .json files to process and create a new file for analysis.",
"response_pagecount_desktop = json.load(open('../data_raw/pagecounts_desktop-site_200801-201608.json'))\nresponse_pagecount_mobile = json.load(open('../data_raw/pagecounts_mobile-site_201410-201608.json'))\nresponse_pageview_desktop = json.load(open('../data_raw/pageviews_desktop_201507-201709.json'))\nresponse_pageview_mobileapp = json.load(open('../data_raw/pageviews_mobile-app_201507-201709.json'))\nresponse_pageview_mobileweb = json.load(open('../data_raw/pageviews_mobile-web_201507-201709.json'))",
"Functions for processing\nget_views and get_counts take the raw .json files as inputs, strip the timestamps and views/counts, and return arrays with two columns (timestamp, views/counts) and a row with each month's worth of data.\nlookup_val takes the arrays created from the prior functions as one input and a date as a second input. It uses the date to find the index within the array from column 1 (timestamp) and returns the value from that same index in column 2 (counts/views). If the date is not within the array, then a value of 0 is assigned.",
"def get_views(api_response):\n \n '''\n strip all views from an api response\n '''\n \n temp_list = []\n for i in api_response['items']:\n temp_list.append([i['timestamp'], i['views']])\n \n return np.array(temp_list)\n\ndef get_count(api_response):\n \n '''\n strip all views from an api response\n '''\n \n temp_list = []\n for i in api_response['items']:\n temp_list.append([i['timestamp'], i['count']])\n \n return np.array(temp_list)\n\ndef lookup_val(api_views, date):\n \n '''\n find row index against all dates and create views respective column\n '''\n \n if i in list(api_views[:,0]):\n row_idx = list(api_views[:,0]).index(date)\n row_val = int(api_views[:,1][row_idx])\n else:\n row_val = 0\n \n return row_val\n",
"Run the above functions to get all of the views/counts for both the legacy and current API",
"# strip all dates and views/count from api responses\npageview_desktop_views = get_views(response_pageview_desktop)\npageview_mobileweb_views = get_views(response_pageview_mobileweb)\npageview_mobileapp_views = get_views(response_pageview_mobileapp)\npagecount_desktop_views = get_count(response_pagecount_desktop)\npagecount_mobile_views = get_count(response_pagecount_mobile)\n",
"Processing\nFirst, all of the formatted arrays from the API responses are concatenated and the first column (timestamp) is taken as a set() to remove any duplicate timestamps. From here we can easily parse the timestamps into a list of just the years and a list of just the months. This gives us our first two columns of our cleaned data, 'year' and 'month'.",
"# combine all data into one array\nall_dates_views = np.concatenate((pageview_desktop_views,\n pageview_mobileweb_views,\n pageview_mobileapp_views,\n pagecount_desktop_views,\n pagecount_mobile_views))\n\n# strip only dates, remove duplicates, sort\nall_dates = sorted(list(set(all_dates_views[:, 0])))\n\n# parse into years and months\nyear_col = [all_dates[i][:-6] for i in range(len(all_dates))]\nmonth_col = [all_dates[i][4:6] for i in range(len(all_dates))]\n",
"Second, we initialize five (one for each API response) lists where we will obtain just the counts/views from the two column arrays. We will then loop through all of the dates (no duplicates) that we found from the previous step and use the lookup_val function to find the corresponding counts/views for each API response and append these to lists we initialized.",
"# initialize lists for columns of csv file\npageview_desktop_views_col = []\npageview_mobileweb_views_col = []\npageview_mobileapp_views_col = []\npagecount_desktop_views_col = []\npagecount_mobile_views_col = []\n\n# loop through all of the dates and lookup respective values from each api response\nfor i in all_dates:\n pageview_desktop_views_col.append(lookup_val(pageview_desktop_views, i))\n pageview_mobileweb_views_col.append(lookup_val(pageview_mobileweb_views, i))\n pageview_mobileapp_views_col.append(lookup_val(pageview_mobileapp_views, i))\n pagecount_desktop_views_col.append(lookup_val(pagecount_desktop_views, i))\n pagecount_mobile_views_col.append(lookup_val(pagecount_mobile_views, i))\n",
"Third, we need to aggregate the two mobile sets of data from pageviews to get the total mobile data. For both pagecounts and pageviews we aggregate the desktop counts/views and mobile counts/views to get the total views for each.",
"# aggregate the mobile views from pageviews and the \"all views\" from pageviews and pagecounts\npageview_mobile_views_col = [sum(i) for i in zip(pageview_mobileweb_views_col, pageview_mobileapp_views_col)]\npageview_all_views_col = [sum(i) for i in zip(pageview_desktop_views_col, pageview_mobile_views_col)]\npagecount_all_views_col = [sum(i) for i in zip(pagecount_desktop_views_col, pagecount_mobile_views_col)]\n",
"Convert to pandas DataFrame for easy export.",
"# assign column data to a pandas dataframe\ndf = pd.DataFrame({'year': year_col,\n 'month': month_col,\n 'pagecount_all_views': pagecount_all_views_col,\n 'pagecount_desktop_views': pagecount_desktop_views_col,\n 'pagecount_mobile_views': pagecount_mobile_views_col,\n 'pageview_all_views': pageview_all_views_col,\n 'pageview_desktop_views': pageview_desktop_views_col,\n 'pageview_mobile_views': pageview_mobile_views_col})\n\n# organize in correct column order\ndf = df[['year',\n 'month',\n 'pagecount_all_views',\n 'pagecount_desktop_views',\n 'pagecount_mobile_views',\n 'pageview_all_views',\n 'pageview_desktop_views',\n 'pageview_mobile_views']]\n",
"Export data in single csv. This section has been commented out in order to not continuously overwrite the cleaned data file. The cleaned data file has already been created and will be imported in the next step.",
"# write the column data to csv\n# df.to_csv('../data_clean/en-wikipedia_traffic_200801-201709.csv', index=False)\n",
"Step 3: Analysis\nImport the cleaned data file to use for analysis.",
"df = pd.read_csv('../data_clean/en-wikipedia_traffic_200801-201709.csv',\n dtype={'year': str, 'month': str})",
"Plot the data\nThe dates from the csv are converted to a datetime format in order to be plotted neatly. The points from the data are plotted, filtering out non-zero values in y-axis data.\nThe figure is then saved as a .png file.",
"# convert dates to a datetime format for plotting\ndates = np.array([datetime.strptime(list(df['year'])[i] + list(df['month'])[i], '%Y%m') for i in range(len(df))])\n\n# set plot size\nplt.figure(figsize=(16, 8))\n\n# plot the points, filtering on non-zero values in the column data\nplt.plot(dates[np.array(df['pageview_desktop_views']) > 0],\n np.array(df['pageview_desktop_views'])[np.array(df['pageview_desktop_views']) > 0], 'g')\nplt.plot(dates[np.array(df['pageview_mobile_views']) > 0],\n np.array(df['pageview_mobile_views'])[np.array(df['pageview_mobile_views']) > 0], 'b')\nplt.plot(dates[np.array(df['pageview_all_views']) > 0],\n np.array(df['pageview_all_views'])[np.array(df['pageview_all_views']) > 0], 'k')\nplt.plot(dates[np.array(df['pagecount_desktop_views']) > 0][:-1],\n np.array(df['pagecount_desktop_views'])[np.array(df['pagecount_desktop_views']) > 0][:-1], 'g--')\nplt.plot(dates[np.array(df['pagecount_mobile_views']) > 0][:-1],\n np.array(df['pagecount_mobile_views'])[np.array(df['pagecount_mobile_views']) > 0][:-1], 'b--')\nplt.plot(dates[np.array(df['pagecount_all_views']) > 0][:-1],\n np.array(df['pagecount_all_views'])[np.array(df['pagecount_all_views']) > 0][:-1], 'k--')\n\n# add gridlines, x-axis label, scale (to match assignment), title, legend\nplt.grid(True)\nplt.xlabel('May 2015: a new pageview definition took effect, which eliminated all crawler traffic. Dashed lines mark old definition.', color='r')\nplt.ylim(0, 12e9)\nplt.yticks([0, 2e9, 4e9, 6e9, 8e9, 10e9, 12e9], [0, 2000, 4000, 6000, 8000, 10000, 12000])\nplt.title('Page Views on English Wikipedia (x 1,000,000)')\nplt.legend(['main site', 'mobile site', 'total'])\n\n# save an image of the analysis\nplt.savefig('../analysis/analysis.png')"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
bayesimpact/bob-emploi
|
data_analysis/notebooks/datasets/french_urban_entities.ipynb
|
gpl-3.0
|
[
"French Urban Entities\nAuthor: Pascal pascal@bayesimpact.org\nDate: 2017-12-19\nIn November 2017, the CREST asked us to analyze our users through the dimension of people living in urban vs rural areas. We started investigating and found a dataset from INSEE that they call urban entities. \nThis notebook analyses this dataset. For each city in France it gives a mapping to the urban entity it is part of. According to the documentation, an urban entity is a contiguous urban area with less than 200m between buildings. Areas that are populated with less than 2000 inhabitants are considered as rural.\nImport\nHere we use the cleaned_data lib that already does the import and basic cleaning on the data. To get the data required to run this notebook run:\nsh\ndocker-compose run --rm data-analysis-prepare make \\\n data/geo/french_urban_entities.xls \\\n data/geo/french_cities.csv \\\n data/geo/insee_france_cities.tsv\nLet's open 3 datasets related to cities: the urban entities, the index of all French cities and the French city stats:",
"import os\nfrom os import path\n\nimport pandas as pd\n\nfrom bob_emploi.data_analysis.lib import cleaned_data\n\nDATA_FOLDER = os.getenv('DATA_FOLDER')\n\nurban_entities = cleaned_data.french_urban_entities(DATA_FOLDER)\nurban_entities.head()\n\ncities = cleaned_data.french_cities(DATA_FOLDER)\ncities.head()\n\ncity_stats = cleaned_data.french_city_stats(DATA_FOLDER)\ncity_stats.head()",
"Pretty nice: they are all indexed with the city ID, or \"Code Officiel Géographique\" so we can merge those three datasets. While doing that, let's make sure we restrict to current cities only:",
"all_cities = pd.merge(\n cities[cities.current & ~cities.arrondissement], city_stats,\n right_index=True, left_index=True, how='outer')\nall_cities = pd.merge(\n all_cities, urban_entities,\n right_index=True, left_index=True, how='outer')\nall_cities.head()",
"OK we're all set to start looking at the data.\nCoverage",
"official_cities = all_cities[all_cities.name.notnull()]\nofficial_cities.urban.notnull().value_counts()",
"Pretty neat! We have urban data for all the cities. Now let's try to get a better understanding of this data.\nData\nThe two fields we are going to dig are urban and UU2010. Supposedly urban gives a score where 0 means rural and then from 1 to 8, it relates to bigger and bigger urban entities. UU2010 gives the ID of the urban entity the city is part of.\nLet's do some quick point checks:",
"official_cities.sort_values('population', ascending=False)[['name', 'urban', 'UU2010']].head()",
"That sounds good: the biggest cities are inside the biggest urban entities.\nLet's check one of them:",
"official_cities[official_cities.UU2010 == '00758']\\\n .sort_values('population', ascending=False)[['name', 'urban', 'UU2010']].head()",
"Cool, those are indeed cities that are part of the Lyon urban entities.\nLet's check the other side of the spectrum:",
"official_cities[official_cities.urban == 0][['name', 'urban', 'UU2010', 'population']].head()",
"Indeed those seems like small villages (population count is low) however they seem to have an UU2010 field which is common. Apparently that field is not valid for rural cities:",
"official_cities[official_cities.urban == 0]\\\n .groupby(['UU2010', 'departement_id_x'])\\\n .urban.count().to_frame().head()",
"Alright, there seems to be a unique UU2010 per département assigned to all rural cities in this département. We will make sure to ignore it.\nNow let's see global stats for each level of urban entities:",
"def _stats_per_urban_group(cities):\n if cities.urban.iloc[0]:\n entities_population = cities.groupby('UU2010').population.sum()\n else:\n # Not grouping as UU2010 has no meaning for rural areas.\n entities_population = cities.population\n return pd.Series({\n 'total_population': entities_population.sum().astype(int),\n 'min_entity_population': entities_population.min().astype(int),\n 'max_entity_population': entities_population.max().astype(int),\n 'avg_entity_population': entities_population.mean().astype(int),\n 'num_entities': len(entities_population),\n })\nurban_stats = official_cities.groupby('urban').apply(_stats_per_urban_group)\nurban_stats",
"OK, many things interesting in those stats. First the size of entities seems to be globally consistent with the documentation: entities level are defined by their sizes. For the small numbers though, there seem to be some slight inconsistencies but we'll say that population data is not very precise.\nLet's check the distribution of the number of entities by level:",
"COLOR_MAP = [\n '#e0f2f1',\n '#c8e6c9',\n '#c5e1a5',\n '#dce775',\n '#ffee58',\n '#ffc107',\n '#ff9800',\n '#ff5722',\n '#795548',\n]\nurban_stats.num_entities.plot(kind='pie', figsize=(5, 5), colors=COLOR_MAP);",
"The huge majority of cities are rural, and only very few of them are part of the largest urban entities.\nLet's look at it from another angle, and check the population distribution:",
"urban_stats.total_population.plot(kind='pie', figsize=(5, 5), colors=COLOR_MAP);",
"OK, this is a whole other picture: rural areas account only for less than a quarter of the population, and actually half of the population lives in urban entities level 6 or above (each entity is larger than 100k inhabitants).\nFinally let's plot the urban entities for France metropolitan area:",
"is_in_metropol = (official_cities.longitude > -5) & (official_cities.latitude > 25)\nofficial_cities[is_in_metropol & official_cities.urban.notnull()]\\\n .sort_values('urban')\\\n .plot(kind='scatter', x='longitude', y='latitude', s=5, c='urban', figsize=(12, 10));",
"Nice! The largest urban entities seem to be located where we know are the largest cities with the benefit of knowing how far it extends.\nConclusion\nThe urban entities dataset is quite clean. The major learning is that although more than 80% of cities are rural, less than 25% of the population is in a rural area. The slicing by urban level (from 1 to 8) can also be used to distinguish people living in small or large urban areas even though their own city might just be a small city next to a big one."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
davidbrough1/pymks
|
notebooks/homogenization_fiber_2D.ipynb
|
mit
|
[
"Effective Stiffness of Fiber Composite\nIntroduction\nThis example demonstrates the use of the homogenization model from pyMKS on a set of fiber-like structures. These structures are simulated to emulate fiber-reinforced polymer samples. For a summary of homogenization theory and its use with effective stiffness properties please see the Effective Siffness example. This example will first generate a series of random microstructures with various fiber lengths and volume fraction. The ability to vary the volume fraction is a new functionality of this example. Then the generated stuctures will be used to calibrate and test the model based on simulated effective stress values. Finally we will show that the simulated response compare favorably with those generated by the model. \nGenerating Structures\nThese first lines inport important packages that will be used to run pymks.",
"import pymks\n\n%matplotlib inline\n%load_ext autoreload\n%autoreload 2\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n",
"Now we are defining the parameters which we will use to create the microstructures. n_samples will determine how many microstructures of a particular volume fraction we want to create. size determines the number of pixels we want to be included in the microstructure. We will define the material properties to be used in the finite element in elastic_modulus, poissons_ratio and macro_strain. n_phases and grain_size will determine the physical characteristics of the microstructure. We are using a high aspect ratio in creating our microstructures to simulate fiber-like structures. The volume_fraction variable will be used to vary the fraction of each phase. The sum of the volume fractions must be equal to 1. The percent_variance variable introduces some variation in the volume fraction up to the specified percentage.",
"sample_size = 100\nn_samples = 4 * [sample_size]\nsize = (101, 101)\nelastic_modulus = (1.3, 75)\npoissons_ratio = (0.42, .22)\nmacro_strain = 0.001\nn_phases = 2\ngrain_size = [(40, 2), (10, 2), (2, 40), (2, 10)]\nv_frac = [(0.7, 0.3), (0.6, 0.4), (0.3, 0.7), (0.4, 0.6)]\nper_ch = 0.1\n",
"Now we will create the microstructures and generate their responses using the make_elastic_stress_random function from pyMKS. Four datasets are created to create the four different volume fractions that we are simulating. Then the datasets are combined into one variable. The volume fractions are listed in the variable v_frac. Variation around the specified volume fraction can be obtained by varying per_ch. The variation is randomly generated according a uniform distribution around the specified volume fraction.",
"from pymks.datasets import make_elastic_stress_random\n\n\ndataset, stresses = make_elastic_stress_random(n_samples=n_samples, size=size, grain_size=grain_size,\n elastic_modulus=elastic_modulus, poissons_ratio=poissons_ratio,\n macro_strain=macro_strain, volume_fraction=v_frac,\n percent_variance=per_ch)\n",
"Now we are going to print out a few microstructres to look at how the fiber length, orientation and volume fraction are varied.",
"from pymks.tools import draw_microstructures\nexamples = dataset[::sample_size]\ndraw_microstructures(examples)\n",
"Creating the Model\nNext we are going to initiate the model. The MKSHomogenizationModel takes in microstructures and runs two-point statistics on them to get a statistical representation of the microstructures. An expalnation of the use of two-point statistics can be found in the Checkerboard Microstructure Example. Then the model uses PCA and regression models to create a linkage between the calcualted properties and structures.\nHere we simply initiate the model.",
"from pymks import MKSHomogenizationModel\nfrom pymks import PrimitiveBasis\n\n\np_basis = PrimitiveBasis(n_states=2, domain=[0, 1])\nmodel = MKSHomogenizationModel(basis=p_basis, correlations=[(0, 0)], periodic_axes=[0, 1])\n",
"Now we are going to split our data into testing and training segments so we can test and see if our model can accurately predict the effective stress.",
"from sklearn.cross_validation import train_test_split\n\n\nflat_shape = (dataset.shape[0],) + (dataset[0].size,)\ndata_train, data_test, stress_train, stress_test = train_test_split(\n dataset.reshape(flat_shape), stresses, test_size=0.2, random_state=3)\n",
"We will use sklearn's GridSearchCV to optimize the n_components and degree for our model. Let's search over the range of 1st order to 3rd order polynomial for degree and 2 to 7 principal components for n_components.",
"from sklearn.grid_search import GridSearchCV\n\n\nparams_to_tune = {'degree': np.arange(1, 4), 'n_components': np.arange(2, 8)}\nfit_params = {'size': dataset[0].shape}\ngs = GridSearchCV(model, params_to_tune, fit_params=fit_params).fit(data_train, stress_train)\n",
"Let's take a look at the results.",
"print('Order of Polynomial'), (gs.best_estimator_.degree)\nprint('Number of Components'), (gs.best_estimator_.n_components)\nprint('R-squared Value'), (gs.score(data_test, stress_test))\n\n\nfrom pymks.tools import draw_gridscores_matrix\n\ndraw_gridscores_matrix(gs, ['n_components', 'degree'], score_label='R-Squared',\n param_labels=['Number of Components', 'Order of Polynomial'])\n\n\nfrom pymks.tools import draw_gridscores\n\ngs_deg_1 = [x for x in gs.grid_scores_ \\\n if x.parameters['degree'] == 1]\ngs_deg_2 = [x for x in gs.grid_scores_ \\\n if x.parameters['degree'] == 2]\ngs_deg_3 = [x for x in gs.grid_scores_ \\\n if x.parameters['degree'] == 3]\n\ndraw_gridscores([gs_deg_1, gs_deg_2, gs_deg_3], 'n_components', \n data_labels=['1st Order', '2nd Order', '3rd Order'],\n param_label='Number of Components', score_label='R-Squared')\n",
"Our best model was found to have degree equal to 3 and n_components equal to 5. Let's go ahead and use it.",
"model = gs.best_estimator_",
"Structures in PCA space\nNow we want to draw how the samples are spread out in PCA space and look at how the testing and training data line up.",
"from pymks.tools import draw_components_scatter\n\n\nstress_predict = model.predict(data_test)\ndraw_components_scatter([model.reduced_fit_data[:, :3],\n model.reduced_predict_data[:, :3]],\n ['Training Data', 'Testing Data'],\n legend_outside=True)\n",
"It looks like there is pretty good agreement between the testing and the training data. We can also see that the four different fiber sizes are seperated in the PC space. \nDraw Goodness of fit\nNow we are going to look at how well our model predicts the properties of the structures. The calculated properties will be plotted against the properties generated by the model. We should see a linear realtionship with a slope of 1.",
"from pymks.tools import draw_goodness_of_fit\n\n\nfit_data = np.array([stresses, model.predict(dataset)])\npred_data = np.array([stress_test, stress_predict])\ndraw_goodness_of_fit(fit_data, pred_data, ['Training Data', 'Testing Data'])\n",
"Yay! There is a good corrolation between the FE results and those predicted by our linkage."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
ciyer/pandas-intro
|
lessons/pandas-for-r-users.ipynb
|
bsd-2-clause
|
[
"Rough outline brainstorm\nReading in data\n\nbriefly address encoding issues. I believe pandas default to ASCII? (-- it's acutally UTF-8)\n\nbasic manipulations\n\nSubsetting\nAccessing rows/columns/individual items\nchanging column headers\ncreating calculated values\nusing lambda functions (or functions in general) to manipulate the data\n\nmore advanced manipulations\n\ngroup by\nmelting\nmerging\n\nmiscellanous thoughts\n\nTBD",
"# Import the packages we will use\nimport pandas as pd\nimport numpy as np\nfrom IPython.display import display, HTML\n\nimport matplotlib\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport seaborn as sns",
"Lesson 1 : Demonstration\nThis first example follows Chapter 4, section 3 of Richard McElreath's book Statistical Rethinking.\nThe task is understand height in a population, in this case using data about the !Kung San people. Anthropologist Nancy Howell conducted interviews with the !Kung San and collected the data used here.\nThe data are available in the github repository for the book: https://github.com/rmcelreath/rethinking/blob/master/data/Howell1.csv\nThroughout this lesson, I'll provide the R equivalent for these actions in markdown as such:\nR equivalent:\n```r\n\ndf <- read.csv(\"https://raw.githubusercontent.com/rmcelreath/rethinking/master/data/Howell1.csv\", sep = \";\", header = TRUE)\nhead(df)\n```",
"# Read some data into a frame\n# A frame is like an table in a spreadsheet. \n# It contains columns (which usually have names) and rows (which can be indexed by number, \n# but may also have names)\ndf = pd.read_csv('https://raw.githubusercontent.com/rmcelreath/rethinking/master/data/Howell1.csv', sep=\";\")\ndf.head()",
"R equivalent:\nInstalling ggplot2 is optional if you already have it.\n```R\n\ninstall.packages(\"ggplot2\")\nlibrary(ggplot2)\nggplot(data=df, mapping=aes(x=age, y=height)) + geom_point()\n```",
"# Graph the data -- let's look at height vs. age\ndf.plot.scatter(x='age', y='height')",
"R equivalent\n```R\n\ninstall.packages('dplyr')\nlibrary(dplyr)\nadults_df <- filter(df, age>=18)\nggplot(data=adults_df, mapping=aes(x=age, y=height)) + geom_point()\n```",
"# Filter to adults, since height and age are correlated in children\nadults_df = df[df['age'] >= 18]\n# Look at height vs. age again\nadults_df.plot.scatter(x='age', y='height')",
"R equivalent:\n``` R\n\nnrow(adults_df); nrow(df)\n```",
"# Print out how many rows are in each frame\nlen(df), len(adults_df)",
"R equivalent:\n``` R\n\nggplot(data=adults_df, mapping=aes(x=age)) + geom_histogram()\n```",
"# Let's look at how the data are distributed\nadults_df['height'].plot.hist()",
"R equivalent:\n```R\n\n```",
"# Split data in to male and female\n# -- first add in a sex column to make it less confusing\ndf['sex'] = df.apply(lambda row: 'Male' if row['male'] == 1 else 'Female', axis=1)\n# -- re-apply the filter, since we modified the data\nadults_df = df[df['age'] >= 18]\nadults_df.head()\n\n# Let's summarize the data\nadults_df[['age', 'height', 'weight']].describe()\n\n# Let's look at the data broken down by sex\nadults_df[['age', 'height', 'weight', 'sex']].groupby('sex').describe()\n\n# Let's focus on the means and std\nsummary_df = adults_df[['age', 'height', 'weight', 'sex']].groupby('sex').describe()\nsummary_df.loc[(slice(None),['mean', 'std']), :]\n\n# Let's look at this visually -- plot height broken down by sex\ng = sns.FacetGrid(adults_df, hue='sex', size=6)\ng.map(sns.distplot, \"height\")\ng.add_legend()\n\n# Actually, let's look at everything\n# -- first, get rid of the male column, it's redundant and confusing\ndel adults_df['male']\nadults_df.head()\n\n# -- now flatten the data -- very confusing, it will be explained later\nflat_df = adults_df.set_index('sex', append=True)\nflat_df = flat_df.stack().reset_index([1, 2])\nflat_df.columns = ['sex', 'measurement', 'value']\nflat_df.head()\n\n# Plot!\ng = sns.FacetGrid(flat_df, col='measurement', hue='sex', size=6, sharex=False)\ng.map(sns.distplot, \"value\")\ng.add_legend()",
"Lesson 2: Details\nWhat just happened!? Let's take a deeper look at what was done above.\nReading in data",
"df = pd.read_csv('https://raw.githubusercontent.com/rmcelreath/rethinking/master/data/Howell1.csv', sep=\";\")\ndf.head()",
"One central abstraction in pandas is the DataFrame, which is similar to a data frame in R — that is, basically a spreadsheet. It is made up of columns, which are usually names, and rows, which may be named or just accessed by index.\nPandas is designed to be fast and efficient, so the table isn't necessarily stored the way you think it is internally. In particular, data is stored in columns, and each column is a pandas Series, which itself builds on numpy arrays, not native Python arrays.\nPandas can read data in many formats. CSV and JSON are common ones to use. You can control many aspects about how the data is read. Above, you see that the structure of the file is csv-like, but instead the ';' is used as the column separator. This is not a problem. Pandas can also handle different file encodings (UTF-8 is the default), etc. \nBasic frame manipulations — Accessing columns, rows, elements",
"df['height'].head()",
"In many cases, columns of a frame can be accessed like an array. The result is a pandas Series object, which, as you see, has a name, index, and type.\nAside — why all the calls to 'head()'?\nSeries and frames can be very large. The methods head() and tail() can be used get a few of the first and last rows, respectively. By default, first/last 5 rows are returned. It's used here to limit output to a small number of rows, since there is no need to see the whole table.",
"df.loc[0]",
"Rows are accessed using the method loc or iloc. The method 'loc' takes the index, 'iloc' takes the row index. In the above case, these are the same, but that is not always true. For example...",
"summary_df = df.describe()\nsummary_df.loc['mean']",
"To access an individual cell, specify a row and column using loc.",
"summary_df.loc['mean', 'age']",
"Another aside -- loc vs. iloc\nAs you saw above, the method loc takes the \"label\" of the index. The method iloc takes the index as arguments, with the parameters [row-index, col-index]",
"# select row index 0, and all the columns in that row\ndf.iloc[0,:]\n\n# select all the rows in column 0 by index\ndf.iloc[:,0]",
"Basic frame manipulations — data subsetting",
"df[['age', 'height', 'weight']].head()",
"Specifiying an array of column names returns a frame containing just those columns.",
"df.iloc[0:5]",
"It's also possible to access a subset of the rows by index. More commonly, though, you will want to subset the data by some property.",
"df[df['age'] >= 18].head()",
"This is intiutive to understand, but may seem a little magical at first. It is worth understanding what is going on underneath the covers.\nThe expression\ndf['age'] >= 18\n\nreturns an series of bool indicating whether the expression is true or false for that row (identified by index).",
"(df['age'] >= 18).head()\n\n(df['male'] == 0).head()",
"When such a series is the argument to the indexing operator, [], pandas returns a frame containing the rows where the value is True. These kinds of expressions can be combined as well, using the bitwise operators (not and/or).",
"((df['age'] >= 18) & (df['male'] == 0)).head()\n\ndf[(df['age'] >= 18) & (df['male'] == 0)].head()",
"This way, code for subsetting is intuitive to understand. It is also possible to subset rows and columns simultaneously.",
"df.loc[(df['age'] >= 18) & (df['male'] == 0), ['height', 'weight', 'age']].head()",
"Basic frame manipulations — renaming columns\nRenaming columns: just feed a list of new columns and pass it to df.columns",
"df.columns = ['new_height', 'new_weight', 'new_age', 'coded_gender']",
"Creating columns based on other columns\nIf I wanted to create a new column based on adding up the weight and age, I could do this:",
"df['new_id'] = df['new_weight'] + df['new_age']\ndf.head(2)",
"If I wanted to create a calculated column using a dictionary replacement, I could use the map function",
"gender_text = {1: 'Male', 0: 'Female'}\ndf['text_gender'] = df['coded_gender'].map(gender_text)\ndf.head(2)",
"What about using a lambda function to create a new column?",
"df['double_age'] = df['new_age'].apply(lambda x: x*2)\n\ndf.head(2)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
gregmedlock/Medusa
|
docs/creating_ensemble.ipynb
|
mit
|
[
"Creating an ensemble\nMedusa has two families of methods for generating ensembles: expansion and degradation. Expansion approaches currently consist of gapfilling algorithms. Degradation approaches include random degradation (useful for benchmarking new gapfilling methods) and omics integration algorithms that constrain network (e.g. transcriptomics integration; not currently implemented).\nExpanding a network\nThe most common network expansion approach involving metabolic networks is algorithmic gapfilling, where the goal is to identify reactions to add to a network that allow a feasible solution. An example of this is adding a minimal number of reactions to enable biomass production in a model for an organism in a specific condition (e.g. SMILEY [1]). See the gapfilling documentation in cobrapy for the formulation of this problem.\nAdding the minimum number of reactions to satisfy a biological function is just one approach to the gapfilling strategy. An alternative approach is to reformulate the problem to add the minimum amount of flux through candidate reactions for gapfilling. This has the advantage of being an entirely continuous problem, rather than the integer problem posed by SMILEY, so the time to find a solution is usually 1-2 orders of magnitude shorter.\nIn medusa, implementations of both gapfilling strategies are available, but we recommend the continuous approach, which we demonstrate below.\nInput for gapfilling\nThe key inputs for gapfilling are a cobra.Model object representing the GENRE you are filling gaps in and a second cobra.Model object containing reactions that form a universal reaction database (sometimes called a universal reaction bag). Additionally, context-specific information, such as the environmental conditions in which a phenotype was observed, may be needed to constrain the model during gapfilling.\nLet's use test data available in medusa for gapfilling. 
The approach we'll take to generate multiple solutions involves iteratively gapfilling across multiple media conditions to generate a single gapfilled model. We repeat the process with the original model but with a shuffled version of the media conditions (changing the order in which media conditions are used during gapfilling), each time generating a new solution. You can see examples of this approach in Biggs & Papin [2] and Medlock & Papin [3].",
"# Load the test model for Staphylococcus aureus, originally generated with ModelSEED\nimport medusa\nfrom medusa.test import create_test_model\nmodel = create_test_model('Saureus_seed')\n\n# Load the biolog data from Plata et al., Nature 2014\nfrom medusa.test import load_biolog_plata\nbiolog_base_composition, biolog_base_dict, biolog_thresholded = load_biolog_plata()\nbiolog_base_composition\n",
"Here, biolog_base_composition describes the media components that are present in every biolog condition (Note: if you are using these data for your own purposes, keep in mind that we added Heme and H2S2O3 due to common issues encountered in models. These are not actually in the biolog medium).\nThe biolog_base_dict is a dictionary version of this, which we'll use as direct input to the models as part of model.medium",
"biolog_base_dict",
"The actual growth/no growth data is in biolog_thresholded, which is a pandas DataFrame with organism species/genus as rows, and biolog media conditions as columns represented by the ModelSEED metabolite ID for the single carbon/nitrogen source present. The original source of these data is [4]; there, you can find the non-thresholded values if curious. Here, we've thresholded the growth data using the same threshold reported in the paper (>=10 relative units of tetrazolium dye).",
"# Just inspect the first 5 species\nbiolog_thresholded.head(5)",
"Now we'll extract the positive growth conditions for the species we're interested in (Staphylococcus aureus)",
"test_mod_pheno = biolog_thresholded.loc['Staphylococcus aureus']\ntest_mod_pheno = list(test_mod_pheno[test_mod_pheno == True].index)\ntest_mod_pheno",
"In order to gapfill this model, we have to make sure that the biolog media components are in the model, and that there are exchange reactions for each of these metabolites. To make this process more convenient, we'll load the universal reaction database now, which we will also use later in the process. The universal model is large, and the load_universal_modelseed does some extra processing of the model, so loading it may take a few minutes. First we'll check for changes that need to be made:",
"# load the universal reaction database\nfrom medusa.test import load_universal_modelseed\nfrom cobra.core import Reaction\nuniversal = load_universal_modelseed()\n\n# check for biolog base components in the model and record\n# the metabolites/exchanges that need to be added\nadd_mets = []\nadd_exchanges = []\nfor met in list(biolog_base_dict.keys()):\n try:\n model.metabolites.get_by_id(met)\n except:\n print('no '+met)\n add_met = universal.metabolites.get_by_id(met).copy()\n add_mets.append(add_met)\n\nmodel.add_metabolites(add_mets)\n\nfor met in list(biolog_base_dict.keys()):\n # Search for exchange reactions\n try:\n model.reactions.get_by_id('EX_'+met)\n except:\n add_met = universal.metabolites.get_by_id(met)\n ex_rxn = Reaction('EX_' + met)\n ex_rxn.name = \"Exchange reaction for \" + met\n ex_rxn.lower_bound = -1000\n ex_rxn.upper_bound = 1000\n ex_rxn.add_metabolites({add_met:-1})\n add_exchanges.append(ex_rxn)\n\nmodel.add_reactions(add_exchanges)\n",
"Next, we need to do the same for the single carbon/nitrogen sources in the biolog data. When performing this workflow on your own GENRE, you may want to check that all of the media components that enable growth have suitable transporters in the universal model (or already in the draft model).",
"# Find metabolites from the biolog data that are missing in the test model\n# and add them from the universal\nmissing_mets = []\nmissing_exchanges = []\nmedia_dicts = {}\nfor met_id in test_mod_pheno:\n try:\n model.metabolites.get_by_id(met_id)\n except:\n print(met_id + \" was not in model, adding met and exchange reaction\")\n met = universal.metabolites.get_by_id(met_id).copy()\n missing_mets.append(met)\n ex_rxn = Reaction('EX_' + met_id)\n ex_rxn.name = \"Exchange reaction for \" + met_id\n ex_rxn.lower_bound = -1000\n ex_rxn.upper_bound = 1000\n ex_rxn.add_metabolites({met:-1})\n missing_exchanges.append(ex_rxn)\n media_dicts[met_id] = biolog_base_dict.copy()\n media_dicts[met_id] = {'EX_'+k:v for k,v in media_dicts[met_id].items()}\n media_dicts[met_id]['EX_'+met_id] = 1000\nmodel.add_metabolites(missing_mets)\nmodel.add_reactions(missing_exchanges)",
"Now, let's fill some gaps using the iterative_gapfill_from_binary_phenotypes function. For simplicity, we'll just take the first 5 conditions and perform gapfilling for 10 cycles, which should yield an ensemble with 10 members. We set lower_bound = 0.05, which requires that the model produces 0.05 units of flux through the previous objective function (here, biomass production) which is now set as a constraint (i.e. v<sub>bm</sub> >= 0.05). inclusion_threshold is the amount of flux through a reaction required to include it in the gapfill solution, which is necessary because of the limits of numerical precision. Generally a small number (e.g. < 1E-8) is a good choice. However, some gapfill solutions may have reactions with non-zero flux in ranges lower than this; if this occurs, Medusa will raise an error letting you know that it failed to validate the gapfill solution, and that you should try lowering the threshold.",
"from medusa.reconstruct.expand import iterative_gapfill_from_binary_phenotypes\n# select a subset of the biolog conditions to perform gapfilling with\nsources = list(media_dicts.keys())\nsub_dict = {sources[0]:media_dicts[sources[0]],\n sources[1]:media_dicts[sources[1]],\n sources[2]:media_dicts[sources[2]],\n sources[3]:media_dicts[sources[3]],\n sources[4]:media_dicts[sources[4]]}\n\nnum_cycles = 10\nlower_bound = 0.05\nflux_cutoff = 1E-10\nensemble = iterative_gapfill_from_binary_phenotypes(model,universal,sub_dict,num_cycles,\\\n lower_bound=lower_bound,\\\n inclusion_threshold=1E-10,\\\n exchange_reactions=False,\\\n demand_reactions=False,\\\n exchange_prefix='EX')\n\nprint(len(ensemble.members))\nprint(ensemble.members)\n\n# Check out the features that vary across the ensemble\nprint(len(ensemble.features))\nprint([feature.base_component.id for feature in ensemble.features])",
"Degrading a network\nComing here soon.\nReferences\n[1]: Reed et al., \"Systems approach to refining genome annotation\", PNAS 2006\n[2]: Biggs & Papin, \"Managing uncertainty in metabolic network structure and improving predictions using EnsembleFBA\", PLoS Computational Biology 2017\n[3]: Medlock & Papin, \"Guiding the refinement of biochemical knowledgebases with ensembles of metabolic networks and semi-supervised learning\", BioRxiv 2018\n[4]: Plata et al., \"Long-term phenotypic evolution of bacteria\", Nature 2015"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
marpat/blog
|
ReadNboDip.ipynb
|
gpl-3.0
|
[
"Extracting dipole moment data from the NBO output\nThis notebook describes code to parse .nbo or .out files generated by Gaussian09/GENNBO6 modules. Lines related to <span style=\"color:rgba(217,0,126,1)\">NLMO dipole components</span> are extracted and saved to a <b>*.csv file</b>.<br>\nComplete blog entry discussing this <span style=\"color:orange\">Notebook</span> is at chemgplus.blogger.com site.",
"'''Custom styling. File *.css injects the contents of css_file in the \nheader of the notebook's HTML file. Other css files are in the /css directory'''\n# to (de)activate line numbering pres Esc while in the cell \n# followed by l (lower L)\n\n'''Sanity check since we are changing directories and .css file path\nwould be incorrect upon cell re-load'''\nfrom IPython.core.display import HTML\nimport string,sys,os,os.path,re\ncss_file = './css/blog.css'\nif os.path.isfile(css_file):\n css_file\nelse:\n %cd ..\nHTML(open(css_file, \"r\").read())\n\nimport pandas as pd\nimport numpy as np\npd.set_option('display.max_columns', 14)\npd.set_option('display.width', 300)\npd.set_option(\"max_columns\", 14)\npd.set_option('chained_assignment',None) # suppress warnings for web publishing\n\n%%capture \n# suppress output; remove %% capture for debugging\n# Enter subdirectory and the input filename\n%cd dipoles\nfilename = 'form.nbo'\n\n# Save the file path, name and extension\nfullpath = os.path.abspath(filename)\npath,file=os.path.split(fullpath)\nbasename, extension = os.path.splitext(filename)\n\n# Parse the text section of Dipole Analysis into the list 'capture'\nstart = 0\nbegin = 0\nend = 1\ncapture = []\nwith open (filename, 'r') as f:\n for line in f:\n # condition to end parsing\n if begin == 1 and '-------' in line:\n end = 0\n # parse the chunk\n if start == 1 and begin == 1 and end == 1 and not (\"deloc\" in line):\n if re.match(r\"\\s$\", line): continue # if there's a space in the line\n capture += [line.lstrip()]\n # First condition to initiate capture\n if 'DIPOLE MOMENT ANALYSIS:' in line:\n start = 1\n # Second condition to initiate capture \n if start == 1 and '==============' in line:\n begin = 1 \n\n# Extract values\ndef getdipvalues(list):\n orbnum = []\n orbtype = []\n dipX = []\n dipY = []\n dipZ = []\n dipTot = [] \n try:\n for item in capture:\n #Regex with capturing groups to parse lines in the dipole section\n pattern = 
re.search(r\"([0-9]{1,3})\\.\\s([A-Z]{2}.+)\\s{7,13}(-?\\d\\.\\d\\d)\\s?\\s(-?\\d\\.\\d\\d)\\s?\\s(-?\\d\\.\\d\\d)\\s?\\s(\\d\\.\\d\\d)\\s?\\s.+\", item, re.MULTILINE)\n if pattern:\n orbnum.append(pattern.group(1).strip())\n orbtype.append(pattern.group(2).strip())\n dipX.append(pattern.group(3))\n dipY.append(pattern.group(4))\n dipZ.append(pattern.group(5))\n dipTot.append(pattern.group(6))\n return orbnum, orbtype, dipX, dipY, dipZ, dipTot\n except ValueError, Argument:\n print \"The argument does not contain list.\\n\", Argument\n\n%%capture \n# suppress output; remove %% capture for debugging\n# Create Pandas dataframe\norbnum, orbtype, dipX, dipY, dipZ, dipTot = getdipvalues(capture)\n\n# Create Pandas DataFrame\ndf = pd.DataFrame({'NLMO': orbnum,'Type': orbtype,'X': dipX,'Y': dipY,'Z': dipZ,'Tot_Dip': dipTot},columns=['NLMO','Type','X','Y','Z','Tot_Dip'])\ndf[['X', 'Y','Z', 'Tot_Dip']] = df[['X', 'Y','Z', 'Tot_Dip']].astype(float)\ndf[['NLMO']] = df[['NLMO']].astype(int)\n\n# Write dataframe to .csv file\ntry:\n df.to_csv(basename+\"_dip.csv\",index=False, encoding='utf-8')\nexcept IOError:\n print \"Error: can\\'t find the file or read data\"\nelse:\n print \"\\n\" +('-'*80)+\"\\n\"\n print \">> Contents of the dataframe was written to \"+path+\"\\\\\"+basename+\"_dip.csv file\"",
"NLMO part of the DIPOLE MOMENT ANALYSIS: section is now shown in <span style=\"color:orange\"><b>Table 1</b></span>. The corresponding *_dip.csv file was saved with the path shown in the previous cell.",
"# Print html formatted table from the loaded css file\nHTML(df.to_html(classes = 'grid', escape=False))",
"<center><span style=\"color: orange\"><b>Table 1</b>. XYZ-coordinates, orbital types, and total dipole values from NLMO dipole output.</span></center> \n\n<span style=\"font-size: 12px\"><i>iPython Notebook ReadNboDip.ipynb:<br>\nversion 1.0 created on Dec 23, 2014<br>\nversion 1.1 updated on Jan 4, 2015</i></span>"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
sunsistemo/mozzacella-automato-salad
|
results-two-colors.ipynb
|
gpl-3.0
|
[
"import json\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import integrate\nfrom scipy.stats import chisqprob\nfrom gmpy2 import digits\n\nimport recipe\n\ns_rands_paper = recipe.rands_paper\nd_five = recipe.five\nd_five['Entropy_norm'] = d_five['Entropy'] / 8\nd_five['Entropy'] = d_five['Entropy_norm']\nd_five = d_five.drop('Entropy_norm', axis=1)\n\nd_five['p_value_deviation'] = np.abs(d_five['p-value'] - 0.5)\n\nd_five_p10_90 = d_five[(d_five['p-value'] > 0.1) & (d_five['p-value'] < 0.9)]\nd_five_p05_95 = d_five[(d_five['p-value'] > 0.05) & (d_five['p-value'] < 0.95)]\nd_rands_paper = d_five[d_five.rule.isin(s_rands_paper)]\n\nlen_five_p10_90 = len(d_five_p10_90)\nlen_five_p05_95 = len(d_five_p05_95)\nlen_rands_paper = len(d_rands_paper)\n\nprint(\"Random according to paper: #%d \" % len_rands_paper, end=\"\")\nprint(list(d_rands_paper.rule.sort_values()))\nprint(\"Between 5 - 95%%: #%d \" % len_five_p05_95, end=\"\")\nprint(list(d_five_p05_95.rule.sort_values()))\nprint(\"Between 10 - 90%%: #%d \" % len_five_p10_90, end=\"\")\nprint(list(d_five_p10_90.rule.sort_values()))\n\ns_five_p05_p95 = set(d_five_p05_95.rule)\ns_five_p10_p90 = set(d_five_p10_90.rule)\n\nprint(\"Random from paper but not according to us: \", end=\"\") \nprint(set(d_rands_paper.rule) - set(d_five_p10_90.rule))\n\nprint()\n\nprint(\"Random from paper but not according to us: \", end=\"\") \nprint(set(d_five_p10_90.rule) - set(d_rands_paper.rule))\n\n\nd_five[d_five.rule.isin(set(d_rands_paper.rule) - set(d_five_p10_90.rule))][['Entropy', 'mean_deviation', 'p-value']]",
"Plots\nEntropy vs langton\nblue = all, red = our random",
"# Plot Entropy of all rules against the langton parameter\nax1 = plt.gca()\nd_five.plot(\"langton\", \"Entropy\", ax=ax1, kind=\"scatter\", marker='o', alpha=.5, s=40)\nd_five_p10_90.plot(\"langton\", \"Entropy\", ax=ax1, kind=\"scatter\", color=\"r\", marker='o', alpha=.5, s=40)\nplt.show()\n\nax1 = plt.gca()\nd_five.plot(\"langton\", \"Entropy\", ax=ax1, kind=\"scatter\", marker='o', alpha=.5, s=40)\nd_five_p10_90.plot(\"langton\", \"Entropy\", ax=ax1, kind=\"scatter\", color=\"r\", marker='o', alpha=.5, s=40)\nplt.savefig('plots/entropy-langton.png', format='png', dpi=400)\n\nax1 = plt.gca()\nd_five.plot(\"langton\", \"Entropy\", ax=ax1, kind=\"scatter\", marker='o', alpha=.5, s=40)\nd_five_p10_90.plot(\"langton\", \"Entropy\", ax=ax1, kind=\"scatter\", color=\"r\", marker='o', alpha=.5, s=40)\nplt.savefig('plots/entropy-langton.svg', format='svg', dpi=400)",
"Chi-square vs langton\nblue = all, red = our random",
"# Plot Chi-Square of all rules against the langton parameter\nax2 = plt.gca()\nd_five.plot(\"langton\", \"Chi-square\", ax=ax2, logy=True, kind=\"scatter\", marker='o', alpha=.5, s=40)\nd_five_p10_90.plot(\"langton\", \"Chi-square\", ax=ax2, logy=True, kind=\"scatter\", color=\"r\", marker='o', alpha=.5, s=40)\nplt.show()\n\nax2 = plt.gca()\nd_five.plot(\"langton\", \"Chi-square\", ax=ax2, logy=True, kind=\"scatter\", marker='o', alpha=.5, s=40)\nd_five_p10_90.plot(\"langton\", \"Chi-square\", ax=ax2, logy=True, kind=\"scatter\", color=\"r\", marker='o', alpha=.5, s=40)\nplt.savefig('plots/chisquare-langton.png', format='png', dpi=400)\n\nax2 = plt.gca()\nd_five.plot(\"langton\", \"Chi-square\", ax=ax2, logy=True, kind=\"scatter\", marker='o', alpha=.5, s=40)\nd_five_p10_90.plot(\"langton\", \"Chi-square\", ax=ax2, logy=True, kind=\"scatter\", color=\"r\", marker='o', alpha=.5, s=40)\nplt.savefig('plots/chisquare-langton.svg', format='svg', dpi=400)",
"Mean vs langton\nblue = all, red = our random",
"# Plot Mean of all rules against the langton parameter\nax3 = plt.gca()\nd_five.plot(\"langton\", \"Mean\", ax=ax3, kind=\"scatter\", marker='o', alpha=.5, s=40)\nd_five_p10_90.plot(\"langton\", \"Mean\", ax=ax3, kind=\"scatter\", color=\"r\", marker='o', alpha=.5, s=40)\nplt.show()\n\nax3 = plt.gca()\nd_five.plot(\"langton\", \"Mean\", ax=ax3, kind=\"scatter\", marker='o', alpha=.5, s=40)\nd_five_p10_90.plot(\"langton\", \"Mean\", ax=ax3, kind=\"scatter\", color=\"r\", marker='o', alpha=.5, s=40)\nplt.savefig('plots/mean-langton.png', format='png', dpi=400)\n\nax3 = plt.gca()\nd_five.plot(\"langton\", \"Mean\", ax=ax3, kind=\"scatter\", marker='o', alpha=.5, s=40)\nd_five_p10_90.plot(\"langton\", \"Mean\", ax=ax3, kind=\"scatter\", color=\"r\", marker='o', alpha=.5, s=40)\nplt.savefig('plots/mean-langton.svg', format='svg', dpi=400)",
"Monte-Carlo-Pi vs langton\nblue = all, red = our random",
"# Plot Monte Carlo of all rules against the langton parameter\nax4 = plt.gca()\nd_five.plot(\"langton\", \"Monte-Carlo-Pi\", ax=ax4, kind=\"scatter\", marker='o', alpha=.5, s=40)\nd_five_p10_90.plot(\"langton\", \"Monte-Carlo-Pi\", ax=ax4, kind=\"scatter\", color=\"r\", marker='o', alpha=.5, s=40)\nplt.show()\n\nax4 = plt.gca()\nd_five.plot(\"langton\", \"Monte-Carlo-Pi\", ax=ax4, kind=\"scatter\", marker='o', alpha=.5, s=40)\nd_five_p10_90.plot(\"langton\", \"Monte-Carlo-Pi\", ax=ax4, kind=\"scatter\", color=\"r\", marker='o', alpha=.5, s=40)\nplt.savefig('plots/monte-carlo-langton.png', format='png', dpi=400)\n\nax4 = plt.gca()\nd_five.plot(\"langton\", \"Monte-Carlo-Pi\", ax=ax4, kind=\"scatter\", marker='o', alpha=.5, s=40)\nd_five_p10_90.plot(\"langton\", \"Monte-Carlo-Pi\", ax=ax4, kind=\"scatter\", color=\"r\", marker='o', alpha=.5, s=40)\nplt.savefig('plots/monte-carlo-langton.svg', format='svg', dpi=400)",
"Serial-Correlation vs langton\nblue = all, red = our random",
"# Plot Serial Correlation of all rules against the langton parameter\nax5 = plt.gca()\nd_five.plot(\"langton\", \"Serial-Correlation\", ax=ax5, kind=\"scatter\", marker='o', alpha=.5, s=40)\nd_five_p10_90.plot(\"langton\", \"Serial-Correlation\", ax=ax5, kind=\"scatter\", color=\"r\", marker='o', alpha=.5, s=40)\nplt.show()\n\nax5 = plt.gca()\nd_five.plot(\"langton\", \"Serial-Correlation\", ax=ax5, kind=\"scatter\", marker='o', alpha=.5, s=40)\nd_five_p10_90.plot(\"langton\", \"Serial-Correlation\", ax=ax5, kind=\"scatter\", color=\"r\", marker='o', alpha=.5, s=40)\nplt.savefig('plots/serial-correlation-langton.png', format='png', dpi=400)\n\nax5 = plt.gca()\nd_five.plot(\"langton\", \"Serial-Correlation\", ax=ax5, kind=\"scatter\", marker='o', alpha=.5, s=40)\nd_five_p10_90.plot(\"langton\", \"Serial-Correlation\", ax=ax5, kind=\"scatter\", color=\"r\", marker='o', alpha=.5, s=40)\nplt.savefig('plots/serial-correlation-langton.svg', format='svg', dpi=400)",
"p-value vs langton\nblue = all, red = our random",
"# Plot p-value of all rules against the langton parameter\nax6 = plt.gca()\nd_five.plot(\"langton\", \"p-value\", ax=ax6, kind=\"scatter\", marker='o', alpha=.5, s=40)\nd_five_p10_90.plot(\"langton\", \"p-value\", ax=ax6, kind=\"scatter\", color=\"r\", marker='o', alpha=.5, s=40)\nplt.show()\n\nax6 = plt.gca()\nd_five.plot(\"langton\", \"p-value\", ax=ax6, kind=\"scatter\", marker='o', alpha=.5, s=40)\nd_five_p10_90.plot(\"langton\", \"p-value\", ax=ax6, kind=\"scatter\", color=\"r\", marker='o', alpha=.5, s=40)\nplt.savefig('plots/p-value-langton.png', format='png', dpi=400)\n\nax6 = plt.gca()\nd_five.plot(\"langton\", \"p-value\", ax=ax6, kind=\"scatter\", marker='o', alpha=.5, s=40)\nd_five_p10_90.plot(\"langton\", \"p-value\", ax=ax6, kind=\"scatter\", color=\"r\", marker='o', alpha=.5, s=40)\nplt.savefig('plots/p-value-langton.svg', format='svg', dpi=400)\n\n# Cutoff rules with high Chi-Square (not random)\nd_rands_paper_chi = d_rands_paper[(d_rands_paper[\"Chi-square\"] < 300)] # 300 or 1E5 is same cutoff\n\nprint(\"Number of random rules according to paper: %d\" % len(d_rands_paper))\nprint(\"Number of paper rules with high Chi-Square: %d \" % (len(d_rands_paper) - len(d_rands_paper_chi)), end=\"\")\nprint(set(d_rands_paper.rule) - set(d_rands_paper_chi.rule))\n\nselection = d_five_p10_90[['rule', 'pi_deviation', 'mean_deviation', 'p_value_deviation', 'Serial-Correlation']]\n\nselection\n\np_value_top_10 = selection.sort_values(by='p_value_deviation').head(10)\nmean_top_10 = selection.sort_values(by='mean_deviation').head(10)\npi_top_10 = selection.sort_values(by='pi_deviation').head(10)\n\nprint(\"Top 10 p-value: \\t\", end=\"\")\nprint(p_value_top_10.rule.values)\nprint(\"Top 10 Mean: \\t\\t\", end=\"\")\nprint(mean_top_10.rule.values)\nprint(\"Top 10 Monte-Carlo-Pi: \\t\", end=\"\")\nprint(pi_top_10.rule.values)\n\nprint()\n\nprint(\"Both in top 10 p-value and Mean: \", end=\"\")\nprint(set(p_value_top_10.rule.values) & 
set(mean_top_10.rule.values))\nprint(\"In all three top 10s: \", end=\"\")\nprint(set(p_value_top_10.rule.values) & set(mean_top_10.rule.values) & set(pi_top_10.rule.values))\n\nselection[selection.rule.isin(set(p_value_top_10.rule.values) & set(mean_top_10.rule.values) & set(pi_top_10.rule.values))]\n\np_value_top_10\n\nmean_top_10\n\npi_top_10",
"Python's and linux' RNG\nPython's random.randint and linux' /dev/urandom",
"def read_results(filename):\n results = (File_bytes, Monte_Carlo_Pi, Rule, Serial_Correlation, Entropy, Chi_square, Mean) = [[] for _ in range(7)]\n with open(filename) as f:\n data = json.load(f)\n variables = {\"File-bytes\": File_bytes, \"Monte-Carlo-Pi\": Monte_Carlo_Pi, \"Rule\": Rule, \"Serial-Correlation\": Serial_Correlation,\n \"Entropy\": Entropy, \"Chi-square\": Chi_square, \"Mean\": Mean}\n for k, v in variables.items():\n v.append(data[k])\n results = np.array([np.array(r) for r in results]).T\n headers = [\"File-bytes\", \"Monte-Carlo-Pi\", \"Rule\", \"Serial-Correlation\", \"Entropy\", \"Chi-square\", \"Mean\"]\n return pd.DataFrame(results, columns=headers)\n\npython = read_results('python_1466717839.json')\nurandom = read_results('urandom_1466717941.json')\n\nfor d in (python, urandom):\n d[\"pi_deviation\"] = np.abs(d[\"Monte-Carlo-Pi\"] - np.pi)\n d[\"mean_deviation\"] = np.abs(d[\"Mean\"] - 255 / 2)\n d[\"p-value\"] = chisqprob(d[\"Chi-square\"], 255)\n d['Entropy_norm'] = d['Entropy'] / 8\n d['Entropy'] = d['Entropy_norm']\n d['p_value_deviation'] = np.abs(d['p-value'] - 0.5)\n\npython[['pi_deviation', 'mean_deviation', 'p_value_deviation']]\n\nurandom[['pi_deviation', 'mean_deviation', 'p_value_deviation']]\n\nselection"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
xdnian/pyml
|
assignments/solutions/ex04_sample_solution.ipynb
|
mit
|
[
"Assignment 4 - dimensionality reduction\nThis assignment focuses on two different ways for dimensionality reduction:\n* feature selection\n* feature extraction\nThis assignment has weighting $1.5$.\nSequential feature selection (50 points)\nThere is a sample code in PML chapter 4 for sequential bardward selection (SBS) and its application to subsequent KNN classifier.\nImplement sequential forward selection (SFS), and compare it with sequential backward selection by plotting the accuracy versus the number of features.\nYou can start with the sample code provided in the slides.\nYou can extend the existing SBS class to handle both forward and backward selection, or implement a separate class for SFS.\nPlot and compare the two accuracy versus number-of-features plots for SFS and SBS.\nUse the wine dataset as follows.",
"import pandas as pd\n\nwine_data_remote = 'https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data'\nwine_data_local = '../datasets/wine/wine.data'\n\ndf_wine = pd.read_csv(wine_data_remote,\n header=None)\n\ndf_wine.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash',\n 'Alcalinity of ash', 'Magnesium', 'Total phenols',\n 'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',\n 'Color intensity', 'Hue', 'OD280/OD315 of diluted wines',\n 'Proline']\n\n#print('Class labels', np.unique(df_wine['Class label']))\n#df_wine.head()\n\nfrom sklearn import __version__ as skv\nfrom distutils.version import LooseVersion as CheckVersion\nif CheckVersion(skv) < '0.18':\n from sklearn.cross_validation import train_test_split\nelse:\n from sklearn.model_selection import train_test_split\n\nX, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values\n\nX_train, X_test, y_train, y_test = \\\n train_test_split(X, y, test_size=0.3, random_state=0)\n\nfrom sklearn.preprocessing import StandardScaler\n\nstdsc = StandardScaler()\nX_train_std = stdsc.fit_transform(X_train)\nX_test_std = stdsc.transform(X_test)",
"Answer\nImplement your sequential backward selection class here, either as a separate class or by extending the SBS class that can handle both forward and backward selection (via an input parameter to indicate the direction).",
"from sklearn.base import clone\nfrom itertools import combinations\nimport numpy as np\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import accuracy_score\n\nclass SequentialSelection():\n def __init__(self, estimator, k_features, scoring=accuracy_score,\n backward = True, test_size=0.25, random_state=1):\n self.scoring = scoring\n self.estimator = clone(estimator)\n self.k_features = k_features\n self.backward = backward\n self.test_size = test_size\n self.random_state = random_state\n\n def fit(self, X, y):\n \n X_train, X_test, y_train, y_test = \\\n train_test_split(X, y, test_size=self.test_size,\n random_state=self.random_state)\n\n dim = X_train.shape[1]\n \n all_indices = tuple(range(dim))\n self.subsets_ = []\n self.scores_ = []\n \n if self.backward: \n self.indices_ = all_indices\n dims = range(dim, self.k_features-1, -1)\n else: # forward\n self.indices_ = []\n dims = range(1, self.k_features+1, 1) \n \n for dim in dims:\n scores = []\n subsets = []\n\n if self.backward:\n p_set = [p for p in combinations(self.indices_, r=dim)]\n else:\n remaining_indices = set(all_indices).difference(self.indices_)\n p_set = [tuple(set(p).union(set(self.indices_))) for p in combinations(remaining_indices, r=1)]\n \n for p in p_set:\n score = self._calc_score(X_train, y_train, \n X_test, y_test, p)\n scores.append(score)\n subsets.append(p)\n\n best = np.argmax(scores)\n self.indices_ = subsets[best]\n self.subsets_.append(self.indices_)\n self.scores_.append(scores[best])\n \n self.k_score_ = self.scores_[-1]\n\n return self\n\n def transform(self, X):\n return X[:, self.indices_]\n\n def _calc_score(self, X_train, y_train, X_test, y_test, indices):\n self.estimator.fit(X_train[:, indices], y_train)\n y_pred = self.estimator.predict(X_test[:, indices])\n score = self.scoring(y_test, y_pred)\n return score",
"Apply your sequential forward/backward selection code to the KNN classifier with the wine data set, and plot the accuracy versus number-of-features curves for both.\nDescribe the similarities and differences you can find, e.g. \n* do the two methods agree on the optimal number of features?\n* do the two methods have similar accuracy scores for each number of features?\n* etc.\nAnswer\nSBS and SFS give similar plots for accuracy versus the feature size, they agree on the accuracy score for 5 to 9 features based on the parameters setting used in the code.\nSFS reaches a higher accuracy score than SBS with a smaller feature size. We could see from the figure that SFS has a higher accuracy of 1 with a featue size of 3 while SBS gives an accuracy of 0.96.",
"%matplotlib inline\n\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import KNeighborsClassifier\n\nknn = KNeighborsClassifier(n_neighbors=2)\n\n# selecting features\nfor backward in [True, False]:\n if backward:\n k_features = 1\n else:\n k_features = X_train_std.shape[1]\n \n ss = SequentialSelection(knn, k_features=k_features, backward=backward)\n ss.fit(X_train_std, y_train)\n\n # plotting performance of feature subsets\n k_feat = [len(k) for k in ss.subsets_]\n\n plt.plot(k_feat, ss.scores_, marker='o')\n plt.ylim([0.7, 1.1])\n plt.ylabel('Accuracy')\n plt.xlabel('Number of features')\n if backward:\n plt.title('backward')\n else:\n plt.title('forward')\n plt.grid()\n plt.tight_layout()\n # plt.savefig('./sbs.png', dpi=300)\n plt.show()",
"PCA versus LDA (50 points)\nWe have learned two different methods for feature extraction, PCA (unsupervised) and LDA (supervised).\nUnder what circumstances would PCA and LDA produce very different results? \nProvide one example dataset in 2D, analyze it via PCA and LDA, and plot it with the PCA and LDA components.\nYou can use code from the scikit-learn library.\nAnswer\nOne simple case is several very tall and narrow clusters stacked horizontally.\nPCA/LDA and will favor the vertical/horizontal direction, respectively.\nTo avoid curing via standardization, just having enough classes so that the total/individual set is sufficiently isotropic/anisotropic.\nThe difference could be explained: \nPCA: \n(1) unsupervised (no class label information) \n(2) project data into dimensions that maximize variance.\nLDA:\n(1) supervised (with class label information) \n(2) project data into dimensions to (a) maximize inter-class spread, (b) minimize intra-class spread",
"%matplotlib inline",
"Write code to produce your own dataset in 2D.\nYou are free to design relative characteristics like the number of class, the number of samples for each class, as long as your dataset could be analyzed via PCA and LDA.",
"# visualize the data set\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef plot_dataset(X, y, xlabel=\"\", ylabel=\"\"):\n num_class = np.unique(y).size\n \n colors = ['red', 'blue', 'green', 'black']\n markers = ['^', 'o', 's', 'd']\n \n if num_class <= 1:\n plt.scatter(X[:, 0], X[:, 1])\n pass\n else:\n for k in range(num_class):\n plt.scatter(X[y == k, 0], X[y == k, 1], color=colors[k], marker=markers[k], alpha=0.5)\n \n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.tight_layout()\n plt.show()\n\nfrom sklearn.datasets import make_blobs\n\ndef blobs(num_samples_per_class, dimension, num_class, cluster_std, center_spacing):\n cluster_scale = num_class*center_spacing\n\n class_centers = np.zeros((num_class, 2))\n for k in range(num_class):\n class_centers[k, 0] = center_spacing*(k-num_class*0.5)\n\n X, y = make_blobs(n_samples = num_samples_per_class*num_class, \n n_features = dimension, centers = class_centers,\n cluster_std=cluster_std)\n\n X[:, 1] *= cluster_scale\n \n return X, y\n\ndef slices(num_samples_per_class, dimension, num_class, aspect_ratio=1.0):\n num_rows = num_class*num_samples_per_class\n num_cols = dimension\n \n y = np.zeros(num_rows)\n X = np.random.uniform(low=0, high=1, size=(num_rows, num_cols))\n \n for k in range(num_class):\n row_lo = k*num_samples_per_class\n row_hi = (k+1)*num_samples_per_class\n y[row_lo:row_hi].fill(k)\n X[row_lo:row_hi, 0] = (X[row_lo:row_hi, 0]+k)*aspect_ratio/num_class\n \n y = y.astype(int) \n return X, y\n\n\nnum_samples_per_class = 100\ndimension = 2\ncluster_std = 1\ncenter_spacing = 5*cluster_std\n\naspect_ratio = 0.8\n\nnum_class = 3\n\n# You guys could try different dataset using blobs or slices and adjusting the angle below\n\n#X, y = blobs(num_samples_per_class, dimension, num_class, cluster_std, center_spacing)\nX, y = slices(num_samples_per_class, dimension, num_class, aspect_ratio)\n\nprint(X.shape)\nprint(np.unique(y))\n\n class RotationMatrix():\n # angle in degree\n def __init__(self, 
angle=0):\n theta = (angle/180.0)*np.pi\n self.matrix = np.array([[np.cos(theta), -np.sin(theta)], \n [np.sin(theta), np.cos(theta)]])\n def rot(self, X):\n return X.dot(self.matrix.transpose())\n\nangle = 0\n\nif angle != 0:\n rmat = RotationMatrix(angle)\n X = rmat.rot(X)\n\nfrom sklearn.preprocessing import StandardScaler\n\nif False:\n sc = StandardScaler()\n X = sc.fit_transform(X)",
"Plot your data set, with different classes in different marker colors and/or shapes.\nYou can write your own plot code or use existing library plot code.",
"plot_dataset(X, y)",
"Apply your dataset through PCA and LDA, and plot the projected data using the same plot code.\nExplain the differences you notice, and how you manage to construct your dataset to achieve such differences.\nYou can use the PCA and LDA code from the scikit-learn library.",
"from sklearn.decomposition import PCA\n\npca = PCA()\nX_pca = pca.fit_transform(X)\n\nplot_dataset(X_pca, y, xlabel='PC 1', ylabel='PC 2')\n\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\n\nlda = LDA(n_components=dimension)\nX_lda = lda.fit_transform(X, y)\n\nplot_dataset(X_lda, y, xlabel = \"LD 1\", ylabel = \"LD 2\")"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
paulromano/openmc
|
examples/jupyter/unstructured-mesh-part-ii.ipynb
|
mit
|
[
"Unstructured Mesh: Tallies with CAD and Point Cloud Visualization\nIn the first notebook on this topic, we looked at how to set up a tally using an unstructured mesh in OpenMC.\nIn this notebook, we will explore using unstructured mesh in conjunction with CAD-based geometry to perform detailed geometry analysis on complex geomerty.\nNOTE: This notebook will not run successfully if OpenMC has not been built with DAGMC support enabled.",
"import os\nfrom IPython.display import Image\nimport openmc\nimport openmc.lib\n\nassert(openmc.lib._dagmc_enabled())",
"We'll need to download our DAGMC geometry and unstructured mesh files. We'll be retrieving those using the function and URLs below.",
"from IPython.display import display, clear_output\nimport urllib.request\n\nmanifold_geom_url = 'https://tinyurl.com/rp7grox' # 99 MB\nmanifold_mesh_url = 'https://tinyurl.com/wojemuh' # 5.4 MB\n\n \ndef download(url, filename):\n \"\"\"\n Helper function for retrieving dagmc models\n \"\"\"\n def progress_hook(count, block_size, total_size):\n prog_percent = 100 * count * block_size / total_size\n prog_percent = min(100., prog_percent)\n clear_output(wait=True)\n display('Downloading {}: {:.1f}%'.format(filename, prog_percent))\n \n urllib.request.urlretrieve(url, filename, progress_hook)",
"The model we'll be looking at in this example is a steel piping manifold:",
"Image(\"./images/manifold-cad.png\", width=800)",
"This is a nice example of a model which would be extremely difficult to model using CSG. To get started, we'll need two files: \n 1. the DAGMC gometry file on which we'll track particles and \n 2. a tetrahedral mesh of the piping structure on which we'll score tallies\nTo start, let's create the materials we'll need for this problem. The pipes are steel and we'll model the surrounding area as air.",
"air = openmc.Material(name='air')\nair.set_density('g/cc', 0.001205)\nair.add_element('N', 0.784431)\nair.add_element('O', 0.210748)\nair.add_element('Ar',0.0046)\n\nsteel = openmc.Material(name='steel')\nsteel.set_density('g/cc', 8.0)\nsteel.add_element('Si', 0.010048)\nsteel.add_element('S', 0.00023)\nsteel.add_element('Fe', 0.669)\nsteel.add_element('Ni', 0.12)\nsteel.add_element('Mo', 0.025)\nsteel.add_nuclide('P31',0.00023)\nsteel.add_nuclide('Mn55',0.011014)\n\nmaterials = openmc.Materials([air, steel])\nmaterials.export_to_xml()",
"Now let's download the geometry and mesh files.\n(This may take some time.)",
"# get the manifold DAGMC geometry file\ndownload(manifold_geom_url, 'dagmc.h5m') \n# get the manifold tet mesh\ndownload(manifold_mesh_url, 'manifold.h5m')",
"Next we'll create a 5 MeV neutron point source at the entrance the single pipe on the low side of the model with",
"src_pnt = openmc.stats.Point(xyz=(0.0, 0.0, 0.0))\nsrc_energy = openmc.stats.Discrete(x=[5.e+06], p=[1.0])\n\nsource = openmc.Source(space=src_pnt, energy=src_energy)\n\nsettings = openmc.Settings()\nsettings.source = source\n\nsettings.run_mode = \"fixed source\"\nsettings.batches = 10\nsettings.particles = 100",
"And we'll indicate that we're using a CAD-based geometry.",
"settings.dagmc = True\n\nsettings.export_to_xml()",
"We'll run a few particles through this geometry to make sure everything is working properly.",
"openmc.run()",
"Now let's setup the unstructured mesh tally. We'll do this the same way we did in the previous notebook.",
"unstructured_mesh = openmc.UnstructuredMesh(\"manifold.h5m\")\n\nmesh_filter = openmc.MeshFilter(unstructured_mesh)\n\ntally = openmc.Tally()\ntally.filters = [mesh_filter]\ntally.scores = ['flux']\ntally.estimator = 'tracklength'\n\ntallies = openmc.Tallies([tally])\ntallies.export_to_xml()\n\nsettings.batches = 200\nsettings.particles = 5000\nsettings.export_to_xml()\n\nopenmc.run(output=False)",
"Again we should see that tally_1.200.vtk file which we can use to visualize our results in VisIt, ParaView, or another tool of your choice that supports VTK files.",
"!ls *.vtk\n\nImage(\"./images/manifold_flux.png\", width=\"800\")",
"For the purpose of this example, we haven't run enough particles to score in all of the tet elements, but we indeed see larger flux values near the source location at the bottom of the model.\nVisualization with statepoint data\nIt was mentioned in the previous unstructured mesh example that the centroids and volumes of elements are written to the state point file. Here, we'll explore how to use that information to produce point cloud information for visualization of this data.\nThis is particularly important when combining an unstructured mesh tally with other filters as a .vtk file will not automatically be written with the statepoint file in that scenario. To demonstrate this, let's setup a tally similar to the one above, but add an energy filter and re-run the model.",
"# energy filter with bins from 0 to 1 MeV and 1 MeV to 5 MeV\nenergy_filter = openmc.EnergyFilter((0.0, 1.e+06, 5.e+06))\n\ntally.filters = [mesh_filter, energy_filter]\nprint(tally)\nprint(energy_filter)\ntallies.export_to_xml()\n\n!cat tallies.xml\n\nopenmc.run(output=False)",
"Noice the warning at the end of the output above indicating that the .vtk file we used before isn't written in this case.\nLet's open up this statepoint file and get the information we need to create the point cloud data instead.\nNOTE: You will need the Python vtk module installed to run this part of the notebook.",
"with openmc.StatePoint(\"statepoint.200.h5\") as sp:\n tally = sp.tallies[1]\n \n umesh = sp.meshes[1]\n centroids = umesh.centroids\n mesh_vols = umesh.volumes\n \n thermal_flux = tally.get_values(scores=['flux'], \n filters=[openmc.EnergyFilter],\n filter_bins=[((0.0, 1.e+06),)])\n fast_flux = tally.get_values(scores=['flux'],\n filters=[openmc.EnergyFilter],\n filter_bins=[((1.e+06, 5.e+06),)])\n\ndata_dict = {'Flux 0 - 1 MeV' : thermal_flux,\n 'Flux 1 - 5 MeV' : fast_flux,\n 'Total Flux' : thermal_flux + fast_flux}\n\numesh.write_data_to_vtk(\"manifold\", data_dict)",
"We should now see our new flux file in the directory. It can be used to visualize the results in the same way as our other .vtk files.",
"!ls *.vtk\n\nImage(\"./images/manifold_pnt_cld.png\", width=800)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
trsherborne/learn-python
|
lesson6.ipynb
|
mit
|
[
"LSESU Applicable Maths Python Lesson 6\n29/11/16\nToday is all about handling and generating data. We'll be looking at the first principles of 2 different packages you should know about for handling data in Python:\n * NumPy\n * Pandas\n Run the appropriate version of the following commands ASAP to get yourself set up!",
"# Run this if you are using a Mac machine or have multiple versions of Python installed\n!pip3 install numpy pandas matplotlib pandas_datareader --upgrade\n\n# Run this if you are using a Windows machine\n!pip install numpy==1.11.1 pandas==0.19.0 matplotlib==1.5.3 pandas_datareader==0.2.1 --upgrade\n\n# Everyone run this block\nimport numpy as np\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom pandas_datareader import data as web\n%matplotlib inline",
"Recap from last week\n\nWe looked at the basics of Object Oriented Programming, or OOP, last week. If you couldn't make it, don't be concerned because the content from last week won't affect what we will be looking at today.\n```\nclass Human(object):\n def init(self,name,age,height):\n self.name = name\n self.age = age\n self.height = height\ndef __lt__(self,other):\n return self.age < other.age\ndef __le__(self,other):\n return self.age <= other.age\ndef __gt__(self,other):\n return self.age > other.age\ndef __ge__(self,other):\n return self.age >= other.age\ndef __eq__(self,other):\n return self.age==other.age\n\ndef age_in_dog_years(self):\n return 7*self.age\n\n```\nNumPy\nNumPy is the standard mathematical and scientific computing package for Python. NumPy is a need to know if you want to write efficient and interpretable code. NumPy includes an optimised array type as well as linear algebra, Fourier Transform and random number capabilities.\nUnder the hood, much of NumPy is written in C/C++/Fortran which is highly optimised according to your specific computer. Using NumPy gives you features and speed you couldn't achieve with native Python.\nLink to NumPy documentation\nThe main NumPy feature - the Array type\nThe NumPy array is a grid of values all of the same type, this is different to Python lists which can have elements of different types. \nArrays are indexed similarly to lists, with each dimension being indexed from zero. When declaring the Array object be clear in your mind the dimensions of the array you need.\nnp.ones((3,4),int) \n -> 3 is the number of ROWS\n -> 4 is the number of COLUMNS\n -> int is the type of the array elements\n--\narray([[1, 1, 1, 1],\n [1, 1, 1, 1],\n [1, 1, 1, 1]])",
"# Creating an array of all zeroes\n\nnp.zeros((3,4),int)\n\nprint('a. np.zeros((2,2))')\na = np.zeros((2,2),int) \nprint(a) \n\nprint('b. np.ones((1,2))')\nb = np.ones((1,2)) \nprint(b) \n\nprint('c. np.full((2,2), 7)')\nc = np.full((2,2), 7) # Create a constant array\nprint(c) \n\nprint('d. np.eye(2)')\nd = np.eye(2) \nprint(c) \n \nprint('e. np.random.random((2,2))')\ne = np.random.random((2,2)) \nprint(e) ",
"In general you can follow the format below for declaring most NumPy arrays\nnp.format(shape, fill_value, dtype)\nWhere shape is declared as a tuple like (3,4) and dtype is the type of the data which is constant across the array but doesn't have to be a number\nTry replacing int in the declaration of array a with str",
"# You can also declare NumPy arrays using standard Python\n# Lists (and Lists of Lists)\n\nl1 = [1,2,3,4]\nl2 = [5,6,7,8]\nl3 = [9,10,11,12]\nl = [l1,l2,l3]\n\nprint(l)\n\nl_array = np.array(l)\n\n#print(l_array)",
"Indexing a NumPy array",
"# For a list of lists we would use the [][] notation\nupper_left_val = l[0][0]\nprint(upper_left_val)\nprint()\n\n# We use a single [] with np and seperate dimensions by ,\nupper_left_val_np = l_array[0,0]\nprint(upper_left_val_np)\nprint()\n\n# You can also slice arrays as so\nprint(l_array[0:2,1:3])\nprint()\n\n# And use the shape attribute of the object to understand\n# size \nprint(l_array.shape)\nprint()\n\n# Or the dtype attribute to inspect the type of the array\nprint(l_array.dtype)\n\n# Using the arange function you can retrieve linearly\n# spaced integers\nlin_space_int = np.arange(1,10)\nprint(lin_space_int)\nprint()\n\n# Or specify a step to create different spacings\nlin_space_new = np.arange(1,10,0.5)\nprint(lin_space_new)",
"Array mathematics",
"# Declare two example arrays\nx = np.array([[1,2],[3,4]], dtype=np.float64)\ny = np.array([[5,6],[7,8]], dtype=np.float64)\n\n# By default, operations are element wise in NumPy\n\n# Addition, two options\nprint(x+y)\n#print(np.add(x,y))\nprint()\n\n# Subtraction\nprint(x-y)\n#print(np.subtract(x,y))\nprint()\n\n# Product\nprint(x*y)\n#print np.multiply(x,y)\nprint()\n\n# Division\nprint(x/y)\n#print(np.divide(x,y))\nprint()\n\n# Square Root\nprint(np.sqrt(x))\nprint()\n\n## For Matrix operations, use the set of NumPy functions\n\n# Dot product\nprint(x.dot(y))\n#print(np.dot(x,y))\nprint()\n\n# You can also sum across dimensions easily\nprint(np.sum(x)) # For every element\nprint()\n\nprint(np.sum(x,axis=0))\nprint()\n\n# Or transpose a Matrix\nprint(x)\nprint()\nprint(x.T)",
"Challenge\nDeclare a 5 by 5 array of any numbers you want using one of the above methods we've discussed. Then read this NumPy documentation, when you are ready print the mean, standard deviation, minimum and maximum of your array",
"# TO DO \n# You can declare an array of random numbers or start with a list of lists\n# Check your array is the right size by printing the .shape attribute\n\n\n\n# Print the mean\n\n\n# Print the standard deviation \n\n\n# Print the minimum\n\n\n# Print the maximum\n\n\n\n# END TODO",
"Pandas\nPandas is a data manipulation package that we've glimpsed before. If you know R, then the Pandas Dataframe type will be very familiar, if not you can think of a Dataframe is a spreadsheet like object which can be manipulated and interfaced with much easier than lists of dictionaries (or dictionaries of lists!).\nLink to Pandas documentation\nThe main Pandas feature - the DataFrame type",
"# Pandas DataFrames can be initialised using Numpy objects or\n# or with native Python objects.\n\n# Using Numpy\ndf1 = pd.DataFrame(np.random.randn(3,4),columns=list('ABCD'))\nprint(df1)\n\n# Or with a dictionary of list vales\nmy_dict = {\n 'A':[1,2,3,4],\n 'B':['2016-12-25','2015-12-25','2014-12-25','2013-12-25'],\n 'C':pd.Series(1,index=list(range(4)),dtype=\"float32\"),\n 'D':pd.Categorical(['Test','Test','Test','Train']),\n 'E':[True,True,False,False]\n}\ndf2 = pd.DataFrame(my_dict)\ndf2\n\n# Many features of the DataFrame are similar to a NumPy array\n\ndf2.dtypes\n\n#df2.shape\n\n# A fast way to grab quick insights of the numerical features\n# of your data is to use .describe()\n\ndf2.describe()\n\n# We can sort and organise our dataframe based upon defined regions\ndf2.sort_index(axis=1,ascending=False)\n\ndf2.sort_values(by='B')",
"Selecting elements",
"# Use [] indexing and a column name to grab columns\ndf2['C']\n\n# Use [] and row values to grab rows\ndf2[1:3]\n\n# Or use .loc to retrieve specific values like the 0th row\ndf2.loc[0]\n\n# Or use loc and a condition\n\ndf2.loc[df2['E']==True]",
"Revisiting what we did on the first day",
"# Choose a stock\nticker = 'DRYS'\n\n# Choose a start date in US format MM/DD/YYYY\nstock_start = '10/2/2014'\n# Choose an end date in US format MM/DD/YYYY\nstock_end = '28/11/2016'\n\n# Retrieve the Data from Google's Finance Database\nstock = web.DataReader(ticker,data_source='google',\n start = stock_start,end=stock_end)\n\n# Print a table of the Data to see what we have just fetched\nstock.tail()\n\n# Generate the logarithm of the ratio between each days closing price\nstock['Log_Ret'] = np.log(stock['Close']/stock['Close'].shift(1))\n\n# Generate the rolling standard deviation across the time series data\nstock['Volatility'] = (stock['Log_Ret'].rolling(window=100).std())*np.sqrt(100)\n\n# Create a plot of changing Closing Price and Volatility\nstock[['Close','Volatility']].plot(subplots=True,color='b',figsize=(8,6))",
"Experiment with changing the Stock Ticker from GOOG and the dates. What interesting insights can you find? Is there a company which has become very volatile recently?"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
WMD-group/MacroDensity
|
tutorials/HeteroJunction/HeteroJunction.ipynb
|
mit
|
[
"Heterojunction offsets\nHeterojunctions are the basis of much of the semiconductor technology. From computer memory, to flat-panel display to photovoltaics, semiconductor heterojuctions underly much of the technology created in the past 100 years. Previosuly we constructed band-alignment-diagrams using the IP and EA from slab calculations. However, the details of the interface can also affect the line-up, from charge transfer and lattice strain effects, for example. For an excellent overview of the subject, I recommend: Band engineering at interfaces: theory and numerical experiments.\nOur system\nFor this demonstration we will choose a well-matched interface system. The interface between wurtzite structured ZnO and ZnS. In this case the lattice constants have been set to be those of ZnO. Obviously, due to deformation potentials the value of the offset depends on the lattice parameter used.\nNote The values in this tutorial are from very approximate calculations and no interface relaxation was allowed, therefore the absolute actual numbers should not be treated as meaningful.\n<img src=\"HJ.png\">\nProcedure\nWe present two procedures for obtaining the offset from a heterojunction calulcation. \n(i) We will intitally calculate the offset for the interface as we have modelled it, this essentially involves the calculation of the macroscopic average of the potential.\n(ii) We will then look at how the effects of strain at the interface can be accounted for to yield a \"natural band offset\". As outlined in Appl. Phys. Lett. 94, 212109 (2009).\nProcedure (i)\n$VBO = \\epsilon_{vbm}^a - \\epsilon_{vbm}^b + \\Delta V$\n$VBO$ is the offset. $\\epsilon_{vbm}$ are the eigenvalues of the highest occupied bands from bulk calculations of the two materials (ZnO and ZnS) and $\\Delta V$ is the offset in the potential across the interface.\nWe obtain the $\\epsilon_{vbm}$ values from the bulk OUTCAR files as in the slab model exercise. 
\nInterface calculation\nNow we do a calculation of the interface to get the potential profile. Important settings for the INCAR file:\nLVHAR = .TRUE. # This generates a LOCPOT file with the potential\n\nIn your example directory there should already be a LOCPOT file. This is the one we will use to analyse the potential and extract the value of $\\Delta V$.\nIn the sample PlanarAverage.py file, all we have to edit are the top three lines. Of these the only one that is not obvious is the lattice_vector parameter. This is just the periodicity of the slab in the direction normal to the surface. In the picture below, this is just the distance between the layers of ZnO.\n<img src=\"HJ.png\">",
"%matplotlib inline\nimport os\nimport macrodensity as md\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\nextrema = md.vasp_tools.get_band_extrema('OUTCAR_ZnO')\nprint(extrema)\nextrema = md.vasp_tools.get_band_extrema('OUTCAR_ZnS')\nprint(extrema)\n\ninput_file = 'LOCPOT'\nlattice_vector = 3.6\noutput_file = 'planar.dat'",
"The code below is usually set in the PlanarAverage.py file; you don't need to edit it",
"if os.path.isfile('LOCPOT'):\n print('LOCPOT already exists')\nelse:\n os.system('bunzip2 LOCPOT.bz2')\n\nvasp_pot, NGX, NGY, NGZ, Lattice = md.read_vasp_density(input_file)\nvector_a,vector_b,vector_c,av,bv,cv = md.matrix_2_abc(Lattice)\nresolution_x = vector_a/NGX\nresolution_y = vector_b/NGY\nresolution_z = vector_c/NGZ\ngrid_pot, electrons = md.density_2_grid(vasp_pot,NGX,NGY,NGZ)",
"The code below will prompt you to say which axis you want to average along",
"## POTENTIAL\nplanar = md.planar_average(grid_pot, NGX, NGY, NGZ)\n## MACROSCOPIC AVERAGE\nmacro = md.macroscopic_average(planar, lattice_vector/2, resolution_z)",
"Now we can plot the results",
"fig, ax1 = plt.subplots(1, 1, sharex=True)\n\ntextsize = 22\nmpl.rcParams['xtick.labelsize'] = textsize\nmpl.rcParams['ytick.labelsize'] = textsize\nmpl.rcParams['figure.figsize'] = (15, 8)\n\nax1.plot(planar,label=\"Planar\",lw=3)\nax1.plot(macro,label=\"Macroscopic\",lw=3)\n\nax1.set_ylabel('V/V', fontsize=22)\nax1.set_xlabel('Grid position', fontsize=22)\n\nax1.set_xlim(0,len(planar))\n\nax1.set_facecolor((0.95,0.95,0.95))\nax1.grid(True)\n\nax1.legend(fontsize=22)\nplt.savefig('hj-offset.png')\nplt.show()\nnp.savetxt(output_file,macro)",
"Get $\\Delta V$\nFrom inspection of the macroscopic average, saved in planar.dat we can find that the value of $V$ at each plateau is 2.64 V and -2.61 V. So the step $\\Delta V$ is 5.25 V\nThe offset\nApplying the equation from earlier, the VBO is \n1.07 - 2.28 + 5.25 = 4.04 eV\nProcedure (ii)\nIn this procedure we can take into account the fact that the band edge positions are affected by changes to the cell volume. To do this we first calculate the eigenvalues $\\epsilon_{VBM}$ at an average volume of the two materials.",
"extrema = md.vasp_tools.get_band_extrema('OUTCAR_ZnO_av')\nprint(extrema)\nextrema = md.vasp_tools.get_band_extrema('OUTCAR_ZnS_av')\nprint(extrema)",
"The deformation potential\nTo account for the effect of strain on the band positions we need to know the deformation potential $\\alpha$. In this case the values for $\\alpha$ of ZnO and ZnS (and many other systems) are available Phys. Rev. B 73, 245206.\n$\\alpha_{ZnO} = 0.48$\n$\\alpha_{ZnS} = 0.83$\nThe equation we now use is \n$ VBO = (\\epsilon_{vbm}^a + \\alpha_a \\partial \\ln V_a )- (\\epsilon_{vbm}^b + \\alpha_b \\partial \\ln V_b) + \\Delta V $\nHere the eigenvalues and offset potential are the same as in procedure (i), but calculated for an average volume cell. We also use the log of the change in volume of each phase multiplied by the deformation potential ($ \\alpha_B \\partial \\ln V_B $).\nThe potential alignment\nThis procedure is the same as for procedure (i), but always using the average volume cell. Actually as we used the average cell in procedure (i), it turns out to be exactly the same. But we can do it again to reinforce.",
"input_file = 'LOCPOT'\nlattice_vector = 3.6\noutput_file = 'planar.dat'\n\nvasp_pot, NGX, NGY, NGZ, Lattice = md.read_vasp_density(input_file)\nvector_a,vector_b,vector_c,av,bv,cv = md.matrix_2_abc(Lattice)\nresolution_x = vector_a/NGX\nresolution_y = vector_b/NGY\nresolution_z = vector_c/NGZ\ngrid_pot, electrons = md.density_2_grid(vasp_pot,NGX,NGY,NGZ)\n\n## POTENTIAL\nplanar = md.planar_average(grid_pot,NGX,NGY,NGZ)\n## MACROSCOPIC AVERAGE\nmacro = md.macroscopic_average(planar,lattice_vector/2,resolution_z)\n\nfig, ax1 = plt.subplots(1, 1, sharex=True)\n\ntextsize = 22\nmpl.rcParams['xtick.labelsize'] = textsize\nmpl.rcParams['ytick.labelsize'] = textsize\nmpl.rcParams['figure.figsize'] = (10, 6)\n\nax1.plot(planar,label=\"Planar\",lw=3)\nax1.plot(macro,label=\"Macroscopic\",lw=3)\n\nax1.set_xlim(0,len(planar))\n\nax1.set_facecolor((0.95,0.95,0.95))\nax1.grid(True)\n\nax1.legend(fontsize=22)\nplt.show()\nnp.savetxt(output_file,macro)",
"As before the value of $\\Delta V$ is 5.25 V.\nVolume change\nBy comparing the average to the equilibrium POSCARs we can get the values of $\\partial\\ln V = \\frac{\\Delta V}{V_{av}}$",
"dlnVa = (47.55 - 61.04) / 61.04 # ZnO\ndlnVb = (76.88 - 61.04) / 61.04 # ZnS\n\nVBO_natural = (-2.464 + 0.48 * dlnVa ) - (4.8288 + 0.83 * dlnVb) + 5.25\n\nprint 'Natural offset: %3.1f eV' % (VBO_natural)",
"Exercise Calculate band offsetr and natural band offset for the ZnS/ZnSe heterojunction\nThe necessary data is in the ZnS_ZnSe folder."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
lhcb/opendata-project
|
LHCb_Open_Data_Project.ipynb
|
gpl-2.0
|
[
"Measuring Matter Antimatter Asymmetries at the Large Hadron Collider\n\nIntroduction\nPress the grey arrow to expand each section\n\n<b> Welcome to the first guided LHCb Open Data Portal project! </b>\n<div align=\"justify\">Here you will be able to analyse data taken by the Large Hadron Collider (LHC) at CERN. The aim of this study is for you to be able to search for differences in the behaviour of matter and [antimatter](https://en.wikipedia.org/wiki/Antimatter). This project will enable you to carry out your own data analysis at a level similar to that of CERN research. This project does not require a detailed knowledge of particle physics. It is most suitable for people with a scientific and mathematical background equivalent to that required for applying for university entrance in a science, technology engineering or mathematics discipline. Some previous familiarity with computer programming would also be advantageous. Additional theoretical information or programming knowledge you might need is provided as the project progresses.</div>\n\nBefore you start, you might find it helpful to find out more about matter antimatter asymmetries, what we hope to learn by studying them, and how we can detect them with experiments such as the LHCb experiment at CERN.\nHere are some details that relate directly to this project:\n - What is the particle physics focus of this experiment? and what will I study in this project?\n - How does the LHCb detector record the data?\nOnce you have tried the analysis please provide feedback to help us improve this project using this brief survey.\nFeel free to post questions, feedback and discuss results with other users using the GitHub issue tracker.\nGetting Started\nAims:\n\nBecome familiar with the help available for programming\nRead the simulation data into the program\n\n\n<div align=\"justify\">Just like researchers at CERN, you will be coding your own analysis. This will use the computer programming language Python. 
There is no prerequisite of Python language programming experience for following this project. There will be hints available to you helping you along the way. You might find these tutorials on Python helpful:</div>\n\nPython Tutorials\nThe most important coding guidance we are providing you is in the form of an unrelated analysis. We have performed an analysis of Nobel prizes winners. That link provides you with the full code for this. The coding skills required for this Nobel analysis is very similar to that needed for the particle physics analysis. Hence by reading and understandign that analysis you can copy and adapt the lines of code to perform your particle physics analysis.\nReading simulation data\nIn order to get started and check the first code that you will be writing works correctly it is best to start by analysing simulated data rather than real data from the LHC. The real data contains not only the type of events that you wish to analyse, known as the 'signal', but also events that can fake these, known as 'background'. The real data measurements are also limited by the resolution of the detector. The simplified simulation data provided here contains only the signal events and provides the results that would be obtained for a perfect detector.\nIMPORTANT: For every code box with code already in it, like the one below you must click in and press shift+enter to run the code.\nIf the In [x]: to the left of a codebox changes to In [*]: that means the code in that box is currently running",
"from __future__ import print_function\nfrom __future__ import division\n\n%pylab inline\nexecfile('Data/setup_main_analysis.py')",
"If you want help with coding there is in addition to the example code, some hints within each section and a function reference list.",
"# Let us now load the simulated data\nsim_data = read_root('Data/PhaseSpaceSimulation.root')",
"<div align=\"justify\">Now that you can access the data, you can use a number of functions which can help you analyse it. You can find these functions in the libraries at the top of the page. Try to make a table of some of the information within your data file so that you can get a feel of the typical values for data in the set. Understanding the range of values for different variables will help with plotting graphs.</div>\n\nThe data contains information about 'events' that were observed in the detector. An event refers to the particles produced when an interaction took place when two proton are collided at the LHC. The data you have includes information about particles observed in the detector after each collision. If you think of the data as a table, each row of the table is the results from a different collision. The columns of the table are different quantities measured about the particles produced in the collision. \nWe are interested in analysing the decays of particles called B<sup>+</sup> or B<sup>-</sup> mesons decaying into three other mesons called kaons (K<sup>+</sup> or K<sup>-</sup>). The events you have been given are those in which this process may have occurred. The detector has been used to reconstruct tracks that may have come from the kaons. You are given the measured momenta, charge, and likelihood of the tracks being kaons. You are given information for three tracks in each event, the ones that could be the three kaons that a B<sup>+</sup> or B<sup>-</sup> meson has decayed into. The following information is available about each event: information list",
"# make a table of the data variables here ",
"Hints\nCreating a table - Use your head() - remember to look at the example analysis to see how this was done there.\nInvariant mass reconstruction\nAims:\n\nPlot a histogram of the momentum of one of the kaon candidates\nCalculate the energy of each of the kaon candidates\nPlot the invariant masses of the B<sup>+</sup> or B<sup>-</sup> mesons___\n\nPlotting a feature:\nYou can plot any features of the data in a histogram. Choose any suitable binning that allows you to observed the distribution of the variable clearly. Try making a histogram for the first kaon candidate's momentum x-component (H1_PX):",
"# make a histogram of the H1_PX variable here",
"Momentum is a vector quantity, it has x,y, and z components. Try calculating the magnitude of the momentum of the first kaon candidate and plotting a histogram of this, you'll need the H1_PX, H1_PY and H1_PZ variables.",
"# calculate a variable for the magnitude of the momentum of the first kaon \n# plot a histogram of this variable",
"Hints\nHistogram plotting - You can use the hist() function. The parameters bins(n) and range(x,y) allow youto plot n bins over the range x to y.\nVector Magnitude The square magnitude of a magnitude of a vector is given by the sum of the square of its of its components in the x,y and z directions: $p^2 = p_x^2+p_y^2+p_z^2$, where $p$ is the magnitude of the momentum, and $p_x,p_y,p_z$ are the components of the momentum in the X,Y, and Z directions.\nEnergy and mass\nEinstein's theory of special relativity relates Energy, mass and momentum. We have measured the momentum of the kaon candidates in the detector, and have just plotted one of the components of the momentum of the kaon, and the magnitude of the momentum. The invariant mass of the kaon is well known and you can look this up. We wish to determine the energy of the kaons. \nHere is a brief guide to the energy-momentum relation of special relativity. Further information can be found on wikipedia pages on Invariant Mass and the Energy-momentum relation.\nNow, calculate the energy of the first kaon candidate using:\n<center> $E^2 = p^2 + m^2$ </center>",
"# calculate the energy of the first kaon\n\n# plot a histogram of this variable",
"Hints\nEnergy calculation - Use the magnitude of momentum variable you calculated above and the known invariant mass of the kaon to work out the energy of the first hadron. Calculate the energy squared, and then the energy and plot this.\nKaon mass - you can find the kaon mass on wikipedia or in physics textbooks. There is also a reference used by particle physicists: all our knowledge of the properties of the particles are collected together by the particle data group here.\nCalculate the momenta and energies of the second and third kaon candidates also.",
"# calculate variables for the energy of the other two kaons",
"Adding features of the $B$ meson\nIn this analysis we are looking for B<sup>+</sup> or B<sup>-</sup> mesons (see B meson) that have decayed into the three charged kaons.\nEnergy is a conserved quantities. This means that you can use the energy of the three 'daughter' kaons, which you have calculated above, to calculate the energy that the B meson that decayed into them must have.\nMomentum is also a conserved quantity. Hence you can also use the momenta of the 'daughter' kaons to calculate the momentum of the B meson. But be careful - momentum is a vector quantity. \nUsing the Energy of the B meson and the magnitude of the momentum of the B meson you can use the energy-momentum relationship again. This time you are applying it to the B meson. This will allow you to calculate the invariant mass of the B meson.",
"# calculate the energy of the B meson\n\n# calculate the momentum components of the B meson \n# and the magnitude of the momentum of the B meson\n\n# calculate the B meson invariant mass\n# plot the B meson invariant mass in a histogram",
"You should have a graph that sharply peaks at the mass of the B<sup>+</sup> meson. The mass of the B<sup>+</sup> and B<sup>-</sup> meson are the same. Check that the peak of your graph is at the known mass of the B meson. Congratulations!\nRecall that you have made this plot for simulated data. How might you expect the plots for real data to look different ? In the next section you will start to work with the real LHC data.\nHint\nB Meson Energy - From energy conservation, the energy of the B meson will be the sum of the energies of the three kaons: $E_B=E_{K1}+E_{K2}+E_{K3}$, where $E_B$ is the energy of the B meson, $E_{K1}, E_{K2}, E_{K3}$ are the energies of each of the kaons. \nB meson momentum - From momentum conservation, the X component of the momentum of the B meson will be the sum of the X momentum components of the three Kaons : $px_B=px_{K1}+px_{K2}+px_{K3}$, where $px$ is the X direction component of the momentum of the B meson, $px_{K1},px_{K2},px_{K3}$ are the X direction components of the momenta of the three kaons. You can then do the same with the Y and Z components. Having obtained the X,Y, and z components of the B momentum you can find the magnitude of the momentum of the B meson.\n B meson invariant mass* - Rearrange the equation $E^2=p^2+m^2$ to find $m^2$. Using the values of the magnitude of the momentum of the B meson and the B meson Energy, find the mass of the B meson.\nHistogram plotting - Take care that the range of your mass plot is set suitably that you can see the mass peak. Once you have found the peak you can set the range appropriately. You do not have to start your graph at a mass of 0.\nUnits - The data you are provided has energies in 'MeV' (10<sup>6</sup> electron volts). 
The mass of the B meson is often quoted in 'GeV/c<sup>2</sup>' (10<sup>9</sup> electron volts).\nWorking with real data and applying cuts\nAims:\n\n\nFilter out data that is not from the B<sup>+</sup> → K<sup>+</sup>K<sup>+</sup>K<sup>−</sup> channel, or the antiparticle equivalent B<sup>-</sup> → K<sup>+</sup>K<sup>-</sup>K<sup>−</sup>\n\n\nPlot a histogram of B-meson mass for the real data and observe how different cuts affect the data\n\n\nIn the section above you have analysed the simulation data to determine the invariant mass of the B meson. Now, you can start applying the methods you have used to the real LHCb data. This data was collected by the LHCb detector at CERN during 2011, the first major year of LHC operations.\nThe data you are given has been filtered to select only events that are likely to have come from B<sup>+</sup> or B<sup>-</sup> mesons decaying into three final state charged particles. You are interested in the case where these three final state paticles are charged kaons K<sup>+</sup> or K<sup>-</sup>.\nAn introduction has been provided on the detector and data sample. As background information we also provide further information on the selection that has been applied to select this data sample.\nPreselection\nYou want to apply a preselection to the three final state tracks that\n* Ensures that they are not muons (i.e. !H1_isMuon where ! means not, and similarly for H2 and H3)\n* Requires that they each have a low probability of being pions (e.g. H1_ProbPi < 0.5)\n* Requires that they each have a high probability of being a kaon (e.g. H1_ProbK > 0.5)\nYou need to find a balance between making cuts that are too loose and include too many background events and too tight and reject many of your signal events.\nIn order to now find the most suitable further selection cuts, make yourself familiar with how cuts can affect the significance of the final result. Feel free to come back to this stage later and adjust your cuts to see the impact. 
\nThe pre selection you create will be applied for you if give it the name 'preselection'.\nWe have provided an example preselection in the hints, so feel free to use that to get started if you wish. start with a loose preselection and then refine it after you have studied the plots.",
"# Make your preselection here, this line applies no preselection\npreselection = \"\"",
"This next line of code just loads the real data into a new DataFrame, this may take a few minutes.\nIt also applies the preselection that you have created if you called it preselection.",
"real_data = read_root(['Data/B2HHH_MagnetDown.root', 'Data/B2HHH_MagnetUp.root'], where=preselection)",
"Make histograms of the probability of a final state particle being a kaon or a pion.\nThese will help guide you on suitable probability values at which to cut.\nYou can also consider more sophisticated options like 2-D plots of kaon and pion probabilities or different values of the cuts for the different final state particles.",
"# plot the probability that a final state particle is a kaon\n\n# plot the probability that a final state particle is a pion",
"Now calculate the invariant mass of the B meson for the real data and plot a histogram of this. \nCompare it with the one you drew for the simulation data. \nCan you explain the differences you observe?",
"# draw a histogram for the B meson mass in the real data",
"Experiment with the cuts and see the impact of harsher or more lenient cuts on the invariant mass plot.\nYou should select a set of cuts which makes the signal most prominent with respect to the background.\nOnce you have finalised the selection on particle identification also make cuts on the reconstructed particle mass to select the events in the B meson mass peak, removing the background events which lie at lower and higher invariant masses. \nPreselection example hint\nThis is an example string, showing the syntax, that you could use as a preselection starting point:",
"preselection = \"H1_ProbPi < 0.5 & H2_ProbPi < 0.5 & H3_ProbPi < 0.5 & H1_ProbK > 0.5 & H2_ProbK > 0.5 & H3_ProbK > 0.5 & !H1_isMuon & !H2_isMuon & !H3_isMuon\"",
"Searching for global matter anti-matter differences\nIn this section you will start to study matter antimatter differences (CP Violation). Here 'global' means that you are looking for differences across all ranges of energy and momentum (the kinematics) of the kaons into which the charge B mesons have decayed. Later we look at 'local' differences in different regions of the kinematics. \nAims:\n\nCalculate the global CP asymmetry \nWork out the statistical uncertainty\nDetermine if there is evidence for CP violation in this decay\n\nIn order to quantify the matter antimatter asymmetry in this process we wish to compare the B<sup>+</sup> and the B<sup>-</sup> particles. The B<sup>-</sup> is the anti-particle of the B<sup>+</sup>.\nHow can you distinguish between events that contain B<sup>+</sup> and B<sup>-</sup> particles using H1_Charge, H2_Charge and H3_Charge?",
"# make a variable for the charge of the B mesons",
"Now count the numbers of events of each of the two types (N<sup>+</sup> and N<sup>-</sup>). Also calculate the difference between these two numbers.",
"# make variables for the numbers of positive and negative B mesons",
"In order to calculate the Asymmetry, you can make use of the formula:\n(note you may need to run this box in order to see the image)\n<img src=\"Images/AsymmetryEq.png\" width=\"200\" />",
"# calculate the value of the asymmetry, by using the formula above, and then print it",
"Hint\nDifferentiating between N+ and N-\n\nCharge is a conserved quantity. The charge of the $B$ meson is equal to the sum of the charges of the particles into which it decays.\nYou can use len(real_data.query('B_Charge == charge')) to count the number of mesons, where B_Charge is the variable you created and charge is 1 or -1.\nYou can find an example of this at the end of the example notebook.\n\nEstimating the significance of the deviation\nYou will now need to calculate the statistical uncertainty of the asymmetry. You can do so using the formula: <img src=\"Images/AsymmetryErrorEq.png\" width=\"200\" />\nThe significance of the result, sigma, is found by dividing the value for asymmetry by its uncertainty. A value exceeding three sigma is considered \"evidence\" by particle physicists while a value of five sigma or more can be called an \"observation\" or \"discovery\".",
"# calculate the statistical significance of your result and print it",
"Congratulations! You have performed your first search for a matter anti-matter difference.\nHere you have only considered the statistical uncertainty. Your measurement will also have other sources of uncertainty known as systematic uncertainties which you have not considered at this stage.\nDalitz plots and two body resonances\nAims:\n\nProduce Dalitz plots of the simulation and real data sample\nCreate ordered and binned dalitz plots.\nIdentify two body resonances in the Dalitz plots\n\nIn this stage we introduce you to an important technique for analysing decays of one particle (your charged B meson) into three bodies (the three kaons). This is known as a Dalitz plot. \nThe decay of the B meson can proceed either directly to the three-body final state or via an intermediate particle. For example, B<sup>+</sup> → K<sup>+</sup>K<sup>+</sup>K<sup>−</sup>, could proceed through the decay B<sup>+</sup> → K<sup>+</sup>R<sup>0</sup>, where R<sup>0</sup> is a neutral particle resonance which can decay R<sup>0</sup> → K<sup>+</sup>K<sup>-</sup>. Dalitz plots can be used to identify these resonances which are visible as bands on the Dalitz plot.\nMore information about these plots and why these are used in particle physics research can be found in Dalitz Plot Introduction.\nThe kinematics of a three-body decay can be fully described using only two variables. The energies and momenta of the three kaons are not independent of each other as they all come from the decay of a B meson and energy and momentum are conserved. The axes of the plots conventionally are the squared invariant masses of two pairs of the decay products. It is a 2D plot, the x and y axes are both squared masses and the density of points in the plot shows the structure.\nConsider our decay B<sup>+</sup> → K<sup>+</sup><sub>1</sub>K<sup>+</sup><sub>2</sub>K<sup>−</sup><sub>3</sub>, where we have numbered the kaons 1,2,3 to distinguish them. 
We can calculate the invariant mass of three possible combinations that could correspond to intermediate resonances R<sup>++</sup><sub>1</sub> → K<sup>+</sup><sub>1</sub>K<sup>+</sup><sub>2</sub>, R<sup>0</sup><sub>2</sub> → K<sup>+</sup><sub>1</sub>K<sup>-</sup><sub>3</sub>, and R<sup>0</sup><sub>3</sub> → K<sup>+</sup><sub>3</sub>K<sup>-</sup><sub>3</sub>. \nThe potential R<sup>++</sup><sub>1</sub> would be a doubly charged resonance. We would not expect to see any resonances corresponding to this as mesons are composed of one quark and one anti-quark and their charges cannot add up to two units.\nThe potential R<sup>0</sup><sub>2</sub> and R<sup>0</sup><sub>3</sub> correspond to configurations in which we could see resonances. Hence you should compute the invariant mass combinations for these. The square of these masses should be used as the Dalitz variables. \nWe suggest you make these plots first for the simulation data. In the simulation there are no intermediate resonances and your plot should be of uniform density inside the range physically allowed by energy and momentum conservation.",
"# calculate the invariant masses for each possible hadron pair combination\n\n# plot the invariant mass for one of these combinations \n\n# make a Dalitz plot with labelled axes for the simulation data",
"Hints\nCalculating invariant mass - Use the same technique as you did above for the B meson, but now applying it to two-body invariant masses rather than three.\nPlotting the Dalitz plot - You can use a scatter plot from matplotlib to plot a Dalitz plotm, see the example analysis. Remember to use the square of each two-body mass.\nAdding Dalitz plot for real data\nNow draw a Dalitz plot for the real data. Check that the signs of the charge of the hadrons are correct to correspond to your potential neutral resonances R<sup>0</sup><sub>2</sub> and R<sup>0</sup><sub>3</sub>.",
"# calculate the invariant masses for each possible hadron pair combination in the real data\n\n# make a Dalitz plot for the real data (with your preselection cuts applied)",
"<div align=\"justify\">While drawing the Dalitz plot for the real data, label the axes accordingly. Compare the Dalitz plots of the real data with the one for the simulation. \nWhat are the most striking differences? \n</div>\n\nOrdering Dalitz variables\nYou can make a further improvement to allow you to observe the resonances easier. Your resonances R<sup>0</sup><sub>2</sub> and R<sup>0</sup><sub>3</sub> are both composed of the same particle types, K<sup>+</sup>K<sup>-</sup>, and hence have the same distributions. It is useful to impose an ordering which distinguishes the resonances. We can call the resonances R<sup>0</sup><sub>Low</sub> and R<sup>0</sup><sub>High</sub>. In each event R<sup>0</sup><sub>Low</sub> is the resonance with the lower mass and the other corresponds to the higher mass combination of kaons. You can now use the mass of these ordered resonances as your Dalitz plot variables, thus effectively \"folding\" your Dalitz plot so that one axis always has a higher value than the other.",
"# make a new Dalitz plot with a mass ordering of the axes",
"Hint\nOrdered Dalitz plot - You can find the maximum of the mass of R<sup>0</sup><sub>Low</sub> vs R<sup>0</sup><sub>High</sub> elementwise on one axis, and the minimum of on the other. You can use numpy.min(a,b) and numpy.max(a,b) to perform elementwise comparisons between two arrays a and b and return one array filled by either the individual min/max element from the elementwise comparisons.\nBinned Dalitz plot\nYou can improve the representation of your Dalitz plot by binning the data. The hist2d function can be used to make a 2D histogram. The number of bins specification in the hist2d function is the number of bins in one axis.",
"# plot a binned Dalitz Plot\n# use colorbar() to make a legend for your plot at the side",
"Two body resonances\nYou can now use your Dalitz plot to identify the intermediate resonances that you see in your plots. The resonances will have shown up as bands of higher density of points on the plots. You can use the particle data group tables of mesons to identify which particles these correspond to. The tables give the masses and widths of the particles and their decay modes. You are looking for mesons with the masses corresponding to where you see the bands and that decay into K<sup>+</sup>K<sup>-</sup>.\nCongratulations! You have succesfully made a Dalitz plot and used it to observe the presence of intermediate particles in the decay of your charged B meson into three charged kaons. \nSearching for local matter anti-matter differences\nAims:\n\nObserve matter antimatter differences (CP violation) in regions of the Dalitz plots of the B<sup>+</sup> and B<sup>-</sup> mesons.\nFor the data in these regions produce plots to best display the CP violation.\n\nIn a section above you searched for global CP violation. You probably did not find a result with very high significance. \nCP violation may arise from interference between decays through different resonances, and hence the magnitude and sign of the CP violation may vary across the Dalitz plot. We can apply the same equation as in the global CP violation study \n<img src=\"Images/AsymmetryEq.png\" width=\"200\" />\nbut apply this only to events in particular regions of the Dalitz plot.\nRemoving charm resonances\nThe analysis performed here is to study the CP violation in the charmless B meson decays to kaons. \"charmless\" means that the decay does not proceed through a charm quark. However, the most frequent decay of the B mesons occur through the b quark decaying into a c quark. 
The majority of these events can be removed by rejecting the events that are proceeding through a D<sup>0</sup> meson (which contains the charm quark).\nIn the section above you plotted a histogram of the invariant mass of the intermediate resonances and will have observed the D<sup>0</sup> meson in this and in the Dalitz plot. You should now reject events that are around the mass range of the D<sup>0</sup> meson to suppress this contribution. You can do this in your pre-selection on the data that you set-up earlier in the project.\nThis was also a simplification that we did not consider when we were calculating the global asymmetry. After you have applied this pre-selection your code will now recompute the global asymmetry with the D<sup>0</sup> meson contribution rejected. We have not yet observed CP violation in charm mesons and searching for this is another active area of current research.\nComparing Dalitz plots\nMake separate Dalitz plots for the B<sup>+</sup> and the B<sup>-</sup> decays.\nLocal CP Violation will show up as an asymmetry between the B<sup>+</sup> and the B<sup>-</sup> plots. \nIn order that the statistical error on the asymmetry in each bin is not over large the bins need to contain a reasonable number of entries. Hence you will probably need larger bins than when you were looking for resonances in the section above. A suitable initial bin size might be $2.5~\\text{GeV}^2/\\text{c}^4 \\times 2.5~\\text{GeV}^2/\\text{c}^4$.",
"# make a Dalitz plot for the B+ events\n\n# make a Dalitz plot for the B- events\n\n# Make a plot showing the asymmetry between these two Daltz plots\n# i.e. calculate the asymmetry between each bin of the B+ and B- Dalitz plots and show the result in another 2D plot",
"Observing a large asymmetry in some regions of the plot does not necessarily mean you have observed CP violation. If there are very few events in that region of the plot the uncertainty on that large asymmetry may be large. Hence, the value may still be compatible with zero.\nYou can calculate the statistical uncertainty on the asymmetry, for each bin of the plot, using the same formulas as you used in the global asymmetry section. You can then make a plot showing the uncertainty on the asymmetry.\nDividing the plot showing the asymmetry by the plot showing the statistical uncertainty you can then obtain the significance of the asymmetry in each bin. You can then plot the significance of the asymmetry to see if there is any evidence for CP violation.",
"# Make a plot showing the uncertainty on the asymmetry \n\n# Make a plot showing the statistical significance of the asymmetry",
"Observing CP violation\nFrom your studies of the asymmetry plot, and the plot of its significance, you will be able to identify regions in the Dalitz plots that show indications of sizeable and significant CP Violation. You may find you have several consecutive bins with significant positive, or negative, asymmetries. You may wish to try alternative binnings of the Dalitz plots to best isolate the regions in which the significant asymmetries occur.\nYou can select events that are in these regions of the Dalitz plots where you observe signs of CP Violation. You can then plot a simple 1D histogram of the invariant mass distribution of the B<sup>+</sup> and the B<sup>-</sup> events, just as you did at the start of the project, but only for events that lie in the region of the Dalitz plot that you are interested in. Make the plots of the B<sup>+</sup> and the B<sup>-</sup> events with the same scale, or superimpose the two plots, so that you can observe if the particle and anti-particle decay processes are occurring at the same rate.",
"# Make a plot showing the invariant mass of the B+ meson particles\n# using events from a region of the Dalitz plot showing sizeable CP asymmetries\n\n# Make a plot showing the invariant mass of the B- meson particles using events from the same region",
"Congratulations! You should now have succesfully observed significant evidence for CP Violation. You should have plots that clearly show that particle and anti-particle decay processes occur at different rates in local regions of the Dalitz plot. You may wish to comapre your results with those published by the LHCb collaboration in this paper.\nWell Done you have succesfully completed your first particle physics analysis project. There are many more analyses that can be conducted witht the data set that you have been provided and the skills that you have gained. Ideas for some of these are explored in the section below. Maybe you can discover something new!\nNow you've finished the analysis please provide feedback to help us improve this project using this brief survey.\nFurther analyses\nThe data set you have been provided is the full set of data recorded by LHCb preselected for decays of charged B mesons into three final state tracks. This data set has been used for two important publications, here and here.\nWe discuss here: \n<ul>\n<li>Additional elements that you could add to your analysis of B<sup>+</sup> → K<sup>+</sup>K<sup>+</sup>K<sup>−</sup> </li>\n<li>Further analyses that you could perform with this data set</li>\n</ul>\n\nAdding extra sophistication\nSystematic Uncertainties\nIn this analysis you considered the statistical uncertainty on the result. This occurs as a result of having only a limited number of events. In addition there are systematic uncertainties, these arise from biases in your measurement. Here we discuss three sources of these for this analysis.\n<ul>\n<li> Production asymmetry. The LHC is a proton-proton collider and hence the initial state of the collision is not matter antimatter symmetric. Consequently B<sup>+</sup> and B<sup>-</sup> mesons may not be produced at exactly the same rates. This small production asymmetry it is estimated could be approximately 1%. 
It can also be measured from the data, as discussed in the LHCb paper.</li>\n<li> Detection asymmetry. The LHCb detector could be more efficient for detecting either the B<sup>+</sup> or the B<sup>-</sup> final states. This is because the positive and negative kaons will be bent by the magnet indifferent directions in the detector. If the efficiency of the detector is higher in one region than another this will lead to higher efficiencies for K<sup>+</sup> or K<sup>-</sup> and hence for B<sup>+</sup> or B<sup>-</sup>. For this reason the magnetic field of the LHCb detector is regularly reversed. You used data in this analysis in which the magnetic field was both up and down and hence the effect will (partially) cancel. By comparing results for the two magnet polarities separately you can check the size of this effect. When loading the data above both polarities were combined, you can instead load them independently to measure the difference between the two datasets.</li>\n<li> Analysis technique. The analysis technique you have used may bias the result. A major simplification we made in the analysis above was to neglect 'background' events. We imposed a selection to select a sample of predominantly signal events but have not accounted for the effect of the residual background events.</li>\n</ul>\n\nUsing mass sidebands\nOne source of 'background' events arises from random combinations of tracks in events that happen to fake the 'signal' characteristics. These events will not peak in the mass distribution at the mass of the B meson but rtaher will have a smoothly varying distribution. Looking at the number and distribution of of events away from the mass peak can allow you to estimate the number of background events under the mass peak.\nFitting distributions\nThe next level of sophistication in the analysis requires fitting the distributions of events that are observed in the B mass distribution in order to estimate the yield of signal events and background events. 
You can see how this is done in the LHCb paper on the analysis. Fitting can be performed using the CERN root framework.\nFurther analyses\nThe LHCb papers using this data set that you are using analysed four decay channels of the charged B mesons. You can perform any of these analyses.\n<ul>\n<li>B<sup>+</sup> → K<sup>+</sup>K<sup>+</sup>K<sup>−</sup> (and anti-particle equivalent). This is the analysis you have performed here. It has the lowest background of the four channels and hence the approximation we made of neglecting the background events will give the least bias to this channel.</li>\n<li>B<sup>+</sup> → π<sup>+</sup>π<sup>+</sup>π<sup>−</sup> (and anti-particle equivalent). In this analysis the final state is three charged pions. The level of background events compared to the signal is significantly higher as pions are the most commonly produced particle at the LHC. Hence, a method of estimating the background level should be added to complete this analysis.</li>\n<li>B<sup>+</sup> → K<sup>+</sup>π<sup>+</sup>π<sup>−</sup> (and anti-particle equivalent). In this analysis the final state is a mixture of one kaon and two pions. This means that the analysis needs to determine in each event which track is the best candidate kaon and apply selection cuts appropriately to select out the events.</li>\n<li>B<sup>+</sup> → π<sup>+</sup>K<sup>+</sup>K<sup>−</sup> (and anti-particle equivalent). This channel has a higher level of background compared to the signal.</li>\n</ul>"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
twosigma/beaker-notebook
|
doc/python/TableAPI.ipynb
|
apache-2.0
|
[
"Python API for Table Display\nIn addition to APIs for creating and formatting BeakerX's interactive table widget, the Python runtime configures pandas to display tables with the interactive widget instead of static HTML.",
"import pandas as pd\nfrom beakerx import *\nfrom beakerx.object import beakerx\n\npd.read_csv('../resources/data/interest-rates.csv')\n\ntable = TableDisplay(pd.read_csv('../resources/data/interest-rates.csv'))\ntable.setAlignmentProviderForColumn('m3', TableDisplayAlignmentProvider.CENTER_ALIGNMENT)\ntable.setRendererForColumn(\"y10\", TableDisplayCellRenderer.getDataBarsRenderer(False))\ntable.setRendererForType(ColumnType.Double, TableDisplayCellRenderer.getDataBarsRenderer(True))\ntable\n\ndf = pd.read_csv('../resources/data/interest-rates.csv')\ndf['time'] = df['time'].str.slice(0,19).astype('datetime64[ns]')\ntable = TableDisplay(df)\ntable.setStringFormatForTimes(TimeUnit.DAYS)\ntable.setStringFormatForType(ColumnType.Double, TableDisplayStringFormat.getDecimalFormat(4,6))\ntable.setStringFormatForColumn(\"m3\", TableDisplayStringFormat.getDecimalFormat(0, 0))\n\ntable\n\ntable = TableDisplay(pd.read_csv('../resources/data/interest-rates.csv'))\ntable\n#freeze a column\ntable.setColumnFrozen(\"y1\", True)\n#hide a column\ntable.setColumnVisible(\"y30\", False)\n\ntable.setColumnOrder([\"m3\", \"y1\", \"y5\", \"time\", \"y2\"])\n\ndef config_tooltip(row, column, table):\n return \"The value is: \" + str(table.values[row][column])\n\ntable.setToolTip(config_tooltip)\n\ntable.setDataFontSize(16)\ntable.setHeaderFontSize(18)\n\ntable\n\nmapListColorProvider = [\n {\"a\": 1, \"b\": 2, \"c\": 3},\n {\"a\": 4, \"b\": 5, \"c\": 6},\n {\"a\": 7, \"b\": 8, \"c\": 5}\n]\ntabledisplay = TableDisplay(mapListColorProvider)\n\ncolors = [\n [Color.LIGHT_GRAY, Color.GRAY, Color.RED],\n [Color.DARK_GREEN, Color.ORANGE, Color.RED],\n [Color.MAGENTA, Color.BLUE, Color.BLACK]\n]\n\ndef color_provider(row, column, table):\n return colors[row][column]\n\ntabledisplay.setFontColorProvider(color_provider)\ntabledisplay\n\nmapListFilter = [\n {\"a\":1, \"b\":2, \"c\":3},\n {\"a\":4, \"b\":5, \"c\":6},\n {\"a\":7, \"b\":8, \"c\":5}\n]\ndisplay = TableDisplay(mapListFilter)\n\ndef 
filter_row(row, model):\n return model[row][1] == 8\n\ndisplay.setRowFilter(filter_row)\n\ndisplay\n\ntable = TableDisplay(pd.read_csv('../resources/data/interest-rates.csv'))\ntable.addCellHighlighter(TableDisplayCellHighlighter.getHeatmapHighlighter(\"m3\", TableDisplayCellHighlighter.FULL_ROW))\n\ntable",
"Display mode: Pandas default",
"beakerx.pandas_display_default()\npd.read_csv('../resources/data/interest-rates.csv')",
"Display mode: TableDisplay Widget",
"beakerx.pandas_display_table()\npd.read_csv('../resources/data/interest-rates.csv')",
"Recognized Formats",
"TableDisplay([{'y1':4, 'm3':2, 'z2':1}, {'m3':4, 'z2':2}])\n\nTableDisplay({\"x\" : 1, \"y\" : 2})",
"Programmable Table Actions",
"mapList4 = [\n {\"a\":1, \"b\":2, \"c\":3},\n {\"a\":4, \"b\":5, \"c\":6},\n {\"a\":7, \"b\":8, \"c\":5}\n]\ndisplay = TableDisplay(mapList4)\n\ndef dclick(row, column, tabledisplay):\n tabledisplay.values[row][column] = sum(map(int,tabledisplay.values[row]))\n\ndisplay.setDoubleClickAction(dclick)\n\ndef negate(row, column, tabledisplay):\n tabledisplay.values[row][column] = -1 * int(tabledisplay.values[row][column])\n\ndef incr(row, column, tabledisplay):\n tabledisplay.values[row][column] = int(tabledisplay.values[row][column]) + 1\n\ndisplay.addContextMenuItem(\"negate\", negate)\ndisplay.addContextMenuItem(\"increment\", incr)\n\ndisplay\n\nmapList4 = [\n {\"a\":1, \"b\":2, \"c\":3},\n {\"a\":4, \"b\":5, \"c\":6},\n {\"a\":7, \"b\":8, \"c\":5}\n]\ndisplay = TableDisplay(mapList4)\n\n#set what happens on a double click\ndisplay.setDoubleClickAction(\"runDoubleClick\")\n\ndisplay\n\nprint(\"runDoubleClick fired\")\nprint(display.details)",
"Set index to DataFrame",
"df = pd.read_csv('../resources/data/interest-rates.csv')\ndf.set_index(['m3'])\n\ndf = pd.read_csv('../resources/data/interest-rates.csv')\ndf.index = df['time']\ndf",
"Update cell",
"dataToUpdate = [\n {'a':1, 'b':2, 'c':3},\n {'a':4, 'b':5, 'c':6},\n {'a':7, 'b':8, 'c':9}\n]\ntableToUpdate = TableDisplay(dataToUpdate)\n\ntableToUpdate\n\ntableToUpdate.values[0][0] = 99\ntableToUpdate.sendModel()\n\ntableToUpdate.updateCell(2,\"c\",121)\ntableToUpdate.sendModel()",
"HTML format\nHTML format allows markup and styling of the cell's content. Interactive JavaScript is not supported however.",
"table = TableDisplay({\n 'w': '$2 \\\\sigma$',\n 'x': '<em style=\"color:red\">italic red</em>',\n 'y': '<b style=\"color:blue\">bold blue</b>',\n 'z': 'strings without markup work fine too',\n })\ntable.setStringFormatForColumn(\"Value\", TableDisplayStringFormat.getHTMLFormat())\ntable",
"Auto linking of URLs\nThe normal string format automatically detects URLs and links them. An underline appears when the mouse hovers over such a string, and when you click it opens in a new window.",
"TableDisplay({'Two Sigma': 'http://twosigma.com', 'BeakerX': 'http://BeakerX.com'})"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
mdiaz236/DeepLearningFoundations
|
tensorboard/.ipynb_checkpoints/Anna KaRNNa-checkpoint.ipynb
|
mit
|
[
"Anna KaRNNa\nIn this notebook, I'll build a character-wise RNN trained on Anna Karenina, one of my all-time favorite books. It'll be able to generate new text based on the text from the book.\nThis network is based off of Andrej Karpathy's post on RNNs and implementation in Torch. Also, some information here at r2rt and from Sherjil Ozair on GitHub. Below is the general architecture of the character-wise RNN.\n<img src=\"assets/charseq.jpeg\" width=\"500\">",
"import time\nfrom collections import namedtuple\n\nimport numpy as np\nimport tensorflow as tf",
"First we'll load the text file and convert it into integers for our network to use.",
"with open('anna.txt', 'r') as f:\n text=f.read()\nvocab = set(text)\nvocab_to_int = {c: i for i, c in enumerate(vocab)}\nint_to_vocab = dict(enumerate(vocab))\nchars = np.array([vocab_to_int[c] for c in text], dtype=np.int32)\n\ntext[:100]\n\nchars[:100]",
"Now I need to split up the data into batches, and into training and validation sets. I should be making a test set here, but I'm not going to worry about that. My test will be if the network can generate new text.\nHere I'll make both input and target arrays. The targets are the same as the inputs, except shifted one character over. I'll also drop the last bit of data so that I'll only have completely full batches.\nThe idea here is to make a 2D matrix where the number of rows is equal to the number of batches. Each row will be one long concatenated string from the character data. We'll split this data into a training set and validation set using the split_frac keyword. This will keep 90% of the batches in the training set, the other 10% in the validation set.",
"def split_data(chars, batch_size, num_steps, split_frac=0.9):\n \"\"\" \n Split character data into training and validation sets, inputs and targets for each set.\n \n Arguments\n ---------\n chars: character array\n batch_size: Size of examples in each of batch\n num_steps: Number of sequence steps to keep in the input and pass to the network\n split_frac: Fraction of batches to keep in the training set\n \n \n Returns train_x, train_y, val_x, val_y\n \"\"\"\n \n \n slice_size = batch_size * num_steps\n n_batches = int(len(chars) / slice_size)\n \n # Drop the last few characters to make only full batches\n x = chars[: n_batches*slice_size]\n y = chars[1: n_batches*slice_size + 1]\n \n # Split the data into batch_size slices, then stack them into a 2D matrix \n x = np.stack(np.split(x, batch_size))\n y = np.stack(np.split(y, batch_size))\n \n # Now x and y are arrays with dimensions batch_size x n_batches*num_steps\n \n # Split into training and validation sets, keep the virst split_frac batches for training\n split_idx = int(n_batches*split_frac)\n train_x, train_y= x[:, :split_idx*num_steps], y[:, :split_idx*num_steps]\n val_x, val_y = x[:, split_idx*num_steps:], y[:, split_idx*num_steps:]\n \n return train_x, train_y, val_x, val_y\n\ntrain_x, train_y, val_x, val_y = split_data(chars, 10, 200)\n\ntrain_x.shape\n\ntrain_x[:,:10]",
"I'll write another function to grab batches out of the arrays made by split data. Here each batch will be a sliding window on these arrays with size batch_size X num_steps. For example, if we want our network to train on a sequence of 100 characters, num_steps = 100. For the next batch, we'll shift this window the next sequence of num_steps characters. In this way we can feed batches to the network and the cell states will continue through on each batch.",
"def get_batch(arrs, num_steps):\n batch_size, slice_size = arrs[0].shape\n \n n_batches = int(slice_size/num_steps)\n for b in range(n_batches):\n yield [x[:, b*num_steps: (b+1)*num_steps] for x in arrs]\n\ndef build_rnn(num_classes, batch_size=50, num_steps=50, lstm_size=128, num_layers=2,\n learning_rate=0.001, grad_clip=5, sampling=False):\n \n if sampling == True:\n batch_size, num_steps = 1, 1\n\n tf.reset_default_graph()\n \n # Declare placeholders we'll feed into the graph\n \n inputs = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs')\n x_one_hot = tf.one_hot(inputs, num_classes, name='x_one_hot')\n\n\n targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets')\n y_one_hot = tf.one_hot(targets, num_classes, name='y_one_hot')\n y_reshaped = tf.reshape(y_one_hot, [-1, num_classes])\n \n keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n \n # Build the RNN layers\n \n lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)\n drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)\n cell = tf.contrib.rnn.MultiRNNCell([drop] * num_layers)\n\n initial_state = cell.zero_state(batch_size, tf.float32)\n\n # Run the data through the RNN layers\n rnn_inputs = [tf.squeeze(i, squeeze_dims=[1]) for i in tf.split(x_one_hot, num_steps, 1)]\n outputs, state = tf.contrib.rnn.static_rnn(cell, rnn_inputs, initial_state=initial_state)\n \n final_state = tf.identity(state, name='final_state')\n \n # Reshape output so it's a bunch of rows, one row for each cell output\n \n seq_output = tf.concat(outputs, axis=1,name='seq_output')\n output = tf.reshape(seq_output, [-1, lstm_size], name='graph_output')\n \n # Now connect the RNN putputs to a softmax layer and calculate the cost\n softmax_w = tf.Variable(tf.truncated_normal((lstm_size, num_classes), stddev=0.1),\n name='softmax_w')\n softmax_b = tf.Variable(tf.zeros(num_classes), name='softmax_b')\n logits = tf.matmul(output, softmax_w) + softmax_b\n\n preds = tf.nn.softmax(logits, 
name='predictions')\n \n loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped, name='loss')\n cost = tf.reduce_mean(loss, name='cost')\n\n # Optimizer for training, using gradient clipping to control exploding gradients\n tvars = tf.trainable_variables()\n grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), grad_clip)\n train_op = tf.train.AdamOptimizer(learning_rate)\n optimizer = train_op.apply_gradients(zip(grads, tvars))\n\n # Export the nodes \n export_nodes = ['inputs', 'targets', 'initial_state', 'final_state',\n 'keep_prob', 'cost', 'preds', 'optimizer']\n Graph = namedtuple('Graph', export_nodes)\n local_dict = locals()\n graph = Graph(*[local_dict[each] for each in export_nodes])\n \n return graph",
"Hyperparameters\nHere I'm defining the hyperparameters for the network. The two you probably haven't seen before are lstm_size and num_layers. These set the number of hidden units in the LSTM layers and the number of LSTM layers, respectively. Of course, making these bigger will improve the network's performance but you'll have to watch out for overfitting. If your validation loss is much larger than the training loss, you're probably overfitting. Decrease the size of the network or decrease the dropout keep probability.",
"batch_size = 100\nnum_steps = 100\nlstm_size = 512\nnum_layers = 2\nlearning_rate = 0.001",
"Write out the graph for TensorBoard",
"model = build_rnn(len(vocab),\n batch_size=batch_size,\n num_steps=num_steps,\n learning_rate=learning_rate,\n lstm_size=lstm_size,\n num_layers=num_layers)\n\nwith tf.Session() as sess:\n \n sess.run(tf.global_variables_initializer())\n file_writer = tf.summary.FileWriter('./logs/1', sess.graph)",
"Training\nTime for training which is is pretty straightforward. Here I pass in some data, and get an LSTM state back. Then I pass that state back in to the network so the next batch can continue the state from the previous batch. And every so often (set by save_every_n) I calculate the validation loss and save a checkpoint.",
"!mkdir -p checkpoints/anna\n\nepochs = 1\nsave_every_n = 200\ntrain_x, train_y, val_x, val_y = split_data(chars, batch_size, num_steps)\n\nmodel = build_rnn(len(vocab), \n batch_size=batch_size,\n num_steps=num_steps,\n learning_rate=learning_rate,\n lstm_size=lstm_size,\n num_layers=num_layers)\n\nsaver = tf.train.Saver(max_to_keep=100)\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n \n # Use the line below to load a checkpoint and resume training\n #saver.restore(sess, 'checkpoints/anna20.ckpt')\n \n n_batches = int(train_x.shape[1]/num_steps)\n iterations = n_batches * epochs\n for e in range(epochs):\n \n # Train network\n new_state = sess.run(model.initial_state)\n loss = 0\n for b, (x, y) in enumerate(get_batch([train_x, train_y], num_steps), 1):\n iteration = e*n_batches + b\n start = time.time()\n feed = {model.inputs: x,\n model.targets: y,\n model.keep_prob: 0.5,\n model.initial_state: new_state}\n batch_loss, new_state, _ = sess.run([model.cost, model.final_state, model.optimizer], \n feed_dict=feed)\n loss += batch_loss\n end = time.time()\n print('Epoch {}/{} '.format(e+1, epochs),\n 'Iteration {}/{}'.format(iteration, iterations),\n 'Training loss: {:.4f}'.format(loss/b),\n '{:.4f} sec/batch'.format((end-start)))\n \n \n if (iteration%save_every_n == 0) or (iteration == iterations):\n # Check performance, notice dropout has been set to 1\n val_loss = []\n new_state = sess.run(model.initial_state)\n for x, y in get_batch([val_x, val_y], num_steps):\n feed = {model.inputs: x,\n model.targets: y,\n model.keep_prob: 1.,\n model.initial_state: new_state}\n batch_loss, new_state = sess.run([model.cost, model.final_state], feed_dict=feed)\n val_loss.append(batch_loss)\n\n print('Validation loss:', np.mean(val_loss),\n 'Saving checkpoint!')\n saver.save(sess, \"checkpoints/anna/i{}_l{}_{:.3f}.ckpt\".format(iteration, lstm_size, np.mean(val_loss)))\n\ntf.train.get_checkpoint_state('checkpoints/anna')",
"Sampling\nNow that the network is trained, we can use it to generate new text. The idea is that we pass in a character, then the network will predict the next character. We can use the new one, to predict the next one. And we keep doing this to generate all new text. I also included some functionality to prime the network with some text by passing in a string and building up a state from that.\nThe network gives us predictions for each character. To reduce noise and make things a little less random, I'm going to only choose a new character from the top N most likely characters.",
"def pick_top_n(preds, vocab_size, top_n=5):\n p = np.squeeze(preds)\n p[np.argsort(p)[:-top_n]] = 0\n p = p / np.sum(p)\n c = np.random.choice(vocab_size, 1, p=p)[0]\n return c\n\ndef sample(checkpoint, n_samples, lstm_size, vocab_size, prime=\"The \"):\n prime = \"Far\"\n samples = [c for c in prime]\n model = build_rnn(vocab_size, lstm_size=lstm_size, sampling=True)\n saver = tf.train.Saver()\n with tf.Session() as sess:\n saver.restore(sess, checkpoint)\n new_state = sess.run(model.initial_state)\n for c in prime:\n x = np.zeros((1, 1))\n x[0,0] = vocab_to_int[c]\n feed = {model.inputs: x,\n model.keep_prob: 1.,\n model.initial_state: new_state}\n preds, new_state = sess.run([model.preds, model.final_state], \n feed_dict=feed)\n\n c = pick_top_n(preds, len(vocab))\n samples.append(int_to_vocab[c])\n\n for i in range(n_samples):\n x[0,0] = c\n feed = {model.inputs: x,\n model.keep_prob: 1.,\n model.initial_state: new_state}\n preds, new_state = sess.run([model.preds, model.final_state], \n feed_dict=feed)\n\n c = pick_top_n(preds, len(vocab))\n samples.append(int_to_vocab[c])\n \n return ''.join(samples)\n\ncheckpoint = \"checkpoints/anna/i3560_l512_1.122.ckpt\"\nsamp = sample(checkpoint, 2000, lstm_size, len(vocab), prime=\"Far\")\nprint(samp)\n\ncheckpoint = \"checkpoints/anna/i200_l512_2.432.ckpt\"\nsamp = sample(checkpoint, 1000, lstm_size, len(vocab), prime=\"Far\")\nprint(samp)\n\ncheckpoint = \"checkpoints/anna/i600_l512_1.750.ckpt\"\nsamp = sample(checkpoint, 1000, lstm_size, len(vocab), prime=\"Far\")\nprint(samp)\n\ncheckpoint = \"checkpoints/anna/i1000_l512_1.484.ckpt\"\nsamp = sample(checkpoint, 1000, lstm_size, len(vocab), prime=\"Far\")\nprint(samp)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
andreyf/machine-learning-examples
|
numpy_and_pandas/part1_pandas_intro.ipynb
|
gpl-3.0
|
[
"<center>\n<img src=\"../img/ods_stickers.jpg\">\nОткрытый курс по машинному обучению\n</center>\nАвтор материала: программист-исследователь Mail.ru Group, старший преподаватель Факультета Компьютерных Наук ВШЭ Юрий Кашницкий\n<center>Тема 1. Первичный анализ данных с Pandas</center>\n<center>Часть 1. Обзор библиотеки Pandas</center>\nPandas - это библиотека Python, предоставляющая широкие возможности для анализа данных. С ее помощью очень удобно загружать, обрабатывать и анализировать табличные данные с помощью SQL-подобных запросов. В связке с библиотеками Matplotlib и Seaborn появляется возможность удобного визуального анализа табличных данных.",
"# Python 2 and 3 compatibility\n# pip install future\nfrom __future__ import (absolute_import, division,\n print_function, unicode_literals)\n# отключим предупреждения Anaconda\nimport warnings\nwarnings.simplefilter('ignore')\nimport pandas as pd\nimport numpy as np\n%matplotlib inline\nimport matplotlib.pyplot as plt",
"Основными структурами данных в Pandas являются классы Series и DataFrame. Первый из них представляет собой одномерный индексированный массив данных некоторого фиксированного типа. Второй - это двухмерная структура данных, представляющая собой таблицу, каждый столбец которой содержит данные одного типа. Можно представлять её как словарь объектов типа Series. Структура DataFrame отлично подходит для представления реальных данных: строки соответствуют признаковым описаниям отдельных объектов, а столбцы соответствуют признакам.\nДля начала рассмотрим простые примеры создания таких объектов и возможных операций над ними.\nSeries\n Создание объекта Series из 5 элементов, индексированных буквами:",
"salaries = pd.Series([400, 300, 200, 250], \n index = ['Andrew', 'Bob', \n 'Charles', 'Ann']) \nprint(salaries) \n\nsalaries[salaries > 250]",
"Индексирование возможно в виде s.Name или s['Name'].",
"print(salaries.Andrew == salaries['Andrew']) \n\nsalaries['Carl'] = np.nan\n\nsalaries.fillna(salaries.median(), inplace=True)\n\nsalaries",
"Series поддерживает пропуски в данных.",
"salaries.c = np.nan # Series can contain missing values\nprint(salaries)",
"Объекты Series похожи на ndarray и могут быть переданы в качестве аргументов большинству функций из Numpy.",
"print('Second element of salaries is', salaries[1], '\\n')\n# Smart indexing\nprint(salaries[:3], '\\n')\nprint('There are', len(salaries[salaries > 0]), 'positive elements in salaries\\n')\n# Series objects can be the arguments for Numpy functions\nprint(np.exp(salaries))",
"DataFrame\nСоздание и изменение\nПерейдём к рассмотрению объектов типа DataFrame. Такой объект можно создать из массива numpy, указав названия строк и столбцов.",
"df1 = pd.DataFrame(np.random.randn(5, 3), \n index=['o1', 'o2', 'o3', 'o4', 'o5'], \n columns=['f1', 'f2', 'f3'])\ndf1",
"Альтернативным способом является создание DataFrame из словаря numpy массивов или списков.",
"df2 = pd.DataFrame({'A': np.random.random(5), \n 'B': ['a', 'b', 'c', 'd', 'e'], \n 'C': np.arange(5) > 2})\ndf2",
"Обращение к элементам (или целым кускам фрейма):",
"print('The element in position 3, B is', df2.at[3, 'B'], '\\n')\nprint(df2.loc[[1, 4], ['A', 'B']])",
"Изменение элементов и добавление новых:",
"df2.at[2, 'B'] = 'f'\ndf2\n\ndf2.loc[5] = [3.1415, 'c', False]\ndf2\n\ndf1.columns = ['A', 'B', 'C']\ndf3 = df1.append(df2)\ndf3",
"Обработка пропущенных значений",
"df1.at['o2', 'A'] = np.nan\ndf1.at['o4', 'C'] = np.nan\ndf1",
"Булева маска для пропущенных значений (True - там, где был пропуск, иначе - False):",
"pd.isnull(df1)",
"Можно удалить все строки, где есть хотя бы один пропуск.",
"df1.dropna(how='any')",
"Пропуски можно заменить каким-то значением.",
"df1.fillna(0)",
"Пример первичного анализа данных с Pandas\nЧтение из файла и первичный анализ\nОднако на практике DataFrame, с которым нам предстоит работать, необходимо считать из некоторого файла. Рассмотрим работу с DataFrame на примере следующего набора данных. Для каждрого опрошенного имеется следующая информация: заработная плата за час работы, опыт работы, образование, внешняя привлекательность (в баллах от 1 до 5), бинарные признаки: пол, семейное положение, состояние здоровья (хорошее/плохое), членство в профсоюзе, цвет кожи (белый/чёрный), занятость в сфере обслуживания (да/нет).",
"df = pd.read_csv('../data/beauty.csv', sep = ';')",
"Посмотрим на размер данных и названия признаков.",
"print(df.shape)\nprint(df.columns.values)\n\ndf.head(10)",
"При работе с большими объёмами данных бывает удобно посмотреть только на небольшие части фрейма (например, начало).",
"df.head(4)",
"Метод describe показывает основные статистические характеристики данных по каждому признаку: число непропущенных значений, среднее, стандартное отклонение, диапазон, медиану, 0.25 и 0.75 квартили.",
"df.describe()",
"DataFrame можно отсортировать по значению какого-нибудь из признаков. В нашем случае, например, по размеру заработной платы.",
"df.sort_values(by='wage', ascending = False).head()\n\ndf.sort_values(by=['female', 'wage'],\n ascending=[True, False]).head()",
"Индексация и извлечение данных\nDataFrame можно индексировать по-разному. В связи с этим рассмотрим различные способы индексации и извлечения нужных нам данных из DataFrame на примере простых вопросов.\nДля извлечения отдельного столбца можно использовать конструкцию вида DataFrame['Name']. Воспользуемся этим для ответа на вопрос: какова доля людей с хорошим здоровьем среди опрошенных?",
"df['goodhlth'].mean()",
"Очень удобной является логическая индексация DataFrame по одному столбцу. Выглядит она следующим образом: df[P(df['Name'])], где P - это некоторое логическое условие, проверяемое для каждого элемента столбца Name. Итогом такой индексации является DataFrame, состоящий только из строк, удовлетворяющих условию P по столбцу Name. Воспользуемся этим для ответа на вопрос: какова средняя заработная плата среди женщин?",
"df[df['female'] == 1].head()\n\ndf[(df['goodhlth'] == 1) & \n (df['female'] == 1)].head()\n\ndf[(df['female'] == 0)]['wage'].mean() - \\\ndf[(df['female'] == 1)]['wage'].mean()",
"Какова максимальная заработная плата среди мужчин, имеющих членство в профсоюзе, и с опытом работы до 10 лет?",
"df[(df['female'] == 0) & (df['union'] == 1) \n & (df['exper'] < 10)]['wage'].max()",
"Применение функции к каждому столбцу:",
"df.apply(np.mean) ",
"Группирование данных в зависимости от значения признака looks и подсчет среднего значения по каждому столбцу в каждой группе.",
"df['looks'].describe()\n\ng = df.groupby('looks') \nfor (i, sub_df) in g:\n print(sub_df['wage'].mean(), sub_df['looks'].mean())",
"Обращение к конкретной группе:",
"d1 = g.get_group(1)\nd1",
"Визуализация в Pandas\nМетод scatter_matrix позволяет визуализировать попарные зависимости между признаками (а также распределение каждого признака на диагонали). Проделаем это для небинарных признаков.",
"pd.scatter_matrix(df[['wage', 'exper', 'educ', 'looks']], \n figsize=(15, 15), diagonal='kde')\nplt.show()",
"Для каждого признака можно построить отдельную гистограмму:",
"df['looks'].hist()",
"Или сразу для всех:",
"df.hist(color = 'k', bins = 30, figsize=(15,10))\nplt.show()",
"Полезным также является график типа box plot (\"ящик с усами\"). Он позволяет компактно визуализировать основные характеристики (медиану, нижний и верхний квартили, минимальное и максимальное значение, выбросы) распределения признаков.",
"df.boxplot(column='exper', by='looks')\nplt.show()",
"Можно сделать это, сгруппировав данные по какому-либо другому признаку:",
"df.boxplot(column='exper', by=['female', 'black'], \n figsize=(10,10))\nplt.show()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
guyk1971/deep-learning
|
embeddings/Skip-Gram_word2vec.ipynb
|
mit
|
[
"Skip-gram word2vec\nIn this notebook, I'll lead you through using TensorFlow to implement the word2vec algorithm using the skip-gram architecture. By implementing this, you'll learn about embedding words for use in natural language processing. This will come in handy when dealing with things like machine translation.\nReadings\nHere are the resources I used to build this notebook. I suggest reading these either beforehand or while you're working on this material.\n\nA really good conceptual overview of word2vec from Chris McCormick \nFirst word2vec paper from Mikolov et al.\nNIPS paper with improvements for word2vec also from Mikolov et al.\nAn implementation of word2vec from Thushan Ganegedara\nTensorFlow word2vec tutorial\n\nWord embeddings\nWhen you're dealing with words in text, you end up with tens of thousands of classes to predict, one for each word. Trying to one-hot encode these words is massively inefficient, you'll have one element set to 1 and the other 50,000 set to 0. The matrix multiplication going into the first hidden layer will have almost all of the resulting values be zero. This a huge waste of computation. \n\nTo solve this problem and greatly increase the efficiency of our networks, we use what are called embeddings. Embeddings are just a fully connected layer like you've seen before. We call this layer the embedding layer and the weights are embedding weights. We skip the multiplication into the embedding layer by instead directly grabbing the hidden layer values from the weight matrix. We can do this because the multiplication of a one-hot encoded vector with a matrix returns the row of the matrix corresponding the index of the \"on\" input unit.\n\nInstead of doing the matrix multiplication, we use the weight matrix as a lookup table. We encode the words as integers, for example \"heart\" is encoded as 958, \"mind\" as 18094. Then to get hidden layer values for \"heart\", you just take the 958th row of the embedding matrix. 
This process is called an embedding lookup and the number of hidden units is the embedding dimension.\n<img src='assets/tokenize_lookup.png' width=500>\nThere is nothing magical going on here. The embedding lookup table is just a weight matrix. The embedding layer is just a hidden layer. The lookup is just a shortcut for the matrix multiplication. The lookup table is trained just like any weight matrix as well.\nEmbeddings aren't only used for words of course. You can use them for any model where you have a massive number of classes. A particular type of model called Word2Vec uses the embedding layer to find vector representations of words that contain semantic meaning.\nWord2Vec\nThe word2vec algorithm finds much more efficient representations by finding vectors that represent the words. These vectors also contain semantic information about the words. Words that show up in similar contexts, such as \"black\", \"white\", and \"red\" will have vectors near each other. There are two architectures for implementing word2vec, CBOW (Continuous Bag-Of-Words) and Skip-gram.\n<img src=\"assets/word2vec_architectures.png\" width=\"500\">\nIn this implementation, we'll be using the skip-gram architecture because it performs better than CBOW. Here, we pass in a word and try to predict the words surrounding it in the text. In this way, we can train the network to learn representations for words that show up in similar contexts.\nFirst up, importing packages.",
"import time\n\nimport numpy as np\nimport tensorflow as tf\n\nimport utils",
"Load the text8 dataset, a file of cleaned up Wikipedia articles from Matt Mahoney. The next cell will download the data set to the data folder. Then you can extract it and delete the archive file to save storage space.",
"from urllib.request import urlretrieve\nfrom os.path import isfile, isdir\nfrom tqdm import tqdm\nimport zipfile\n\ndataset_folder_path = 'data'\ndataset_filename = 'text8.zip'\ndataset_name = 'Text8 Dataset'\n\nclass DLProgress(tqdm):\n last_block = 0\n\n def hook(self, block_num=1, block_size=1, total_size=None):\n self.total = total_size\n self.update((block_num - self.last_block) * block_size)\n self.last_block = block_num\n\nif not isfile(dataset_filename):\n with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset_name) as pbar:\n urlretrieve(\n 'http://mattmahoney.net/dc/text8.zip',\n dataset_filename,\n pbar.hook)\n\nif not isdir(dataset_folder_path):\n with zipfile.ZipFile(dataset_filename) as zip_ref:\n zip_ref.extractall(dataset_folder_path)\n \nwith open('data/text8') as f:\n text = f.read()",
"Preprocessing\nHere I'm fixing up the text to make training easier. This comes from the utils module I wrote. The preprocess function coverts any punctuation into tokens, so a period is changed to <PERIOD>. In this data set, there aren't any periods, but it will help in other NLP problems. I'm also removing all words that show up five or fewer times in the dataset. This will greatly reduce issues due to noise in the data and improve the quality of the vector representations. If you want to write your own functions for this stuff, go for it.",
"words = utils.preprocess(text)\nprint(words[:30])\n\nprint(\"Total words: {}\".format(len(words)))\nprint(\"Unique words: {}\".format(len(set(words))))",
"And here I'm creating dictionaries to convert words to integers and backwards, integers to words. The integers are assigned in descending frequency order, so the most frequent word (\"the\") is given the integer 0 and the next most frequent is 1 and so on. The words are converted to integers and stored in the list int_words.",
"vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)\nint_words = [vocab_to_int[word] for word in words]",
"Subsampling\nWords that show up often such as \"the\", \"of\", and \"for\" don't provide much context to the nearby words. If we discard some of them, we can remove some of the noise from our data and in return get faster training and better representations. This process is called subsampling by Mikolov. For each word $w_i$ in the training set, we'll discard it with probability given by \n$$ P(w_i) = 1 - \\sqrt{\\frac{t}{f(w_i)}} $$\nwhere $t$ is a threshold parameter and $f(w_i)$ is the frequency of word $w_i$ in the total dataset.\nI'm going to leave this up to you as an exercise. This is more of a programming challenge, than about deep learning specifically. But, being able to prepare your data for your network is an important skill to have. Check out my solution to see how I did it.\n\nExercise: Implement subsampling for the words in int_words. That is, go through int_words and discard each word given the probablility $P(w_i)$ shown above. Note that $P(w_i)$ is the probability that a word is discarded. Assign the subsampled data to train_words.",
"## Your code here\n# My solution was not using Counter thus was extremely slow. here it is:\n# def freq(word,corpus):\n# return corpus.count(word)/len(corpus)\n#\n# def prob(word,freqs,th):\n# return 1-np.sqrt(th/freqs[word])\n#\n# freqs = {word:freq(word,int_words) for word in int_words}\n# p_drop = {word: prob(word,freqs,th) for word in int_words}\n# train_words = {w for w in int_words if p_drop[w]>np.random.rand()}\n\n\nfrom collections import Counter\nimport random\n\nword_counts=Counter(int_words) # dictionary like with k:v=int_words:count\ntotal_count = len(int_words)\nfreqs={word: count/total_count for word,count in word_counts.items()}\np_drop={word: 1-np.sqrt(th/freqs[word]) for word in word_counts}\ntrain_words = [word for word in int_words if p_drop[word]<random.random()]\n",
"Making batches\nNow that our data is in good shape, we need to get it into the proper form to pass it into our network. With the skip-gram architecture, for each word in the text, we want to grab all the words in a window around that word, with size $C$. \nFrom Mikolov et al.: \n\"Since the more distant words are usually less related to the current word than those close to it, we give less weight to the distant words by sampling less from those words in our training examples... If we choose $C = 5$, for each training word we will select randomly a number $R$ in range $< 1; C >$, and then use $R$ words from history and $R$ words from the future of the current word as correct labels.\"\n\nExercise: Implement a function get_target that receives a list of words, an index, and a window size, then returns a list of words in the window around the index. Make sure to use the algorithm described above, where you choose a random number of words from the window.",
"def get_target(words, idx, window_size=5):\n ''' Get a list of words in a window around an index. '''\n \n # Your code here\n # get random number in the range (1,window_size) - this will be the number of words we'll take\n R=random.randint(1,window_size)\n # what about warping arond ? do we want to allow it ?\n start = max(idx-R,0)\n stop = min(idx+R+1,len(words))\n return words[start:idx]+words[idx+1:stop]\n\n# note that the reference solution used np.random.randint \n# note that the reference solution returned list(set(words[start:idx]+words[idx+1:stop])). not clear why the set() is needed...\n",
"Here's a function that returns batches for our network. The idea is that it grabs batch_size words from a words list. Then for each of those words, it gets the target words in the window. I haven't found a way to pass in a random number of target words and get it to work with the architecture, so I make one row per input-target pair. This is a generator function by the way, helps save memory.",
"def get_batches(words, batch_size, window_size=5):\n ''' Create a generator of word batches as a tuple (inputs, targets) '''\n \n n_batches = len(words)//batch_size\n \n # only full batches\n words = words[:n_batches*batch_size]\n \n for idx in range(0, len(words), batch_size):\n x, y = [], []\n batch = words[idx:idx+batch_size]\n for ii in range(len(batch)):\n batch_x = batch[ii]\n batch_y = get_target(batch, ii, window_size)\n y.extend(batch_y)\n x.extend([batch_x]*len(batch_y))\n yield x, y\n ",
"Building the graph\nFrom Chris McCormick's blog, we can see the general structure of our network.\n\nThe input words are passed in as integers. This will go into a hidden layer of linear units, then into a softmax layer. We'll use the softmax layer to make a prediction like normal.\nThe idea here is to train the hidden layer weight matrix to find efficient representations for our words. We can discard the softmax layer because we don't really care about making predictions with this network. We just want the embedding matrix so we can use it in other networks we build from the dataset.\nI'm going to have you build the graph in stages now. First off, creating the inputs and labels placeholders like normal.\n\nExercise: Assign inputs and labels using tf.placeholder. We're going to be passing in integers, so set the data types to tf.int32. The batches we're passing in will have varying sizes, so set the batch sizes to [None]. To make things work later, you'll need to set the second dimension of labels to None or 1.",
"train_graph = tf.Graph()\nwith train_graph.as_default():\n inputs = tf.placeholder(dtype=tf.int32,shape=[None], name='inputs')\n labels = tf.placeholder(dtype=tf.int32,shape=[None,None],name='labels') # ??? To make things work later, you'll need to set the second dimension of labels to None or 1.",
"Embedding\nThe embedding matrix has a size of the number of words by the number of units in the hidden layer. So, if you have 10,000 words and 300 hidden units, the matrix will have size $10,000 \\times 300$. Remember that we're using tokenized data for our inputs, usually as integers, where the number of tokens is the number of words in our vocabulary.\n\nExercise: Tensorflow provides a convenient function tf.nn.embedding_lookup that does this lookup for us. You pass in the embedding matrix and a tensor of integers, then it returns rows in the matrix corresponding to those integers. Below, set the number of embedding features you'll use (200 is a good start), create the embedding matrix variable, and use tf.nn.embedding_lookup to get the embedding tensors. For the embedding matrix, I suggest you initialize it with a uniform random numbers between -1 and 1 using tf.random_uniform.",
"n_vocab = len(int_to_vocab)\nn_embedding = 200\nwith train_graph.as_default():\n embedding = tf.Variable(tf.random_uniform((n_vocab,n_embedding),-1,1))\n embed = tf.nn.embedding_lookup(embedding,inputs)",
"Negative sampling\nFor every example we give the network, we train it using the output from the softmax layer. That means for each input, we're making very small changes to millions of weights even though we only have one true example. This makes training the network very inefficient. We can approximate the loss from the softmax layer by only updating a small subset of all the weights at once. We'll update the weights for the correct label, but only a small number of incorrect labels. This is called \"negative sampling\". Tensorflow has a convenient function to do this, tf.nn.sampled_softmax_loss.\n\nExercise: Below, create weights and biases for the softmax layer. Then, use tf.nn.sampled_softmax_loss to calculate the loss. Be sure to read the documentation to figure out how it works.",
"# Number of negative labels to sample\nn_sampled = 100\nwith train_graph.as_default():\n softmax_w = tf.Variable(tf.truncated_normal((n_vocab,n_embedding),stddev=0.1))\n softmax_b = tf.Variable(tf.zeros(n_vocab))\n\n\n # Calculate the loss using negative sampling\n loss = tf.nn.sampled_softmax_loss(softmax_w,softmax_b,labels,embed,n_sampled,n_vocab)\n\n cost = tf.reduce_mean(loss)\n optimizer = tf.train.AdamOptimizer().minimize(cost)",
"Validation\nThis code is from Thushan Ganegedara's implementation. Here we're going to choose a few common words and few uncommon words. Then, we'll print out the closest words to them. It's a nice way to check that our embedding table is grouping together words with similar semantic meanings.",
"with train_graph.as_default(): # Question : why do we need this context ? \n ## From Thushan Ganegedara's implementation\n valid_size = 16 # Random set of words to evaluate similarity on.\n valid_window = 100\n # pick 8 samples from (0,100) and (1000,1100) each ranges. lower id implies more frequent \n valid_examples = np.array(random.sample(range(valid_window), valid_size//2))\n valid_examples = np.append(valid_examples, \n random.sample(range(1000,1000+valid_window), valid_size//2))\n\n valid_dataset = tf.constant(valid_examples, dtype=tf.int32)\n \n # We use the cosine distance:\n norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))\n normalized_embedding = embedding / norm\n valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset)\n similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))\n\n# If the checkpoints directory doesn't exist:\n!mkdir checkpoints",
"Training\nBelow is the code to train the network. Every 100 batches it reports the training loss. Every 1000 batches, it'll print out the validation words.",
"epochs = 10\nbatch_size = 1000\nwindow_size = 10\n\nwith train_graph.as_default():\n saver = tf.train.Saver()\n\nwith tf.Session(graph=train_graph) as sess:\n iteration = 1\n loss = 0\n sess.run(tf.global_variables_initializer())\n\n for e in range(1, epochs+1):\n batches = get_batches(train_words, batch_size, window_size)\n start = time.time()\n for x, y in batches:\n \n feed = {inputs: x,\n labels: np.array(y)[:, None]}\n train_loss, _ = sess.run([cost, optimizer], feed_dict=feed)\n \n loss += train_loss\n \n if iteration % 100 == 0: \n end = time.time()\n print(\"Epoch {}/{}\".format(e, epochs),\n \"Iteration: {}\".format(iteration),\n \"Avg. Training loss: {:.4f}\".format(loss/100),\n \"{:.4f} sec/batch\".format((end-start)/100))\n loss = 0\n start = time.time()\n \n if iteration % 1000 == 0:\n ## From Thushan Ganegedara's implementation\n # note that this is expensive (~20% slowdown if computed every 500 steps)\n sim = similarity.eval()\n for i in range(valid_size):\n valid_word = int_to_vocab[valid_examples[i]]\n top_k = 8 # number of nearest neighbors\n nearest = (-sim[i, :]).argsort()[1:top_k+1]\n log = 'Nearest to %s:' % valid_word\n for k in range(top_k):\n close_word = int_to_vocab[nearest[k]]\n log = '%s %s,' % (log, close_word)\n print(log)\n \n iteration += 1\n save_path = saver.save(sess, \"checkpoints/text8.ckpt\")\n embed_mat = sess.run(normalized_embedding)",
"Restore the trained network if you need to:",
"with train_graph.as_default():\n saver = tf.train.Saver()\n\nwith tf.Session(graph=train_graph) as sess:\n saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))\n embed_mat = sess.run(embedding)",
"Visualizing the word vectors\nBelow we'll use T-SNE to visualize how our high-dimensional word vectors cluster together. T-SNE is used to project these vectors into two dimensions while preserving local structure. Check out this post from Christopher Olah to learn more about T-SNE and other ways to visualize high-dimensional data.",
"%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport matplotlib.pyplot as plt\nfrom sklearn.manifold import TSNE\n\nviz_words = 500\ntsne = TSNE()\nembed_tsne = tsne.fit_transform(embed_mat[:viz_words, :])\n\nfig, ax = plt.subplots(figsize=(14, 14))\nfor idx in range(viz_words):\n plt.scatter(*embed_tsne[idx, :], color='steelblue')\n plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
jacobdein/alpine-soundscapes
|
Compute naturalness areas.ipynb
|
mit
|
[
"Compute naturalness areas\nThis notebook computes the naturalness areas for each point in a 'sample_points' vector map based on a 'naturalness' vector map. The computed areas are exported to a csv file.\nThis notebook uses GRASS GIS (7.0.4), and must be run inside of a GRASS environment (start the jupyter notebook server from the GRASS command line).\nThe code in this notebook was modified from examples in \"How to write a Python GRASS GIS 7 addon\" developed for a FOSS4G Europe 2015 workshop. The code is contained in the python-grass-addon repository on GitHub.com.\nRequired packages\nnumpy <br />\npandas <br />\npyprind\nVariable definitions\nnaturalness – vector map representing naturalness <br />\nnaturalness_value_field – name of the field containing the naturalness values <br />\nsample_points – vector map containing observer points <br />\nradius – radius in map units around each sample point to calculate land cover areas within <br />\nuse_viewshed – consider only 'visible' land cover, based on a viewshed from each sample point <br />",
"naturalness = 'naturalness'\n\nnaturalness_value_field = 'value'\n\nn_types = 7\n\nsample_points = 'sample_points_field'\n\nradius = 500\n\nuse_viewshed = False",
"get viewshed suffix (for filenames)",
"def getViewshedSuffix():\n if use_viewshed:\n viewshed_suffix = '_viewshed'\n else:\n viewshed_suffix = ''\n return viewshed_suffix\n\nnaturalness_area_table_filename = \"/home/ubuntu/naturalness_areas_{0}m{1}.csv\".format(radius, getViewshedSuffix())",
"Import statements",
"import numpy as np\nimport pandas\nimport pyprind",
"GRASS import statements",
"import grass.script as gscript\n\nfrom grass.pygrass.vector.geometry import Point\nfrom grass.pygrass.vector import Vector\n\nfrom grass.pygrass.vector import VectorTopo\nfrom grass.pygrass.vector.table import DBlinks",
"Function declarations\nconnect to attribute table",
"def connectToAttributeTable(map):\n vector = VectorTopo(map)\n vector.open(mode='r')\n dblinks = DBlinks(vector.c_mapinfo)\n link = dblinks[0]\n return link.table()",
"extract point from vector map",
"def extractPoint(input, ID, output):\n\n where = 'ID = {0}'.format(ID)\n type = 'point'\n\n gscript.read_command('v.extract',\n input=input,\n where=where,\n output=output,\n type=type, \n overwrite=True)",
"create buffer around point",
"def bufferPoint(input, output, radius):\n gscript.read_command('v.buffer',\n input=input,\n output=output,\n type='point',\n distance=radius,\n overwrite=True)",
"convert viewshed from raster to vector map",
"def vectorizeViewshed(input, ID, output):\n type = 'area'\n column = 'visible'\n\n gscript.read_command('r.to.vect', \n input=input, \n output=output,\n type=type,\n column=column,\n overwrite=True)",
"overlay a vector map on an underlying vector map using 'and' selection operator",
"def overlay(overlay, underlay, output):\n operator='and'\n gscript.read_command('v.overlay',\n ainput=overlay,\n binput=underlay,\n operator=operator,\n output=output,\n overwrite=True)",
"add area column to vector map",
"def calculateAreas(map):\n \n #add new area column\n gscript.read_command('v.db.addcolumn',\n map=map,\n columns=\"area_square_meters DOUBLE PRECISION\")\n \n #compute area and insert into area column\n gscript.read_command('v.to.db',\n map=map,\n type='centroid',\n option='area',\n columns='area_square_meters',\n unit='meters')",
"create table showing total area by landcover type",
"def getNaturalnessAreaByValue(map):\n #get area data\n table = connectToAttributeTable(map=map)\n table.filters.select()\n columns = table.columns.names()\n cursor = table.execute()\n result = np.array(cursor.fetchall())\n cursor.close()\n data = pandas.DataFrame(result, columns=columns).set_index('cat')\n \n #make sure naturalness_value_field is a numeric data type\n data['b_' + naturalness_value_field] = pandas.to_numeric(data['b_' + naturalness_value_field])\n \n #create naturalness column\n data['naturalness'] = np.nan\n \n #define naturalness value categories (round values)\n for index, row in data.iterrows():\n naturalness = np.round(row['b_' + naturalness_value_field])\n data.set_value(index, 'naturalness', int(naturalness))\n \n #sum areas by naturalness value\n data['area_square_meters'] = pandas.to_numeric(data['area_square_meters'])\n areas = data[['naturalness', 'area_square_meters']].groupby(by='naturalness').sum()\n \n #calculate mean\n total_area = data['area_square_meters'].sum()\n percent_area = data['area_square_meters'] / total_area\n weighted_area = percent_area * data['b_' + naturalness_value_field]\n mean = weighted_area.sum()\n \n #add to areas dataframe\n areas = areas.set_value('mean', 'area_square_meters', mean)\n \n return areas",
"Utility functions\nexport vector map to a shapefile",
"def exportVectorToShapefile(map, output):\n gscript.read_command('v.out.ogr',\n input=map,\n format='ESRI_Shapefile',\n output=output,\n flags='e',\n overwrite=True)",
"get info about a vector map",
"def getVectorMapInfo(map):\n return gscript.read_command('v.info', map=map)",
"Compute landcover areas\nconnect to 'sample_points' attribute table",
"point_table = connectToAttributeTable(map=sample_points)\npoint_table.filters.select()\ncolumns = point_table.columns.names()\ncursor = point_table.execute()\nresult = np.array(cursor.fetchall())\ncursor.close()\npoint_data = pandas.DataFrame(result, columns=columns).set_index('cat')",
"loop through sample points",
"with Vector(sample_points, mode='r') as points:\n \n #setup progress bar\n progress_bar = pyprind.ProgBar(points.n_lines, bar_char='█', title='Naturalness analysis progress', monitor=True, stream=1, width=50)\n \n #iterate through points\n for point in points:\n \n #get point ID (SiteID)\n ID = point_data['ID'][point.cat-1]\n \n #update progress bar\n progress_bar.update(item_id=ID)\n \n #buffer current point\n extractPoint(input='sample_points_field', ID=ID, output='tmp_buffer_point')\n bufferPoint(input='tmp_buffer_point', output='tmp_point_buffer', radius=radius)\n \n #set buffer as overlay\n overlay_input = 'tmp_point_buffer'\n #consider only visible naturalness if 'use_viewshed' = True\n if use_viewshed:\n viewshed = 'vect_{0}_viewshed'.format(ID)\n visible_viewshed = 'vect_{0}_viewshed_{1}m'.format(ID, radius)\n #vectorize viewshed\n vectorizeViewshed(input='{0}_viewshed'.format(ID), ID=ID, output=viewshed)\n #overlay buffer on viewshed\n overlay(overlay=overlay_input,\n underlay=viewshed,\n output=visible_viewshed)\n #set overlay to the visible viewshed\n overlay_input = visible_viewshed\n overlay_output = 'vect_{0}_naturalness_{1}m{2}'.format(ID, radius, getViewshedSuffix())\n #overlay naturalness\n overlay(overlay=overlay_input,\n underlay=naturalness,\n output=overlay_output)\n \n #calculate naturalness area\n calculateAreas(map=overlay_output)",
"Export naturalness area table\ncreate table with naturalness areas by type for each point",
"#create table\nindex_start = 0\n''' set the first index number,\n allowing easier insertion into a database table that already contains \n area calculations with other parameters\n (i.e radius and use_viewshed)'''\ncolumns = ['ID', 'SiteID', 'IncludedArea']\ncolumns = columns + [ str(n) for n in range(1,n_types+1) ] + ['mean']\narea_table = pandas.DataFrame(columns=columns)\n\n#set naming variables\nincluded_area = '{0}m{1}'.format(radius, getViewshedSuffix()) #0=radius, 1=viewshed_suffix\nmap_pattern = 'vect_{0}_naturalness_{1}m{2}' #0=ID, 1=radius, 2=viewshed_suffix\n\n#iterate through points\nfor index, point in point_data.iterrows():\n ID = point['ID']\n map = map_pattern.format(ID, radius, getViewshedSuffix())\n #initiate row\n row = {'ID':\"{0:.3g}\".format(int(index) + index_start),\n 'SiteID': str(ID),\n 'IncludedArea': included_area}\n #get naturalness areas\n areas = getNaturalnessAreaByValue(map)\n \n #iterate through area types\n for index, area in areas.iterrows():\n #add area to row\n try:\n row[\"{0:.3g}\".format(int(index))] = area['area_square_meters']\n except ValueError:\n row[index] = area['area_square_meters']\n #append row to table\n area_table = area_table.append(row, ignore_index=True)\n\narea_table.set_index('ID', inplace=True)\n\n#export table to file\narea_table.to_csv(naturalness_area_table_filename, header=False)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
zzsza/Datascience_School
|
19. 문서 전처리/02. NLTK 자연어 처리 패키지 소개.ipynb
|
mit
|
[
"NLTK 자연어 처리 패키지 소개\nNLTK(Natural Language Toolkit) 패키지는 교육용으로 개발된 자연어 처리 및 문서 분석용 파이썬 패키지다. 다양한 기능 및 예제를 가지고 있으며 실무 및 연구에서도 많이 사용된다.\nNLTK 패키지가 제공하는 주요 기능은 다음과 같다.\n\n샘플 corpus 및 사전\n토큰 생성(tokenizing)\n형태소 분석(stemming/lemmatizing)\n품사 태깅(part-of-speech tagging)\n구문 분석(syntax parsing)\n\n샘플 corpus\ncorpus는 분석 작업을 위한 샘플 문서 집합을 말한다. 단순히 소설, 신문 등의 문서를 모아놓은 것도 있지만 대부분 품사. 형태소, 등의 보조적 의미를 추가하고 쉬운 분석을 위해 구조적인 형태로 정리해 놓은 것이 많다.\nNLTK 패키지의 corpus 서브패키지에서는 다음과 같은 다양한 연구용 corpus를 제공한다. 이 목록은 전체 corpus의 일부일 뿐이다.\n\naveraged_perceptron_tagger Averaged Perceptron Tagger\nbook_grammars: Grammars from NLTK Book\nbrown: Brown Corpus\nchat80: Chat-80 Data Files\ncity_database: City Database\ncomparative_sentences Comparative Sentence Dataset\ndependency_treebank. Dependency Parsed Treebank\ngutenberg: Project Gutenberg Selections\nhmm_treebank_pos_tagger Treebank Part of Speech Tagger (HMM)\ninaugural: C-Span Inaugural Address Corpus\nlarge_grammars: Large context-free and feature-based grammars for parser comparison\nmac_morpho: MAC-MORPHO: Brazilian Portuguese news text with part-of-speech tags\nmasc_tagged: MASC Tagged Corpus\nmaxent_ne_chunker: ACE Named Entity Chunker (Maximum entropy)\nmaxent_treebank_pos_tagger Treebank Part of Speech Tagger (Maximum entropy)\nmovie_reviews: Sentiment Polarity Dataset Version 2.0\nnames: Names Corpus, Version 1.3 (1994-03-29)\nnps_chat: NPS Chat\nomw: Open Multilingual Wordnet\nopinion_lexicon: Opinion Lexicon\npros_cons: Pros and Cons\nptb: Penn Treebank\npunkt: Punkt Tokenizer Models\nreuters: The Reuters-21578 benchmark corpus, ApteMod version\nsample_grammars: Sample Grammars\nsentence_polarity: Sentence Polarity Dataset v1.0\nsentiwordnet: SentiWordNet\nsnowball_data: Snowball Data\nstopwords: Stopwords Corpus\nsubjectivity: Subjectivity Dataset v1.0\ntagsets: Help on Tagsets\ntreebank: Penn Treebank Sample\ntwitter_samples: Twitter Samples\nunicode_samples: Unicode Samples\nuniversal_tagset: Mappings to the Universal Part-of-Speech 
Tagset\nuniversal_treebanks_v20 Universal Treebanks Version 2.0\nverbnet: VerbNet Lexicon, Version 2.1\nwebtext: Web Text Corpus\nword2vec_sample: Word2Vec Sample\nwordnet: WordNet\nwords: Word Lists\n\n이러한 corpus 자료는 설치시에 제공되는 것이 아니라 download 명령으로 사용자가 다운로드 받아야 한다.",
"nltk.download('averaged_perceptron_tagger')\nnltk.download(\"gutenberg\")\nnltk.download('punkt')\nnltk.download('reuters')\nnltk.download(\"stopwords\")\nnltk.download(\"taggers\")\nnltk.download(\"webtext\")\nnltk.download(\"wordnet\")\n\nnltk.corpus.gutenberg.fileids()\n\nemma_raw = nltk.corpus.gutenberg.raw(\"austen-emma.txt\")\nprint(emma_raw[:1302])",
"토큰 생성(tokenizing)\n문서를 분석하기 위해서는 우선 긴 문자열을 분석을 위한 작은 단위로 나누어야 한다. 이 문자열 단위를 토큰(token)이라고 한다.",
"from nltk.tokenize import word_tokenize\nword_tokenize(emma_raw[50:100])\n\nfrom nltk.tokenize import RegexpTokenizer\nt = RegexpTokenizer(\"[\\w]+\")\nt.tokenize(emma_raw[50:100])\n\nfrom nltk.tokenize import sent_tokenize\nprint(sent_tokenize(emma_raw[:1000])[3])",
"형태소 분석\n형태소 분석이란 어근, 접두사/접미사, 품사(POS, part-of-speech) 등 다양한 언어적 속성의 구조를 파악하는 작업이다. 구체적으로는 다음과 같은 작업으로 나뉜다.\n\nstemming (어근 추출)\nlemmatizing (원형 복원)\nPOS tagging (품사 태깅)\n\n### Stemming and lemmatizing",
"from nltk.stem import PorterStemmer\nst = PorterStemmer()\nst.stem(\"eating\")\n\nfrom nltk.stem import LancasterStemmer\nst = LancasterStemmer()\nst.stem(\"shopping\")\n\nfrom nltk.stem import RegexpStemmer\nst = RegexpStemmer(\"ing\")\nst.stem(\"cooking\")\n\nfrom nltk.stem import WordNetLemmatizer\nlm = WordNetLemmatizer()\nprint(lm.lemmatize(\"cooking\"))\nprint(lm.lemmatize(\"cooking\", pos=\"v\"))\nprint(lm.lemmatize(\"cookbooks\"))\n\nprint(WordNetLemmatizer().lemmatize(\"believes\"))\nprint(LancasterStemmer().stem(\"believes\"))",
"POS tagging\nPOS(part-of-speech)는 품사를 말한다. \n\nPart-of-Speech Tagset\nhttps://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.htm\nhttp://www.ibm.com/support/knowledgecenter/ko/SS5RWK_3.5.0/com.ibm.discovery.es.ta.doc/iiysspostagset.htm",
"from nltk.tag import pos_tag\ntagged_list = pos_tag(word_tokenize(emma_raw[:100]))\ntagged_list\n\nfrom nltk.tag import untag\nuntag(tagged_list)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
zhangmianhongni/MyPractice
|
Python/notebook/一个SVM RBF分类调参的例子.ipynb
|
apache-2.0
|
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets, svm\nfrom sklearn.svm import SVC\nfrom sklearn.datasets import make_moons, make_circles, make_classification\n%matplotlib inline",
"我们生成一些随机数据来让我们后面去分类,为了数据难一点,我们加入了一些噪音。生成数据的同时把数据归一化",
"X, y = make_circles(noise=0.2, factor=0.5, random_state=1);\nfrom sklearn.preprocessing import StandardScaler\nX = StandardScaler().fit_transform(X)",
"我们先看看我的数据是什么样子的,这里做一次可视化如下:",
"from matplotlib.colors import ListedColormap\ncm = plt.cm.RdBu\ncm_bright = ListedColormap(['#FF0000', '#0000FF'])\nax = plt.subplot()\n\nax.set_title(\"Input data\")\n# Plot the training points\nax.scatter(X[:, 0], X[:, 1], c=y, cmap=cm_bright)\nax.set_xticks(())\nax.set_yticks(())\nplt.tight_layout()\nplt.show()",
"好了,现在我们要对这个数据集进行SVM RBF分类了,分类时我们使用了网格搜索,在C=(0.1,1,10)和gamma=(1, 0.1, 0.01)形成的9种情况中选择最好的超参数,我们用了4折交叉验证。这里只是一个例子,实际运用中,你可能需要更多的参数组合来进行调参。",
"from sklearn.model_selection import GridSearchCV\ngrid = GridSearchCV(SVC(), param_grid={\"C\":[0.1, 1, 10], \"gamma\": [1, 0.1, 0.01]}, cv=4)\ngrid.fit(X, y)\nprint(\"The best parameters are %s with a score of %0.2f\"\n % (grid.best_params_, grid.best_score_))",
"也就是说,通过网格搜索,在我们给定的9组超参数中,C=10, Gamma=0.1 分数最高,这就是我们最终的参数候选。\n到这里,我们的调参举例就结束了。不过我们可以看看我们的普通的SVM分类后的可视化。这里我们把这9种组合各个训练后,通过对网格里的点预测来标色,观察分类的效果图。代码如下:",
"x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\ny_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\nxx, yy = np.meshgrid(np.arange(x_min, x_max,0.02),\n np.arange(y_min, y_max, 0.02))\n\nfor i, C in enumerate((0.1, 1, 10)):\n for j, gamma in enumerate((1, 0.1, 0.01)):\n plt.subplot() \n clf = SVC(C=C, gamma=gamma)\n clf.fit(X,y)\n Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\n\n # Put the result into a color plot\n Z = Z.reshape(xx.shape)\n plt.contourf(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.8)\n\n # Plot also the training points\n plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.coolwarm)\n\n plt.xlim(xx.min(), xx.max())\n plt.ylim(yy.min(), yy.max())\n plt.xticks(())\n plt.yticks(())\n plt.xlabel(\" gamma=\" + str(gamma) + \" C=\" + str(C))\n plt.show()"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
mikekestemont/lot2016
|
Chapter 9 - Text analysis.ipynb
|
mit
|
[
"Chapter 9: What we have covered so far (and a bit more)\n\nIn this chapter, we will work our way through a concise review of the Python functionality we have covered so far. Throughout this chapter, we will work with a interesting, yet not too large dataset, namely the well-known Arabian nights. Alf Laylah Wa Laylah, the Stories of One Thousand and One Nights is a collection of folk tales, collected over many centuries by various authors, translators, and scholars across West, Central and South Asia and North Africa. It forms a huge narrative wheel with an overarching plot, created by the frame story of Shahrazad.\n\nThe stories begin with the tale of king Shahryar and his brother, who, having both been deceived by their respective Sultanas, leave their kingdom, only to return when they have found someone who — in their view — was wronged even more. On their journey the two brothers encounter a huge jinn who carries a glass box containing a beautiful young woman. The two brothers hide as quickly as they can in a tree. The jinn lays his head on the girl’s lap and as soon as he is asleep, the girl demands the two kings to make love to her or else she will wake her ‘husband’. They reluctantly give in and the brothers soon discover that the girl has already betrayed the jinn ninety-eight times before. This exemplar of lust and treachery strengthens the Sultan’s opinion that all women are wicked and not to be trusted. \nWhen king Shahryar returns home, his wrath against women has grown to an unprecedented level. To temper his anger, each night the king sleeps with a virgin only to execute her the next morning. In order to make an end to this cruelty and save womanhood from a \"virgin scarcity\", Sharazad offers herself as the next king’s bride. On the first night, Sharazad begins to tell the king a story, but she does not end it. The king’s curiosity to know how the story ends, prevents him from executing Shahrazad. 
The next night Shahrazad finishes her story, and begins a new one. The king, eager to know the ending of this tale as well, postpones her execution once more. Using this strategy for One Thousand and One Nights in a labyrinth of stories-within-stories-within-stories, Shahrazad attempts to gradually move the king’s cynical stance against women towards a politics of love and justice (see Marina Warner’s Stranger Magic (2013) in case you're interested).\n\nThe first European version of the Nights was translated into French by Antoine Galland. Many translations (in different languages) followed, such as the (heavily criticized) English translation by Sir Richard Francis Burton entitled The Book of the Thousand and a Night (1885). This version is freely available from the Gutenberg project (see here), and will be the one we will explore here.\nFiles and directories\nIn the notebooks we use, there is a convenient way to quickly inspect the contents of a folder using the ls command. Our Arabian nights are contained under the general data folder:",
"ls data/arabian_nights",
"As you can see, this folder holds a number of plain text files, ending in the .txt extension. Let us open a random file:",
"f = open('data/arabian_nights/848.txt', 'r')\ntext = f.read()\nf.close()\nprint(text[:500])",
"Here, we use the open() function to create a file object f, which we can use to access the actual text content of the file. Make sure that you do not pass the 'w' parameter (\"write\") to open(), instead of 'r' (\"read\"), since this would overwrite and thus erase the existing file. After assigning the string returned by f.read() to the variable text, we print the 500 first characters of text to get an impression of what it contains, using simple string indexing ([:500]). Don't forget to close the file again after you have opened or strange things could happen to your file! One little trick which is commonly used to avoid having to explicitly open and close your file is a with block (mind the indentation):",
"with open('data/arabian_nights/848.txt', 'r') as f:\n text = f.read()\nprint(text[:500])",
"This code block does exactly the same thing as the previous one but saves you some typing. In this chapter we would like to work with all the files in the arabian_nights directory. This is where loops come in handy of course, since what we really would like to do, is iterate over the contents of the directory. Accessing these contents in Python is easy, but requires importing some extra functionality. In this case, we need to import the os module, which contains all functionality related to the 'operating system' of your machine, such as directory information:",
"import os",
"Using the dot-syntax (os.xxx), we can now access all functions that come with this module, such as listdir(), which returns a list of the items which are included under a given directory",
"filenames = os.listdir('data/arabian_nights')\nprint(len(filenames))\nprint(filenames[:20])",
"The function os.listdir() returns a list of strings, representing the filenames contained under a directory.\nQuiz\n\nIn Burton's translation some of the 1001 nights are missing. How many?\nCan you come up with a clever way to find out which nights are missing? Hint: a counting loop and some string casting might be useful here!",
"# your code goes here",
"With os.listdir(), you need to make sure that you pass the correct path to an existing directory:",
"os.listdir('data/belgian_nights')",
"It might therefore be convenient to check whether a directory actually exists in a given location:",
"print(os.path.isdir('data/arabian_nights'))\nprint(os.path.isdir('data/belgian_nights'))",
"The second directory, naturally, does not exist and isdir() evaluates to False in this case. Creating a new (and thus empty) directory is also easy using os:",
"os.mkdir('belgian_nights')",
"We can see that it lives in the present working directory now, by typing ls again:",
"ls",
"Or we use Python:",
"print(os.path.isdir('belgian_nights'))",
"Removing directories is also easy, but PLEASE watch out, sometimes it is too easy: if you remove a wrong directory in Python, it will be gone forever... Unlike other applications, Python does not keep a copy of it in your Trash and it does not have a Ctrl-Z button. Please watch out with what you do, since with great power comes great responsiblity! Removing the entire directory which we just created can be done as follows:",
"import shutil\nshutil.rmtree('belgian_nights')",
"And lo behold: the directory has disappeared again:",
"print(os.path.isdir('belgian_nights'))",
"Here, we use the rmtree() command to remove the entire directory in a recursive way: even if the directory isn't empty and contains files and subfolders, we will remove all of them. The os module also comes with a rmdir() but this will not allow you to remove a directory which is not empty, as becomes clear in the OSError raised below:",
"os.rmdir('data/arabian_nights')",
"The folder contains things and therefore cannot be removed using this function. There are, of course, also ways to remove individual files or check whether they exist:",
"os.mkdir('belgian_nights')\nf = open('belgian_nights/1001.txt', 'w')\nf.write('Content')\nf.close()\nprint(os.path.exists('belgian_nights/1001.txt'))\nos.remove('belgian_nights/1001.txt')\nprint(os.path.exists('belgian_nights/1001.txt'))",
"Here, we created a directory, wrote a new file to it (1001.txt), and removed it again. Using os.path.exists() we monitored at which point the file existed. Finally, the shutil module also ships with a useful copyfile() function which allows you to copy files from one location to another, possibly with another name. To copy night 66 to the present directory, for instance, we could do:",
"shutil.copyfile('data/arabian_nights/66.txt', 'new_66.txt')",
"Indeed, we have added an exact copy of night 66 to our present working directory:",
"ls",
"We can safely remove it again:",
"os.remove('new_66.txt')",
"Paths\nThe paths we have used so far are 'relative' paths, in the sense that they are relative to the place on our machine from which we execute our Python code. Absolute paths can also be retrieved and will differ on each computer, because they typically include user names etc:",
"os.path.abspath('data/arabian_nights/848.txt')",
"While absolute paths are longer to type, they have the advantage that they can be used anywhere on your computer (i.e. irrespective of where you run your code from). Paths can be tricky. Suppose that we would like to open one of our filenames:",
"filenames = os.listdir('data/arabian_nights')\nrandom_filename = filenames[9]\nwith open(random_filename, 'r') as f:\n text = f.read()\nprint(text[:500])",
"Python throws a FileNotFoundError, complaining that the file we wish to open does not exist. This situation stems from the fact that os.listdir() only returns the base name of a given file, and not an entire (absolute or relative) path to it. To properly access the file, we must therefore not forget to include the rest of the path again:",
"filenames = os.listdir('data/arabian_nights')\nrandom_filename = filenames[9]\nwith open('data/arabian_nights/'+ random_filename, 'r') as f:\n text = f.read()\nprint(text[:500])",
"Apart from os.listdir() there are a number of other common ways to obtain directory listings in Python. Using the glob module for instance, we can easily access the full relative path leading to our Arabian Nights:",
"import glob\nfilenames = glob.glob('data/arabian_nights/*')\nprint(filenames[:10])",
"The asterisk (*) in the argument passed to glob.glob() is worth noting here. Just like with regular expressions, this asterisk is a sort of wildcard which will match any series of characters (i.e. the filenames under arabian_nights). When we exploit this wildcard syntax, glob.glob() offers another distinct advantage: we can use it to easily filter out filenames which we are not interested in:",
"filenames = glob.glob('data/arabian_nights/*.txt')\nprint(filenames[:10])",
"Interestingly, the command in this code block will only load filenames that end in \".txt\". This is interesting when we would like to ignore other sorts of junk files etc. that might be present in a directory. To replicate similar behaviour with os.listdir(), we would have needed a typical for-loop, such as:",
"filenames = []\nfor fn in os.listdir('data/arabian_nights'):\n if fn.endswith('.txt'):\n filenames.append(fn)\nprint(filenames[:10])",
"Or for you stylish coders out there, you can show off with a list comprehension:",
"filenames = [fn for fn in os.listdir('data/arabian_nights') if fn.endswith('.txt')]",
"However, when using glob.glob(), you might sometimes want to be able to extract a file's base name again. There are several solutions to this:",
"filenames = glob.glob('data/arabian_nights/*.txt')\nfn = filenames[10]\n\n# simple string splitting:\nprint(fn.split('/')[-1])\n\n# using os.sep:\nprint(fn.split(os.sep)[-1])\n\n# using os.path:\nprint(os.path.basename(fn))",
"Both os.sep and os.path.basename have the advantage that they know what separator is used for paths in the operating system, so you don't need to explicitly code it like in the first solution. Separators differ between Windows (backslash) and Mac/Linux (forward slash).\nFinally, sometimes, you might be interested in all the subdirectories of a particular directory (and all the subdirectories of these subdirectories etc.). Parsing such deep directory structures can be tricky, especially if you do not know how deep a directory tree might run. You could of course try stacking multiple loops using os.listdir(), but a more convenient way is os.walk():",
"for root, directory, filename in os.walk(\"data\"):\n print(filename)",
"As you can see, os.walk() allows you to efficiently loop over the entire tree. As always, don't forget that help is right around the corner in your notebooks. Using help(), you can quickly access the documentation of modules and their functions etc. (but only after you have imported the modules first!).",
"help(os.walk)",
"Quiz\nIn the next part of this chapter, we will need a way to sort our stories from the first, to the very last night. For our own convenience we will use a little hack for this. In this quiz, we would like you to create a new folder under data directory, called '1001'. You should copy all the original files from arabian_nights to this new folder, but give the files a new name, prepending zeros to filename until all nights have four digits in their name. 1001.txt stays 1001.txt, for instance, but 66.txt becomes 0066.txt and 2.txt becomes 0002.txt etc. This will make sorting the nights easier below. For this quiz you could for instance use a for loop in combination with a while loop (but don't get stuck in endless loops...)",
"# your quiz code",
"Parsing files\nUsing the code from the previous quiz, it is now trivial to sort our nights sequentially on the basis of their actual name (i.e. a string variable):",
"for fn in sorted(os.listdir('data/1001')):\n print(fn)",
"Using the old filenames, this was not possible directly, because of the way Python sorts strings of unequal lengths. Note that the number in the filenames are represented as strings, which are completely different from real numeric integers, and thus will be sorted differently:",
"for fn in sorted(os.listdir('data/arabian_nights/')):\n print(fn)",
"Note: There is a more elegant, but also slightly less trivial way to achieve the correct order in this case:",
"for fn in sorted(os.listdir('data/arabian_nights/'),\n key=lambda nb: int(nb[:-4])):\n print(fn)",
"Should you be interested: here, we pass a key argument to sort, which specifies which operations should be applied to the filenames before actually sorting them. Here, we specify a so-called lambda function to key, which is less intuitive to read, but which allow you to specify a sort of 'mini-function' in a very condensed way: this lambda function chops off the last four characters from each filename and then converts (or 'casts') the results to a new data type using int(), namely an integer (a 'whole' number, as opposed to floating point numbers). Eventually, this leads to the same order. \n\nMore functions\nSo far, we have been using pre-existing, ready-made functions from Python's standard library, or the standard set of functionality which comes with the programming language. Importantly, there are two additional ways of using functions on your code, which we will cover below: (i) you can write your own functions, and (ii) you can use functions from other, external libraries, which have been developped by so-called 'third parties'. Below, we will for instance use plotting functions from matplotlib, which is a common visualization library for Python. \nAt this point, we have an efficient way of looping over the Arabian Nights sequentially. What we still lack, are functions to load and clean our data. As you could see above, our files still contain a lot of punctuation marks etc., which are perhaps less interesting from the point of view of textual analysis. Let us write a simple function that takes a string as input, and returns a cleaner version of it, where all characters are lowercased, and only alphabetic characters are kept:",
"import re\ndef preprocess(in_str):\n out_str = ''\n for c in in_str.lower():\n if c.isalpha() or c.isspace():\n out_str += c\n whitespace = re.compile(r'\\s+')\n out_str = whitespace.sub(' ', out_str)\n return out_str",
"This code reviews some of the materials from previous chapters, including the use of a regular expression, which converts all consecutive instances of whitespace (including line breaks, for instance) to a single space. After executing the previous code block, we can now test our function:",
"old_str = 'This; is -- a very DIRTY string!'\nnew_str = preprocess(old_str)\nprint(new_str)",
"We can now apply this function to the contents from a random night:",
"with open('data/1001/0007.txt', 'r') as f:\n in_str = f.read()\nprint(preprocess(in_str))",
"This text looks cleaner already! We can now start to extract individual tokens from the text and count them. This process is called tokenization. Here, we make the naive assumption that words are simply space-free alphabetic strings -- which is of course wrong in the case of English words like \"can't\". Note that for many languages there exist better tokenizers in Python (such as the ones in the Natural Language Toolkit (nltk). We suffice with a simpler approach for now:",
"def tokenize(in_str):\n tokens = in_str.split()\n tokens = [t for t in tokens if t]\n return tokens ",
"Using the list comprehension, we make sure that we do not accidentally return empty strings as a token, for instance, at the beginning of a text which starts with a newline. Remember that anything in Python with a length of 0, will evaluate to False, which explains the if t in the comprehension: empty strings will fail this condition. We can start stacking our functions now:",
"with open('data/1001/0007.txt', 'r') as f:\n in_str = f.read()\ntokens = tokenize(preprocess(in_str))\nprint(tokens[:10])",
"We can now start analyzing our nights. A good start would be to check the length of each night in words:",
"print(len(tokens))",
"Quiz\nIterate over all the nights in 1001 in a sorted way. Open, preprocess and tokenize each text. Store in a list called word_counts how many words each story has.",
"# your quiz code",
"We now have a list of numbers, which we can plot over time. We will cover plotting more extensively in one of the next chapters. The things below are just a teaser. Start by importing matplotlib, which is imported as follows by convention:",
"import matplotlib.pyplot as plt\n%matplotlib inline",
"The second line is needed to make sure that the plots will properly show up in our notebook. Let us start with a simple visualization:",
"plt.plot(word_counts)",
"As you can see, this simple command can be used to quickly obtain a visualization that shows interesting trends. On the y-axis, we plot absolute word counts for each of our nights. The x-axis is figured out automatically by matplotlib and adds an index on the horizontal x-axis. Implicitly, it interprets our command as follows:",
"plt.plot(range(0, len(word_counts)), word_counts)",
"When plt.plot receives two flat lists as arguments, it plots the first along the x-axis, and the second along the y-axis. If it only receives one list, it plots it along the y-axis and uses the range we now (redundantly) specified here for the x-axis. This is in fact a subtoptimal plot, since the index of the first data point we plot is zero, although the name of the first night is '1.txt'. Additionally, we know that there are some nights missing in our data. To set this straight, we could pass in our own x-coordinates as follows:",
"filenames = sorted(os.listdir('data/1001'))\nidxs = [int(i[:-4]) for i in filenames]\nprint(idxs[:20])\nprint(min(idxs))\nprint(max(idxs))",
"We can now make our plot more truthful, and add some bells and whistles:",
"plt.plot(idxs, word_counts, color='r')\nplt.xlabel('Word length')\nplt.ylabel('# words (absolute counts)')\nplt.title('The Arabian Nights')\nplt.xlim(1, 1001)",
"Quiz\n\nUsing axvline() you can add vertical lines to a plot, for instance at position:",
"plt.plot(idxs, word_counts, color='r')\nplt.xlabel('Word length')\nplt.ylabel('# words (absolute counts)')\nplt.title(r'The Arabian Nights')\nplt.xlim(1, 1001)\nplt.axvline(500, color='g')",
"Write code that plots the position of the missing nights using this function (and blue lines).",
"# quiz code goes here",
"Right now, we are visualizing texts, but we might also be interested in the vocabulary used in the story collection. Counting how often a word appears in a text is trivial for you right now with custom code, for instance:",
"cnts = {}\nfor word in tokens:\n if word in cnts:\n cnts[word] += 1\n else:\n cnts[word] = 1\nprint(cnts)",
"One interesting item which you can use for counting in Python is the Counter object, which we can import as follows:",
"from collections import Counter",
"This Counter makes it much easier to write code for counting. Below you can see how this counter automatically creates a dictionary-like structure:",
"cnt = Counter(tokens)\nprint(cnt)",
"If we would like to find which items are most frequent for instance, we could simply do:",
"print(cnt.most_common(25))",
"We can also pass the Counter the tokens to count in multiple stages:",
"cnt = Counter()\ncnt.update(tokens)\ncnt.update(tokens)\nprint(cnt.most_common(25))",
"After passing our tokens twice to the counter, we see that the numbers double in size.\nQuiz\nWrite code that makes a word frequency counter named vocab, which counts the cumulative frequencies of all words in the Arabian Nights. Which are the 15 most frequent words? Does that make sense?",
"# quiz code",
"Let us now finally visualize the frequencies of the 15 most frequent items using a standard barplot in matplotlib. This can be achieved as follows. We first split out the names and frequencies, since .mostcommon(n) returns a list of tuples, and we create indices:",
"freqs = [f for _, f in vocab.most_common(15)]\nwords = [w for w, _ in vocab.most_common(15)] # note the use of underscores for 'throwaway' variables\nidxs = range(1, len(freqs)+1)",
"Next, we simply do:",
"plt.barh(idxs, freqs, align='center')\nplt.yticks(idxs, words)\nplt.xlabel('Words')\nplt.ylabel('Cumulative absolute frequencies')",
"Et voilà!\nClosing Assignment\nIn this larger assignment, you will have to perform some basic text processing on the larger set of XML-encoded files under data/TEI/french_plays. For this assignment, there are several subtasks:\n1. Each of these files represent a play written by a particular author (see the <author> element): count how many texts were written by each author in the entire corpus. Make use of a Counter.\n2. Each play has a cast list (<castList>), with a role-element for every character in it. In this element, the civil-attribute encodes the gender of the character (M/F, or another charatcer ). Create for each individual author a barplot using matplotlib, showing the percentage of male, female and 'other' characters as a percentage. Pick beautiful colors.\n3. Difficult: The information contained in the castList is priceless, because it allows us to determine for each word in the play by whom it is uttered, since the <sp> tag encodes which character in the cast list is speaking at a particular time. Parse play 156.xml (L'Amour à la mode) and calculate which of the characters has the highest vocabulary richness: divide the number of unique words in the speaker's utterances by the total number of words (s)he utters. Only consider speakers that utter at least 1000 tokens in the play.\nHint: If your run into encoding errors etc. when processing larger text collections, you can always use try/except constructions to catch these.\n\nIgnore the following, it's just here to make the page pretty:",
"from IPython.core.display import HTML\ndef css_styling():\n styles = open(\"styles/custom.css\", \"r\").read()\n return HTML(styles)\ncss_styling()",
"<p><small><a rel=\"license\" href=\"http://creativecommons.org/licenses/by-sa/4.0/\"><img alt=\"Creative Commons License\" style=\"border-width:0\" src=\"https://i.creativecommons.org/l/by-sa/4.0/88x31.png\" /></a><br /><span xmlns:dct=\"http://purl.org/dc/terms/\" property=\"dct:title\">Python Programming for the Humanities</span> by <a xmlns:cc=\"http://creativecommons.org/ns#\" href=\"http://fbkarsdorp.github.io/python-course\" property=\"cc:attributionName\" rel=\"cc:attributionURL\">http://fbkarsdorp.github.io/python-course</a> is licensed under a <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-sa/4.0/\">Creative Commons Attribution-ShareAlike 4.0 International License</a>. Based on a work at <a xmlns:dct=\"http://purl.org/dc/terms/\" href=\"https://github.com/fbkarsdorp/python-course\" rel=\"dct:source\">https://github.com/fbkarsdorp/python-course</a>.</small></p>"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
linwoodc3/gdeltPyR
|
examples/Basic gdeltPyR Query.ipynb
|
gpl-3.0
|
[
"Basic gdeltPyR Usage\ngdeltPyR retrieves Global Database of Events, Language, and Tone (GDELT) data (version 1.0 or version 2.0) via parallel HTTP GET requests and is an alternative to accessing GDELT data via Google BigQuery . \nPerformance will vary based on the number of available cores (i.e. CPUs), internet connection speed, and available RAM. For systems with limited RAM, Later iterations of gdeltPyR will include an option to store the output directly to disc. \nMemory Considerations\nTake your systems specifications into consideration when running large or complex queries. While gdeltPyR loads each temporary file long enough only to convert it into a pandas dataframe (15 minutes each for 2.0, full day for 1.0 events tables), GDELT data can be especially large and exhaust a computers RAM. For example, Global Knowledge Graph (gkg) table queries can eat up large amounts of RAM when pulling data for only a few days. Before trying month long queries, try single day queries or create a pipeline that pulls several days worth of data, writes to discs, flushes globals, and continues to pull more data. \nRecommended RAM\nIt's best to use a system with at least 8 GB of RAM.\nInstallation\nbash\npip install gdeltPyR\nYou can also install directly from www.github.com\nbash\npip install git+https://github.com/linwoodc3/gdeltPyR\nBasic Usage\ngdeltPyR queries revolve around 4 concepts:\n| Name | Description | Input Possibilities/Examples |\n|----------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------|\n| version | (integer) - Selects the version of GDELT data to query; defaults to version 2. 
| 1 or 2 |\n| date * | (string or list of strings) - Dates to query | \"2016 10 23\" or \"2016 Oct 23\" |\n| coverage | (bool) - For GDELT 2.0, pulls every 15 minute interval in the dates passed in the 'date' parameter. Default coverage is False or None. gdeltPyR will pull the latest 15 minute interval for the current day or the last 15 minute interval for a historic day. | True or False or None |\n| tables * | (string) - The specific GDELT table to pull. The default table is the 'events' table. See the GDELT documentation page for more information | 'events' or 'mentions' or 'gkg' |\nWith these basic concepts, you can run any number of GDELT queries.",
"##############################\n# Import the package\n##############################\nimport gdelt\n\n###############################\n# Instantiate the gdelt object\n##############################\n\ngd = gdelt.gdelt(version=2)",
"To launch your query, pass in your dates. When passing multiple dates, pass as a list of strings. We will time the multi-day query. \nImportant Date Details for GDELT 1.0 and 2.0\nFor GDELT 2.0, every 15 minute interval is a zipped CSV file, and gdeltPyR makes concurrent HTTP GET requests to each file. When the coverage parameter is set to True, each full day of data has 96 15 minute interval files to pull. If you are pulling the current day and coverage is set to True, gdeltPyR all the intervals leading up to the latest 15 minute interval. When coverage is False, the package pulls the last 15 minute interval when querying a historical date and the latest 15 minute interval when querying the current date. Additinally, GDELT 2.0 data only goes back as far as Feb 2015. The additional features of GDELT 2.0 are discussed here. \nGDELT 1.0 releases the previous day's query at 6AM EST of the next day (if today's current date is 23 Oct, the 22 Oct results would be available at 6AM Eastern on 23 Oct).\nThe Query\nTo launch your query, just pass in dates. When passing multiple dates, pass as a list of strings. First, some information on my OS.",
"import platform\nimport multiprocessing\n\nprint (platform.platform())\n\nprint (multiprocessing.cpu_count())",
"And now the query.",
"%time results = gd.Search(['2016 10 19','2016 10 22'],table='events',coverage=True)",
"Let's get an idea for the number of results we returned.",
"results.info()",
"In ~36 seconds, gdeltPyR returned nearly a 900,000 by 61 (rows x columns) Pandas dataframe that only consumes 407.2 MBs of memory. With the data in a tidy format, GDELT data can be analyzed with any number of pandas data analysis pipelines and techniques."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
root-mirror/training
|
SummerStudentCourse/2019/Exercises/WorkingWithFiles/WritingOnFiles_Solution.ipynb
|
gpl-2.0
|
[
"<span style=\"color:red\">\nWait before looking at the solution! Try harder if you can!\n</span>\nWriting on files\nThis is a Python notebook in which you will practice the concepts learned during the lectures.\nWriting histograms\nCreate a TFile containing three histograms filled with random numbers distributed according to a Gaus, an exponential and a uniform distribution.\nClose the file: you will reopen it later.",
"import ROOT\n\nrndm = ROOT.TRandom3(1)\n\nf = ROOT.TFile(\"histos.root\",\"RECREATE\")\n\nhg = ROOT.TH1F(\"gaus\",\"Gaussian numbers\", 64, -4, 4)\nhe = ROOT.TH1F(\"expo\",\"Exponential numbers\", 64, -4, 4)\nhu = ROOT.TH1F(\"unif\",\"Uniform numbers\", 64, -4, 4)\nfor i in xrange(1024):\n hg.Fill(rndm.Gaus())\n he.Fill(rndm.Exp(1))\n hu.Fill(rndm.Uniform(-4,4))\n\nfor h in (hg, he, hu): h.Write()\n \nf.Close()",
"Now, you can invoke the ls command from within the notebook to list the files in this directory. Check that the file is there. You can invoke the rootls command to see what's inside the file.",
"! ls .\n! echo Now listing the content of the file\n! rootls -l ./histos.root",
"Access the histograms and draw them in Python. Remember that you need to create a TCanvas before and draw it too in order to inline the plots in the notebooks.\nYou can switch to the interactive JavaScript visualisation using the %jsroot on \"magic\" command.",
"%jsroot on\nf = ROOT.TFile(\"histos.root\")\nc = ROOT.TCanvas()\nc.Divide(2,2)\nc.cd(1)\nf.gaus.Draw()\nc.cd(2)\nf.expo.Draw()\nc.cd(3)\nf.unif.Draw()\nc.Draw() # Draw the Canvas",
"You can now repeat the exercise above using C++. Transform the cell in a C++ cell using the %%cpp \"magic\".",
"%%cpp\nTFile f(\"histos.root\");\nTH1F *hg, *he, *hu;\nf.GetObject(\"gaus\", hg);\nf.GetObject(\"expo\", he);\nf.GetObject(\"unif\", hu);\nTCanvas c;\nc.Divide(2,2);\nc.cd(1);\nhg->Draw();\nc.cd(2);\nhe->Draw();\nc.cd(3);\nhu->Draw();\nc.Draw(); // Draw the Canvas",
"Inspect the content of the file: TXMLFile\nROOT provides a different kind of TFile, TXMLFile. It has the same interface and it's very useful to better understand how objects are written in files by ROOT.\nRepeat the exercise above, either on Python or C++ - your choice, using a TXMLFILE rather than a TFile and then display its content with the cat command. Can you see how the content of the individual bins of the histograms is stored? And the colour of its markers?\nDo you understand why the xml file is bigger than the root one even if they have the same content?",
"f = ROOT.TXMLFile(\"histos.xml\",\"RECREATE\")\n\nhg = ROOT.TH1F(\"gaus\",\"Gaussian numbers\", 64, -4, 4)\nhe = ROOT.TH1F(\"expo\",\"Exponential numbers\", 64, -4, 4)\nhu = ROOT.TH1F(\"unif\",\"Uniform numbers\", 64, -4, 4)\nfor i in xrange(1024):\n hg.Fill(rndm.Gaus())\n he.Fill(rndm.Exp(1))\n hu.Fill(rndm.Uniform(-4,4))\n\nfor h in (hg, he, hu): h.Write()\n \nf.Close()\n\n! ls -l histos.xml histos.root\n\n! cat histos.xml"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
jsmanrique/grimoirelab-personal-utils
|
Light Github index generator.ipynb
|
mit
|
[
"Github Repositories Index Generator\nThis notebook generates two ElasticSearch (ES) indexes with information about:\n* git (commits, files, lines added, lines removed, commit authors)\n* github (issues, pull requests, time to close in hours)\nLet's start by importing the utils python script, setting up the connection to the ES server and defining some variables",
"import utils\nutils.logging.basicConfig(level=utils.logging.INFO)\nsettings = utils.read_config_file('settings.yml')\nes = utils.establish_connection(settings['es_host'])",
"Let's give a name to the indexes and create them in the ES server. Take care utils.create_ES_index() deletes any existing index with the same name before creating it.",
"index_name_git = 'github-git'\nutils.create_ES_index(es, index_name_git, utils.MAPPING_GITHUB_GIT)\nindex_name_github_issues = 'github-issues'\nutils.create_ES_index(es, index_name_github_issues, utils.MAPPING_GITHUB_ISSUES)",
"Let's import needed backends from Perceval",
"from perceval.backends.core.git import Git\nfrom perceval.backends.core.github import GitHub",
"For each repository in the settings file, get git related info and upload it to defined git ES index",
"for repo_url in settings['github-repo']:\n \n repo_owner = repo_url.split('/')[-2]\n repo_name = repo_url.split('/')[-1]\n repo_git_url = repo_url + '.git'\n \n git_repo = Git(uri=repo_git_url, gitpath='/tmp/'+repo_name)\n \n utils.logging.info('Parsing log from {}'.format(repo_name))\n \n items = []\n bulk_size = 10000\n \n for commit in git_repo.fetch():\n \n contributor_name = commit['data']['Author'].split('<')[0][:-1]\n contributor_email_domain = commit['data']['Author'].split('@')[-1][:-1]\n \n for file in commit['data']['files']:\n if 'added' not in file.keys() or file['added'] == '-': \n file['added'] = 0\n if 'removed' not in file.keys() or file['removed'] == '-':\n file['removed'] = 0\n \n summary = {\n 'date': commit['data']['AuthorDate'], \n 'commit_id': commit['data']['commit'],\n 'contributor_name': contributor_name, \n 'contributor_email_domain': contributor_email_domain,\n 'file': file['file'],\n 'lines_added': file['added'], \n 'lines_removed': file['removed'],\n 'github_owner': repo_owner, 'github_repository': repo_name\n }\n \n items.append({'_index': index_name_git, '_type': 'item', '_source': summary})\n \n if len(items) > bulk_size:\n utils.helpers.bulk(es, items)\n items = []\n utils.logging.info('{} items uploaded'.format(bulk_size))\n \n if len(items) != 0:\n utils.helpers.bulk(es, items)\n utils.logging.info('Remaining {} items uploaded'.format(len(items)))",
"For each repository in the settings file, get github issues related info and upload it to defined github issues ES index",
"import datetime as datetime\n\nfor repo_url in settings['github-repo']:\n \n repo_owner = repo_url.split('/')[-2]\n repo_name = repo_url.split('/')[-1]\n repo_git_url = repo_url + '.git'\n \n github_repo = GitHub(owner=repo_owner, repository=repo_name, api_token=settings['github_token'])\n \n utils.logging.info('Parsing issues from {}'.format(repo_name))\n \n items = []\n \n for issue in github_repo.fetch():\n created_at = issue['data']['created_at']\n \n #If the issue/pull-request is closed, we get the time to close it\n if issue['data']['state'] == 'closed':\n closed_at = issue['data']['closed_at']\n creation_date = datetime.datetime.strptime(created_at, \"%Y-%m-%dT%H:%M:%SZ\")\n closing_date = datetime.datetime.strptime(closed_at, \"%Y-%m-%dT%H:%M:%SZ\")\n delta_time = (closing_date - creation_date).total_seconds()\n else:\n delta_time = None\n \n summary = {\n 'date': created_at, \n 'title': issue['data']['title'],\n 'state': issue['data']['state'],\n 'url': issue['data']['html_url'],\n 'comments': issue['data']['comments'],\n 'closed_at': issue['data']['closed_at'],\n 'time_to_solve': delta_time,\n 'github_owner': repo_owner,\n 'github_repository': repo_name\n }\n \n \"\"\" If there is submitter name, we use it as contributor_name\n If not, we use the github username as contributor_name\n \"\"\"\n if issue['data']['user_data']['name'] != None:\n summary['contributor_name'] = issue['data']['user_data']['name']\n else:\n summary['contributor_name'] = issue['data']['user_data']['login']\n \n \"\"\" If there is someone assigned, we try to get the name as assignee_name\n If there is no name, we use the github username\n \"\"\"\n try:\n summary['assignee_name'] = issue['data']['assignee_data']['name']\n except:\n try:\n summary['assignee_name'] = issue['data']['assignee']['login']\n except:\n summary['assignee_name'] = None\n \n # We check if the item is an issue or pull request\n if 'pull_request' in issue['data'].keys():\n summary['issue_type'] = 'pull-request'\n 
else:\n summary['issue_type'] = 'issue'\n \n items.append({'_index': index_name_github_issues, '_type': 'item', '_source': summary})\n \n if len(items) > bulk_size:\n utils.helpers.bulk(es, items)\n items = []\n utils.logging.info('{} items uploaded'.format(bulk_size))\n \n if len(items) != 0:\n utils.helpers.bulk(es, items)\n utils.logging.info('Remaining {} items uploaded'.format(len(items)))"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
feststelltaste/software-analytics
|
demos/20190425_JUGH_Kassel/DatenanalysenProblemeEntwicklung.ipynb
|
gpl-3.0
|
[
"Mit Datenanalysen Probleme in der Entwicklung aufzeigen\n<small>Java User Group Hessen, Kassel, 25.04.2019</small>\n<b>Markus Harrer</b>, Software Development Analyst\nTwitter: @feststelltaste\nBlog: feststelltaste.de\n<img src=\"../resources/innoq_logo.jpg\" width=20% height=\"20%\" align=\"right\"/>\nDas Problem mit den Problemen in der Softwareentwicklung\nDer typische Software-Problemverlauf\n<img src=\"../resources/schuld4.png\" width=85% align=\"center\"/>\nDas eigentliche Problem\n<img src=\"../resources/kombar0.png\" width=95% align=\"center\"/>\n<img src=\"../resources/kombar3.png\" width=95% align=\"center\"/>\nWie Daten analysieren?\n<div align=\"center\">\n<h3>The <span class=\"yellow\">ultimate</span>, <span class=\"green\">super</span> <span class=\"red\">awesome</span><br/> Quality Management Dashboard</h3>\n\n<img src=\"../resources/Sonarqube-nemo-dashboard_small.png\">\n </div>\n\nHäufigkeit von Fragen vs. deren Risiken\n<img src=\"../resources/risk4.png\" width=95% align=\"center\"/>\nEs braucht zusätzliche, situations-spezifische Datenanalysen!\nWie machen es andere Disziplinen?\nData Science!\nWas ist Data Science?\n\"<b><span class=\"green\">Datenanalysen</span></b> auf nem Mac.\"\n<br/>\n<br/>\n<div align=\"right\"><small>Frei nach https://twitter.com/cdixon/status/428914681911070720</small></div>\n\nMeine Definition von Data Science\nWas bedeutet \"data\"?\n\"Without data you‘re just another person with an opinion.\"\n<br/>\n<div align=\"right\"><small>W. 
Edwards Deming</small></div>\n\n<b>=> Belastbare Erkenntnisse mittels <span class=\"green\">Fakten</span> liefern</b>\nWas bedeutet \"science\"?\n\"The aim of science is to seek the simplest explanations of complex facts.\"\n<br/>\n<div align=\"right\"><small>Albert Einstein</small></div>\n\n<b>=> Neue Erkenntnisse <span class=\"green\">verständlich</span> herausarbeiten</b>\nWas ist ein Data Scientist?\n\"Jemand, der mehr Ahnung von Statistik<br/>\n hat als ein <b><span class=\"green\">Softwareentwickler</span></b><br/>\n und mehr Ahnung von <b><span class=\"green\">Softwareentwicklung</span></b><br/>\n als ein Statistiker.\"\n<br/>\n<br/>\n<div align=\"right\"><small>Nach zu https://twitter.com/cdixon/status/428914681911070720</small></div>\n\n<b>Data Science:</b> Perfect <b><span class=\"green\">match</span></b>!\nWas an Daten analysieren?\nSoftwaredaten!\nAlles was aus der Entwicklung und dem Betrieb der Softwaresysteme so anfällt: \n* Statische Daten\n* Laufzeitdaten\n* Chronologische Daten\n* Daten aus der Software-Community\nZwischenfazit\nData Science <span class=\"red\">❤</span> Software Data <span style=\"color:white\">= <b>Software Analytics</b></span>\nZwischenfazit\nData Science <span class=\"red\">❤</span> Software Data = <b>Software Analytics</b>\nDefinition Software Analytics\n\"Software Analytics is analytics on software data for managers and <b class=\"green\">software engineers</b> with the aim of empowering software development individuals and teams to gain and share insight from their data to <b>make better decisions</b>.\"\n<br/>\n<div align=\"right\"><small>Tim Menzies and Thomas Zimmermann</small></div>\n\nWie Software Analytics umsetzen?\nDer Leitgedanke\n[(Daten + Code + Ergebnis) * gedanklichen Schritt] + komplette Automatisierung\nSchlüsselelement: Computational notebooks\nDer Notebook-Ansatz\n<br/>\n<div align=\"center\"><img src=\"../resources/notebook_approach.jpg\"></div>\n\nTechnologie (1/2)\nKlassischer 
Data-Science-Werkzeugkasten\n* Jupyter\n* Python 3\n* pandas\n* matplotlib\nTechnologie (2/2)\nJupyter funktioniert und integriert sich auch mit\n* Cypher / Neo4j / jQAssistant\n* JVM-Sprachen über beakerx / Tablesaw\n* bash\n* ...\nBeispiele für gezielte Datenanalysen\n\nPerformance-Bottlenecks\nVerborgene Teamkommunikation\nArchitektur-/Design-/Code-Smells\n<b>No-Go-Areas in Altanwendungen</b>\n...\n\nPraktischer Teil\nErstes Hands-On\nNo-Go-Areas in Altanwendungen\nDer Patient\nLinux\n\nBetriebsystem-Kernel\nHat verschiedene Treiberkomponenten\nFast ausschließlich in C geschrieben\nEntwickelt von über 800.000 Entwicklern\n\nI. Idee (1/2)\n<b>Fragestellung</b>\n* Gibt es besonders alte Komponenten, wo sich niemand mehr auskennt (No-Go-Areas)? \n<b>Heuristik</b>\n* Wann waren die letzten Änderungen innerhalb einer Komponente?\nI. Idee (2/2)\nUmsetzung\n\nWerkzeuge: Jupyter, Python, pandas, matplotlib\nDatenquelle: Git Blame Log\n\nMeta-Ziel: Grundfunktionen anhand eines einfachen Show-Cases sehen.\nAusgangsdaten: <b>Git Blame Log</b>\n<div align=\"center\">\n <img src =\"../resources/linux_1.gif\" align=\"center\"/>\n</div>\n\nAusgangsdaten: <b>Git Blame Log</b>\n<div align=\"center\">\n <img src =\"../resources/linux_2.gif\" align=\"center\"/>\n</div>\n\nAusgangsdaten: <b>Git Blame Log</b>\n<div align=\"center\">\n <img src =\"../resources/linux_3_static.gif\" align=\"center\"/>\n</div>\n\nII. Datenbeschaffung\nWir laden Git Blame Daten aus einer CSV-Datei",
"import pandas as pd\nlog = pd.read_csv(\"../dataset/linux_blame_log.csv.gz\")\nlog.head()",
"Was haben wir hier eigentlich?",
"log.info()",
"<b>1</b> DataFrame (~ programmierbares Excel-Arbeitsblatt), <b>4</b> Series (= Spalten), <b>5665947</b> Rows (= Einträge)\nIII. Bereinigen\n\nDaten sind oft nicht so, wie man sie braucht\nDatentypen passen teilweise noch nicht\n\nWir wandeln die Zeitstempel um",
"log['timestamp'] = pd.to_datetime(log['timestamp'])\nlog.head()",
"Wir berechnen uns das Alter jeder Codezeilenänderung",
"log['age'] = pd.Timestamp('today') - log['timestamp']\nlog.head()",
"IV. Anreichern\n\nVorhandenen Daten noch zusätzlich mit anderen Datenquellen verschneiden\nAber auch: Teile aus vorhanden Daten extrahieren\n\n=> Dadurch werden mehrere <b>Perspektiven</b> auf ein Problem möglich\nWir ordnen jeder Zeilenänderung einer Komponente zu",
"log['component'] = log['path'].str.split(\"/\").str[:2].str.join(\":\")\nlog.head()",
"<br/> <small><i>String-Operationen...die dauern. Gibt aber diverse Optimierungsmöglichkeiten!</i></small>\nV. Aggregieren\n\nVorhandene Daten sind oft zu viel für manuelle Sichtung\nNeue Einsichten über Problem aber oft auf hoher Flugbahn möglich\n\nWir fassen nach Komponenten zusammen und arbeiten mit der jeweils jüngsten Zeilenänderung weiter",
"age_per_component = log.groupby('component')['age'].min().sort_values()\nage_per_component.head()",
"IV. Visualisieren\n\nGrafische Darstellung geben Analysen den letzten Schliff\nProbleme können Außenstehenden visuell dargestellt besser kommuniziert werden\n\nWir bauen ein Diagramm mit Min-Alter pro Komponente",
"age_per_component.plot.bar(figsize=[20,5]);",
"Weitere Analysen\n\nCode-Hotspots identifizieren\nRefactorings nachweisen\nPerformance-Hotspots mit jQAssistant/Neo4j finden\nCode-Smells mit jQAssistant/Neo4j aufdecken\nSoftwarerückbau mit Groovy/Tablesaw motivieren\n\nZusammenfassung\n1. Es gibt unglaublich <b>viele Quellen</b> für Analysen<br/>\n2. <b>Problemanalysen</b> mit Standard-Data-Science-Werkzeugen <b>einfach</b> möglich<br/> \n3. Wer mehr <b>Software Analytics</b> will, bekommt auch <b>mehr</b>!<br/> \n<b>=> vom <strong>Problem</strong> über die <span class=\"green\">Daten</span> zur <span class=\"blue\" style=\"background-color: #FFFF00\">Erkenntnis</span>!</b>\nLiteratur\n\n<b>Adam Tornhill: Software Design X-Rays</b> \nWes McKinney: Python For Data Analysis\nLeek, Jeff: The Elements of Data Analytic Style\nTim Menzies, Laurie Williams, Thomas Zimmermann: Perspectives on Data Science for Software Engineering\n\nVielen Dank! Fragen?\n<b>Markus Harrer</b><br/>\ninnoQ Deutschland GmbH\nE-Mail: markus.harrer@innoq.com <br/>\nTwitter: @feststelltaste<br/>\nBlog: feststelltaste.de<br/>\n <br/>\n<img src=\"../resources/innoq_logo.jpg\" width=20% height=\"20%\" align=\"right\"/>"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
sastels/Onboarding
|
2.5 - String exercises.ipynb
|
mit
|
[
"String exercises",
"# Provided simple test() function\ndef test(got, expected):\n if got == expected:\n prefix = ' OK '\n else:\n prefix = ' X '\n print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))",
"Fill in the code for the functions below. main() is already set up\nto call the functions with a few different inputs,\nprinting 'OK' when each function is correct.\nThe starter code for each function includes a 'return'\nwhich is just a placeholder for your code.\nA. doughnuts\nGiven an int count of a number of doughnuts, return a string\nof the form 'Number of doughnuts: <count>', where <count> is the number\npassed in. However, if the count is 10 or more, then use the word 'many'\ninstead of the actual count.\nSo doughnuts(5) returns 'Number of doughnuts: 5'\nand doughnuts(23) returns 'Number of doughnuts: many'",
"def doughnuts(count):\n # +++your code here+++\n return\n\ntest(doughnuts(4), 'Number of doughnuts: 4')\ntest(doughnuts(9), 'Number of doughnuts: 9')\ntest(doughnuts(10), 'Number of doughnuts: many')\ntest(doughnuts(99), 'Number of doughnuts: many')",
"B. both_ends\nGiven a string s, return a string made of the first 2\nand the last 2 chars of the original string,\nso 'spring' yields 'spng'. However, if the string length\nis less than 2, return instead the empty string.",
"def both_ends(s):\n # +++your code here+++\n return\n\ntest(both_ends('spring'), 'spng')\ntest(both_ends('Hello'), 'Helo')\ntest(both_ends('a'), '')\ntest(both_ends('xyz'), 'xyyz')",
"C. fix_start\nGiven a string s, return a string\nwhere all occurences of its first char have\nbeen changed to '*', except do not change\nthe first char itself.\ne.g. 'babble' yields \nba**le\nAssume that the string is length 1 or more.\nHint: s.replace(stra, strb) returns a version of string s\nwhere all instances of stra have been replaced by strb.",
"def fix_start(s):\n # +++your code here+++\n return\n\ntest(fix_start('babble'), 'ba**le')\ntest(fix_start('aardvark'), 'a*rdv*rk')\ntest(fix_start('google'), 'goo*le')\ntest(fix_start('doughnut'), 'doughnut')",
"D. mix_up\nGiven strings a and b, return a single string with a and b separated\nby a space ' ', except swap the first 2 chars of each string.\nAssume a and b are length 2 or more.",
"def mix_up(a, b):\n # +++your code here+++\n return\n\ntest(mix_up('mix', 'pod'), 'pox mid')\ntest(mix_up('dog', 'dinner'), 'dig donner')\ntest(mix_up('gnash', 'sport'), 'spash gnort')\ntest(mix_up('pezzy', 'firm'), 'fizzy perm')",
"E. verbing\nGiven a string, if its length is at least 3,\nadd 'ing' to its end.\nUnless it already ends in 'ing', in which case\nadd 'ly' instead.\nIf the string length is less than 3, leave it unchanged.\nReturn the resulting string.",
"def verbing(s):\n # +++your code here+++\n return\n\ntest(verbing('hail'), 'hailing')\ntest(verbing('swimming'), 'swimmingly')\ntest(verbing('do'), 'do')",
"F. not_bad\nGiven a string, find the first appearance of the\nsubstring 'not' and 'bad'. If the 'bad' follows\nthe 'not', replace the whole 'not'...'bad' substring\nwith 'good'.\nReturn the resulting string.\nSo 'This dinner is not that bad!' yields:\nThis dinner is good!",
"def not_bad(s):\n # +++your code here+++\n return\n\ntest(not_bad('This movie is not so bad'), 'This movie is good')\ntest(not_bad('This dinner is not that bad!'), 'This dinner is good!')\ntest(not_bad('This tea is not hot'), 'This tea is not hot')\ntest(not_bad(\"It's bad yet not\"), \"It's bad yet not\")",
"G. front_back\nConsider dividing a string into two halves.\nIf the length is even, the front and back halves are the same length.\nIf the length is odd, we'll say that the extra char goes in the front half.\ne.g. 'abcde', the front half is 'abc', the back half 'de'.\nGiven 2 strings, a and b, return a string of the form\na-front + b-front + a-back + b-back",
"def front_back(a, b):\n # +++your code here+++\n return\n\ntest(front_back('abcd', 'xy'), 'abxcdy')\ntest(front_back('abcde', 'xyz'), 'abcxydez')\ntest(front_back('Kitten', 'Donut'), 'KitDontenut')",
"Note: This notebook is an adaption of Google's python tutorial https://developers.google.com/edu/python"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
MaxYousif/Data-Science-MSc-Projects
|
SVM Binary Classification.ipynb
|
mit
|
[
"Introduction\nMSc Machine Learning Assignment - Classification task. Private Kaggle Competition: \"Are you sure Brighton's seagull is not a man-made object?\"\nThe aim of the assignment was to build a classifier able to distinguish between man-made and not man-made objects. Each data instance was represented by a 4608 dimensional feature vector. This vector was a concatenation of 4096 dimensional deep Convolutional Neural Networks (CNNs) features extracted from the fc7 activation layer of CaffeNet and 512 dimensional GIST features. \nThree Additional pieces of information were granted: confidence label for each training instance, the test data class proportions, and additional training data containing missing values. \nThis Notebook contains the final workflow employed, to produce the model used to make the final predictions. The original Notebook contained a lot of trial and error methods, such as fine tuning the range of parameters fitted in the model. Some of these details from the original, and rather messy Notebook have been excluded here. Thus, this Notebook only intends to show the code for the key processes leading up to the development of the final model. \nIn addition, this notebook contains a report/commentary documenting the theory concerning the steps of the workflow. The theory is adopted from the Literature, and is referenced appropriately. \nReport also available in PDF, contact me for request. \n1. Approach\n1.1) Introduction of SVM\nThe approach of choice here was the Support Vector Machine (SVM). SVMs were pioneered in the late seventies [1]. SVMs are supervised learning models, which are extensively used for classification [2] and regression tasks [3]. 
In this context, SVM was employed for a binary classification task.\nIn layman’s terms, the basic premise of SVM for classification tasks is to find the optimal separating hyperplane (also called the decision boundary) between classes, through maximizing the margin between the data points closest to the decision boundary and the decision boundary itself. The points closest to the decision boundary are termed support vectors. The reason why the margin is maximized is to improve generalisation of the decision boundary; it is likely that many decision boundaries exist, but the decision boundary that maximizes the margin increases the likelihood that future outliers will be correctly classified [4]. This intuition seems relatively simple, but is complicated by ‘soft’ and ‘hard’ margins. A hard margin is only applicable when the data set is linearly separable. A soft margin is applicable when the data set is not linearly separable. Essentially, a soft margin is aware of future misclassifications due to the data not being linearly separable, thus tolerates misclassifications using a penalty term. On the contrary, a hard margin does not tolerate misclassifications. For a hard margin, misclassifications are dealt with by minimizing the margin. These concepts will be formalized below. \nAssume the problem of binary classification on a dataset ${(x_1, y_1), (x_2, y_2), ..., (x_n, y_n)}$ , where $x_i$ $\\in$ $R^d$, i.e. $x_i$ is a data point represented as a d-dimensional vector, and $y_i$ $\\in$ ${-1, 1}$ , which represents the class label of that data point, for $i= 1, 2, ..., n$ . A better optimal separation can be found by first transforming the data into a higher dimensional feature space by a non-linear mapping function $\\phi$ [2]. This $\\phi$ is also referred to as the ‘kernel’. A possible decision boundary can then be represented by $w \\cdot \\phi(x) + b = 0$ , where $w$ is the weight vector orthogonal to the decision boundary and $b$ is an intercept term. 
It follows that, if the data set is linearly separable, then the decision boundary that maximizes the margin can be found by solving the following optimization: $\\min (\\frac{1}{2} w \\cdot w) $ under the constraint $y_i (w \\cdot \\phi(x_i) + b) \\ge 1 $ where $i = 1, 2, ..., n$ . This encapsulates the concept of a ‘hard’ margin. However, in the case of non-linearly separable data, the above constraint has to be relaxed by the introduction of a slack variable $\\varepsilon$ . The optimization problem then becomes: $\\min(\\frac{1}{2} w \\cdot w + C \\sum_{i=1}^n \\varepsilon_i$) such that $y_i (w \\cdot \\phi(x_i) + b) \\ge 1 - \\varepsilon_i $ where $i = 1, 2, ..., n$ and $\\varepsilon_i \\ge 0$. The $\\sum_{i=1}^n \\varepsilon_i$ term can be interpreted as the misclassification cost. This new objective function comprises two aims. The first aim still remains to maximize the margin, and the second aim is to reduce the number of misclassifications. The trade-off between these two aims is controlled by the parameter $C$. This encapsulates the concept of a ‘soft’ margin. \n$C$ is coined the regularization parameter. A high value of $C$ increases the penalty for misclassifications, thus places more emphasis on the second goal. A large misclassification penalty enforces the model to reduce the number of misclassifications. Hence, a high enough value of $C$ could induce over-fitting. A small $C$ decreases the penalty for misclassifications, thus places more emphasis on the first goal. A small classification penalty enforces the model to tolerate classifications more readily. Hence, a small enough value of $C$ could induce under-fitting.\nThe SVM classifier is trained using the hinge-loss as the loss function [5]. \n1.2) Suitability of SVM \nSVM is a popular technique because of its solid mathematical background, high generalisation capability, ability to find global solutions and ability to find solutions that are non-linear [6]. 
However, SVMs can be impacted by data sets that do not have equal class balances. The methods for dealing with this will be discussed in Section 2.1 of this report. Thus, SVMs are still applicable to data sets with class imbalances, such as the data set provided here.\nIt has been argued that SVMs show superior performance than other techniques when analysis is conducted on high- dimensional data [7]. The dataset here, even after pre- processing, has many dimensions. Thus, the use of SVM in this context is justified.\nAnother downfall of SVM is the dependency on feature scaling; the performance of an SVM can be highly impacted by the selection of the feature scaling method. However, feature scaling is an important pre-processing technique. One encouraging reason for employing feature scaling is that the gradient descent algorithm converges much faster with feature scaling than without feature scaling. In particular, feature scaling reduces the time it takes for the SVM to find support vectors [8].\n2. Data Preparation Before Pre-Processing\nThis section will cover how the training data for the final model was prepared. Several additonal pieces of information were provided in the assignment outline. This section will demonstrate how these strands of information were incorporated, if they were incorporated at all.",
"#Import Relevant Modules and Packages \nimport pandas as pd\nimport numpy as np \nfrom sklearn.svm import SVC\nfrom sklearn import metrics\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.decomposition import PCA\nfrom scipy import stats\nfrom sklearn.feature_selection import VarianceThreshold\n#see all rows of dataframe\n#pd.set_option('display.max_rows', 500)\n\n#Load the complete training data set \ntraining_data = pd.read_csv(\"/Users/Max/Desktop/Max's Folder/Uni Work/Data Science MSc/Machine Learning/ML Kaggle Competition /Data Sets/Training Data Set.csv\", header=0, index_col=0)\n\n#Observe the original training data \ntraining_data.head()\n\n#quantify class counts of original training data \ntraining_data.prediction.value_counts()",
"2.1) Dealing with Missing Values – Imputation\nImputation is the act of replacing missing data values in a data set with meaningful values. Simply removing rows with missing feature values is bad practice if the data is scarce, as a lot of information could be lost. In addition, deletion methods can introduce bias [9].\nThe incomplete additional training data was combined with the complete original training data because the complete original data was scarce in number. However, the incomplete additional training data was missing values, therefore imputation was appropriate, if not required. Two methods of imputation were employed. The first method of imputation employed was imputation via feature means. However, this method has been heavily criticized. In particular, it has been hypothesized that imputation via mean introduces bias and underestimates variability [10].\nThe second method of imputation employed was k- Nearest-Neighbours (kNN) [11]. This is a technique, which is part of hot-deck imputation techniques [12], where missing feature values are filled from data points that are similar, or geometrically speaking, points that are closest in distance. This method is more appropriate than using the mean imputation, given the flaws of feature mean imputation. Therefore, kNN was the imputation method used to build the final model. The kNN implementation was found in the ‘fancyimpute’ package [13].\nThe k of kNN can be considered a parameter that needs to be chosen carefully. Fortunately, the literature provides some direction on this. The work of [14] suggests that kNN with 3 nearest-neighbours is the best for the trade-off between imputation error and preservation of data structure. In summary, kNN was employed for imputation, and k was set to 3.\nThis section will cover how the incomplete additional training data set was incorporated to develop a larger training data set. In particular, the additional training data was combined with the original training data. 
The additonal training data was incomplete, with several NaN entries. Thus, imputation was performed to replace NaN entries with meaningful values.",
"#Load additional training data \nadd_training_data = pd.read_csv(\"/Users/Max/Desktop/Max's Folder/Uni Work/Data Science MSc/Machine Learning/ML Kaggle Competition /Data Sets/Additional Training Data Set .csv\", header=0, index_col=0)\n\n#observe additional training data \nadd_training_data\n\n#quantify class counts of additional training data \nadd_training_data.prediction.value_counts()\n\n#find number of NAs for each column for additional training data\nadd_training_data.isnull().sum()\n\n#concatenate original training data with additional training data \nfull_training_data_inc = pd.concat([training_data, add_training_data])\n#observe concatenated training data \nfull_training_data_inc",
"A couple of imputation methods were tried in the original Notebook: \n\nImputation using the column (feature) mean \nImputation with K-Nearest Neighbours \n\nThe most effective and theoretically advocated method producing the best results was the second method; imputation using the K-Nearest Neighbours. Note: the fancyimpute package may need installing before running. K was set to 3 here, see report for justification.",
"#imputation via KNN\nfrom fancyimpute import KNN\nknn_trial = full_training_data_inc\nknn_trial\n\ncomplete_knn = KNN(k=3).complete(knn_trial)\n\n#convert imputed matrix back to dataframe for visualisation and convert 'prediction' dtype to int\ncomplete_knn_df = pd.DataFrame(complete_knn, index=full_training_data_inc.index, columns=full_training_data_inc.columns)\nfull_training_data = complete_knn_df\nfull_training_data.prediction = full_training_data.prediction.astype('int')\nfull_training_data\n\n#quantify class counts for full training data \nfull_training_data.prediction.value_counts()",
"2.2) Dealing with Confidence Labels \nOne approach employed to incorporate the confidence labels was to use the confidence label of each instance as the corresponding sample weight for the instance. Theoretically, a confidence label of smaller than 1 would reduce the C parameter, which results in a lower penalty for misclassification of an instance whose label is not known with certainty. However, the implementation of this did not follow the theory; introducing the sample weights reduced the overall accuracy of the model. This matter was complicated more by the fact that samples generated from over-sampling via SMOTE had to be assigned a confidence label, which is difficult to determine objectively. Thus, it was decided that only data instances with a confidence label of 1 should be retained in the training data. This obviously leads to a massive loss of information. However, after removing instances, which do not have a confidence label of 1, 1922 training instances remained, which can be assumed to be a reasonable training data size. After truncating the data set, the procedure described in Section 2.2 was repeated for the truncated training data.\nIn summary, the training data was truncated to only include instances that have a confidence label of 1. The minority class of the training data was over-sampled using SMOTE to balance the class split. Class weights were then applied during the training of the SVM to ensure that the model was more sensitive to correctly classifying the majority class of the test data.\nThis section will cover how the confidence labels, one of the additional pieces of information provided in the assignment outline, were incorporated into the final training data set.",
"#Load confidence annotations \nconfidence_labels = pd.read_csv(\"/Users/Max/Desktop/Max's Folder/Uni Work/Data Science MSc/Machine Learning/ML Kaggle Competition /Data Sets/Annotation Confidence .csv\", header=0, index_col=0)\n\n#quantify confidence labels (how many are 1, how many are 0.66)\nprint(confidence_labels.confidence.value_counts())\n\n#observe confidence annotations \nconfidence_labels\n\n#adding confidence of label column to imputed full training data set \nfull_train_wcl = pd.merge(full_training_data, confidence_labels, left_index=True, right_index=True)\nfull_train_wcl",
"The original Notebook tried a couple of methods of incorporating the confidence labels into the model:\n\nUse all data samples, irrespective of confidence labels. However, the confidence label of each instance was set to be sample weight of each instance in the training phase. \nOnly use instances that have a confidence label of 1. \n\nThe best model was based on Method 2. Thus, only method 2 will be shown for this section here.",
"#only keep data instance with confidence label = 1\nconf_full_train = full_train_wcl.loc[full_train_wcl['confidence'] == 1]\nconf_full_train\n\n#quantify class counts \nconf_full_train.prediction.value_counts()\n\n#convert full training data dataframe with confidence instances only to matrix\nconf_ft_matrix = conf_full_train.as_matrix(columns=None)\nconf_ft_matrix\nconf_ft_matrix.shape\n\n#splitting full training data with confidence into inputs and outputs \nconf_ft_inputs = conf_ft_matrix[:,0:4608]\nprint(conf_ft_inputs.shape)\nconf_ft_outputs = conf_ft_matrix[:,4608]\nprint(conf_ft_outputs.shape)",
"2.3) Dealing with Class Imbalance\nBinary classification tasks suffer from imbalanced class splits. Training a model on a data set with more instances for one class than the other class can result in biases towards the majority class, as sensitivity will be lost in detecting the minority class [17]. This is pertinent because the training data (additional and original data included) has an unbalanced class split, with more instances of Class 1 than Class 0. Thus, training the model on this data would result in a model that is biased towards Class 1 detections. To exacerbate this issue, the test data is also unbalanced, but the majority class for the test data is Class 0.\nSome researchers have already attempted to tackle this problem. There are two primary methods of dealing with class imbalances: balancing or further unbalancing the data set as needs fit, or introducing class weights, where the underlying algorithm applies disparate\nmisclassification penalties to different classes [15]. Both approaches will be combined here, to first balance the data set, and then train the model to be bias towards Class 0 instances, as Class 0 is the majority class in the test data. The ‘imbalanced-learn’ API [16] has implementations of class balancing strategies found in the literature, such as SMOTE [17]. The premise of SMOTE is over-sampling of the minority class until a balance in the data set is reached. The sampling method is based on sampling via kNN. Unlike kNN for imputation, the best k was suggested to be 5 here.\nOnce the data set was balanced through SMOTE, class weights were introduced. Considering the test data has more instances belonging to Class 0, the class weights were adjusted so that misclassification of Class 0 is penalized more heavily than misclassification of Class 1. The ratio of class weights for training were adjusted to match the class proportions of the test data, i.e. 
Class 0 weight = 1.33 and Class 1 weight = 1.\nThe reason over-sampling of the minority class was preferred over under-sampling of the majority class is because the data quantity was already scarce (evident from Section 2.3). Furthermore, over-sampling to reach a class balance permits the use of ‘accuracy’ as the accuracy metric, as opposed to using AUC, which is more complex. In summary, as well as balancing the train data class-split, the model itself was adjusted to place more emphasis on correct Class 0 classifications.\nThis section will cover how the class imbalance of the training data was addressed. The best approach for this was Over-Sampling using SMOTE. This technique over-samples the minority class until the data set is completely balanced. Note: may need to install imblearn package first.",
"from imblearn.over_sampling import SMOTE \nfrom collections import Counter\n\n#fit over-sampling to training data inputs and putputs\nover_sampler = SMOTE(ratio='auto', k_neighbors=5, kind='regular', random_state=0)\nover_sampler.fit(conf_ft_inputs, conf_ft_outputs)\n\n#create new inputs and outputs with correct class proportions \nresampled_x, resampled_y = over_sampler.fit_sample(conf_ft_inputs, conf_ft_outputs)\n\n#quantify original class proportions prior to over-sampling\nCounter(conf_ft_outputs)\n\n#quantify class proportions after over-sampling\nCounter(resampled_y)\n\n#assign newly sampled input and outputs to old variable name used for inputs and outputs before\n#over-sampling \nconf_ft_inputs = resampled_x\nconf_ft_outputs = resampled_y\nprint(Counter(conf_ft_outputs))",
"3. Pre-Processing\nThe pre-processing of the data consisted of several steps. First, the features were rescaled appropriately. Secondly, Feature Extraction was performed to reduce the unwieldy dimensionality of the training data set, concomitantly increasing the signal-to-noise ratio and decreasing time complexity.\nThis section will cover the Pre-Processing conducted that produced the model capable of producing the best predictions. Feature Scaling was achieved via several methods. The best method was standardisation. Feature Extraction was achieved via PCA.\n3.1) Feature Scaling\nFeature scaling is important because it ensures that features have values plotted on the same scale, irrespective of the original units used to describe the original features. Feature scaling can be in the form of standardization, normalization or rescaling. The correct choice of feature scaling method is arbitrary and highly dependent on context. Thus, all three approaches were tried. The optimal results were obtained for standardization.",
"#standardise the full training data with confidence labels 1 only\nscaler_2 = preprocessing.StandardScaler().fit(conf_ft_inputs)\nstd_conf_ft_in = scaler_2.transform(conf_ft_inputs)\nstd_conf_ft_in",
"3.2) Principal Component Analysis (PCA)\nHigh-dimensionality should be reduced because it is likely to contain noisy features and because high-dimensionality increases computational time complexity [18]. Dimensionality reduction can be achieved via feature selection methods, such as filters and wrappers [19], or via feature extraction methods, such as PCA [20]. Here, the dimensionality reduction was conducted via feature extraction, vicariously through PCA. The rationale behind this is that the relative importance of GIST and CNN features is undetermined. Furthermore, feature selection methods may require some domain expertise to be effective.\nPCA uses the covariance matrix, its eigenvectors and eigenvalues to engineer principal components, which are uncorrelated eigenvectors that explain some proportion of the variance found in the dataset. The optimal number of principal components to engineer is arbitrary. Thus, the optimal number of principal components can be configured experimentally. This can be achieved by plotting the change in variance explained as a function of the number of principal components included, and by calculating the test score during cross validation for data transformed using different numbers of principal components.",
"import matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline\n\n#preprocessing: PCA (feature construction). High number of pcs chosen to plot a graph\n#showing how much more variance is explained as pc number increases \npca_2 = PCA(n_components=700, random_state=0)\nstd_conf_ft_in_pca = pca_2.fit_transform(std_conf_ft_in)\n#quantify amount of variance explained by principal components\nprint(\"Total Variance Explained by PCs (%): \", np.sum(pca_2.explained_variance_ratio_))",
"The cell below will plot how much more of the variance in the data set is explained as the number of principal components included is increased.",
"#calculate a list of cumulative sums for amount of variance explained\ncumulative_variance = np.cumsum(pca_2.explained_variance_ratio_)\nlen(cumulative_variance)\n#add 0 to the beginning of the list, otherwise list starts with variance explained by 1 pc\ncumulative_variance = np.insert(cumulative_variance, 0, 0) \n\n#define range of pcs\npcs_4_var_exp = np.arange(0,701,1)\nlen(pcs_4_var_exp)\n\nfig_1 = plt.figure(figsize=(7,4))\nplt.title('Number of PCs and Change In Variance Explained')\nplt.xlabel('Number of PCs')\nplt.ylabel('Variance Explained (%)')\nplt.plot(pcs_4_var_exp, cumulative_variance, 'x-', color=\"r\")\nplt.show()",
"The graph above suggests that the maximum number of principal components should not exceed 300, as less and less variance is explained as the number of principal components included increases beyond 300. For the optimisation, the optimal number of principal components was initially assumed to be 230.",
"#preprocessing: PCA (feature construction)\npca_2 = PCA(n_components=230, random_state=0)\nstd_conf_ft_in_pca = pca_2.fit_transform(std_conf_ft_in)\n#quantify ratio of variance explain by principal components\nprint(\"Total Variance Explained by PCs (%): \", np.sum(pca_2.explained_variance_ratio_))",
"4. Model Selection\nThe optimization was conducted through the use of a Grid search. In addition, the optimization was conducted for two kernels: the polynomial kernel and the RBF kernel. The initial search for optimal parameters was conducted on a logarithmic scale to explore as much of the parameter space as possible. From the results, the parameter ranges were refined and pruned to only include the potential best candidates. The choice of parameters was purely based on accuracy metrics, not on any other practical factors such as memory consumption or time complexity of predictions. The best model was determined on the following merits: \n1. Good generalisation - achieving a high testing 356 score during cross-validation. \n2. Avoidance of over-fitting - restriction on the magnitude of training scores during cross-validation. In particular, a training score beyond 360 an arbitrary limit is indicative of over-fitting. 361 Thus, a balance had to be struck to ensure that good 362 generalisation can be assumed.\nThis section covers how the best model was selected. Two kernels were tried and tested: RBF and polynomial. RBF outperformed polynomial, therefore only the optimisation results of RBF will be presented here. Furthermore, the parameter ranges to try have already been pruned at this point, so only the final ranges will be used to perform a Grid Search. \n4.1) Parameter Optimisation",
"#this cell takes around 7 minutes to run\n#parameter optimisation with Exhaustive Grid Search, with class weight \noriginal_c_range = np.arange(0.85, 1.01, 0.01)\ngamma_range = np.arange(0.00001, 0.00023, 0.00002)\n\n#define parameter ranges to test\nparam_grid = [{'C': original_c_range, 'gamma': gamma_range, 'kernel': ['rbf'],\n 'class_weight':[{0:1.33, 1:1}]}]\n\n#define model to do parameter search on\nsvr = SVC()\nclf = GridSearchCV(svr, param_grid, scoring='accuracy', cv=5,)\nclf.fit(std_conf_ft_in_pca, conf_ft_outputs)\n\n#create dictionary of results\nresults_dict = clf.cv_results_\n\n#convert the results into a dataframe\ndf_results = pd.DataFrame.from_dict(results_dict)\ndf_results",
"The cell below will plot two heat-maps side by side: one for showing how the training accuracy changes during cross-validation for different combinations of parameters, and one for showing how the testing accuracy changes during cross-validation for different combinations of parameters.",
"#Draw heatmap of the validation accuracy as a function of gamma and C\nfig = plt.figure(figsize=(10, 10))\nix=fig.add_subplot(1,2,1)\nval_scores = clf.cv_results_['mean_test_score'].reshape(len(original_c_range),len(gamma_range))\nval_scores\n\nax = sns.heatmap(val_scores, linewidths=0.5, square=True, cmap='PuBuGn', \n xticklabels=gamma_range, yticklabels=original_c_range, cbar_kws={'shrink':0.5})\nax.invert_yaxis()\nplt.yticks(rotation=0, fontsize=10)\nplt.xticks(rotation= 70,fontsize=10)\nplt.xlabel('Gamma', fontsize=15)\nplt.ylabel('C', fontsize=15)\nplt.title('Validation Accuracy', fontsize=15)\n\n#Draw heatmap of the validation accuracy as a function of gamma and C\nix=fig.add_subplot(1,2,2)\ntrain_scores = clf.cv_results_['mean_train_score'].reshape(len(original_c_range),len(gamma_range))\ntrain_scores\n#plt.figure(figsize=(6, 6))\nax_1 = sns.heatmap(train_scores, linewidths=0.5, square=True, cmap='PuBuGn', \n xticklabels=gamma_range, yticklabels=original_c_range, cbar_kws={'shrink':0.5})\nax_1.invert_yaxis()\nplt.yticks(rotation=0, fontsize=10)\nplt.xticks(rotation= 70,fontsize=10)\nplt.xlabel('Gamma', fontsize=15)\nplt.ylabel('C', fontsize=15)\nplt.title('Training Accuracy', fontsize=15)\nplt.show()",
"The cells below will plot a Validation Curves for Gamma.",
"#import module/library \nfrom sklearn.model_selection import validation_curve\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\n#specifying gamma parameter range to plot for validation curve \nparam_range = gamma_range\nparam_range\n\n#calculating train and validation scores \ntrain_scores, valid_scores = validation_curve(SVC(C=0.92, kernel='rbf', class_weight={0:1.33, 1:1}), std_conf_ft_in_pca, conf_ft_outputs, param_name='gamma',param_range=param_range,scoring='accuracy')\ntrain_scores_mean = np.mean(train_scores, axis=1)\ntrain_scores_std = np.std(train_scores, axis=1)\nvalid_scores_mean = np.mean(valid_scores, axis=1)\nvalid_scores_std = np.std(valid_scores, axis=1)\n\n#plotting validation curve \nplt.title('Gamma Validation Curve for SVM With RBF Kernel | C=0.92')\nplt.xlabel('Gamma')\nplt.ylabel('Score')\nplt.xticks(rotation=70)\nplt.ylim(0.8,1.0)\nplt.xlim(0.0001,0.00021)\nplt.xticks(param_range)\nlw=2\nplt.plot(param_range, train_scores_mean, 'o-',label=\"Training Score\", color='darkorange', lw=lw)\nplt.fill_between(param_range, train_scores_mean-train_scores_std, train_scores_mean+train_scores_std, alpha=0.2, color='darkorange', lw=lw)\nplt.plot(param_range, valid_scores_mean, 'o-',label=\"Testing Score\", color='navy', lw=lw)\nplt.fill_between(param_range, valid_scores_mean-valid_scores_std, valid_scores_mean+valid_scores_std, alpha=0.2, color='navy', lw=lw)\nplt.legend(loc='best')\nplt.show()",
"The cells below will plot the Learning Curve.",
"#import module/library \nfrom sklearn.model_selection import learning_curve\n\n#define training data size increments \ntd_size = np.arange(0.1, 1.1, 0.1)\n#calculating train and validation scores\ntrain_sizes, train_scores, valid_scores = learning_curve(SVC(C=0.92, kernel='rbf', gamma=0.00011, class_weight={0:1.33, 1:1}), std_conf_ft_in_pca, conf_ft_outputs, train_sizes=td_size ,scoring='accuracy')\ntrain_scores_mean = np.mean(train_scores, axis=1)\ntrain_scores_std = np.std(train_scores, axis=1)\nvalid_scores_mean = np.mean(valid_scores, axis=1)\nvalid_scores_std = np.std(valid_scores, axis=1) \n\n#plotting learning curve \nfig = plt.figure(figsize=(5,5))\nplt.title('Learning Curve with SVM with RBF Kernel| C=0.92 & Gamma = 0.00011', fontsize=9)\nplt.xlabel('Train Data Size')\nplt.ylabel('Score')\nplt.ylim(0.8,1)\nlw=2\nplt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\", label=\"Training Score\")\nplt.fill_between(train_sizes, train_scores_mean-train_scores_std, train_scores_mean+train_scores_std, alpha=0.2, color='red', lw=lw)\nplt.plot(train_sizes, valid_scores_mean, 'o-', color=\"g\",label=\"Testing Score\")\nplt.fill_between(train_sizes, valid_scores_mean-valid_scores_std, valid_scores_mean+valid_scores_std, alpha=0.2, color='green', lw=lw)\nplt.legend(loc='best')\nplt.show()",
"Finding Best Number of Principal Components\nThe cells below will show the optimisation for the number of principal components to include. This is done by doing using a range of principal components, conducting PCA for each specified number in the interval and calculating the average of the test score over 3-fold cross-validation. This procedure is repeated 5 times to combat the randomness of PCA. The average test accuracy over the 5 runs is then plotted against the number of principal components included.",
"#this cell may take several minutes to run \n#plot how the number of PC's changes the test accuracy\nno_pcs = np.arange(20, 310, 10)\ncompute_average_of_5 = []\nfor t in range(0,5):\n pcs_accuracy_change = []\n for i in no_pcs:\n dummy_inputs = std_conf_ft_in\n dummy_outputs = conf_ft_outputs\n pca_dummy = PCA(n_components=i,)\n pca_dummy.fit(dummy_inputs)\n dummy_inputs_pca = pca_dummy.transform(dummy_inputs)\n dummy_model = SVC(C=0.92, kernel='rbf', gamma=0.00011, class_weight={0:1.33, 1:1})\n dummy_model.fit(dummy_inputs_pca, dummy_outputs,)\n dummy_scores = cross_val_score(dummy_model, dummy_inputs_pca, dummy_outputs, cv=3, scoring='accuracy')\n mean_cv = dummy_scores.mean()\n pcs_accuracy_change.append(mean_cv) \n print (len(pcs_accuracy_change))\n compute_average_of_5.append(pcs_accuracy_change)\n\n#calculate position specific average for the five trials \nfrom __future__ import division\naverage_acc_4_pcs = [sum(e)/len(e) for e in zip(*compute_average_of_5)]\n\nplt.title('Number of PCs and Change In Accuracy')\nplt.xlabel('Number of PCs')\nplt.ylabel('Accuracy (%)')\nplt.plot(no_pcs, average_acc_4_pcs, 'o-', color=\"r\")\nplt.show()",
"Making Predictions\nThe following cells will prepare the test data by getting it into the right format.",
"#Load the complete training data set \ntest_data = pd.read_csv(\"/Users/Max/Desktop/Max's Folder/Uni Work/Data Science MSc/Machine Learning/ML Kaggle Competition /Data Sets/Testing Data Set.csv\", header=0, index_col=0)\n\n##Observe the test data \ntest_data\n\n#turn test dataframe into matrix \ntest_data_matrix = test_data.as_matrix(columns=None)\ntest_data_matrix.shape",
"The following cell will apply the same pre-processing applied to the training data to the test data.",
"#pre-process test data in same way as train data \nscaled_test = scaler_2.transform(test_data_matrix)\ntransformed_test = pca_2.transform(scaled_test)\ntransformed_test.shape",
"The following cells will produce predictions on the test data using the final model.",
"#define and fit final model with best parameters from grid search\nfinal_model = SVC(C=0.92, cache_size=1000, kernel='rbf', gamma=0.00011, class_weight={0:1.33, 1:1})\nfinal_model.fit(std_conf_ft_in_pca, conf_ft_outputs)\n\n#make test data predictions\npredictions = final_model.predict(transformed_test)\n\n#create dictionary for outputs matched with ID\nto_export = {'ID': np.arange(1, 4201, 1), 'prediction': predictions}\nto_export\n\n#convert to dataframe \nfinal_predictions = pd.DataFrame.from_dict(to_export)\nfinal_predictions\n\n#convert prediction column float type entries to integers\nfinal_predictions = final_predictions.astype('int')\nfinal_predictions\n\n#check properties of predictions: class balance should be 42.86(1):57.14(0)\n#i.e. should predict 2400 Class 0 instances, and 1800 Class 1 instances\nfinal_predictions.prediction.value_counts()",
"References\n[1] Vapnik V. (1979) Estimation of Dependences based on Empirical Data. Springer Verlag, New York, 1982.\n[2] Cortes C, Vapnik V. (1995) Support Vector Networks. Machine Learning. Vol. 20: pages 273-297.\n[3] Drucker H, Burges CJC, Kaufman AS, Vapnik V. (1997) Support vector regression machines. Advances in Neural Information Processing Systems. Vol. 9: pages 155-161.\n[4] Vapnik VN. (1982) Estimation of Dependences Based on Empirical Data. Addendum 1, New York: Springer-Verlag.\n[5] Rosasco L, De Vito ED, Caponnetto A, Piana M, Verri A. (2004) Are Loss Functions All The Same? Neural Computation. Vol. 16: pages 1063-1076.\n[6] Batuwita R, Palade V. (2012) Class Imbalance learning methods for Support Vector Machines. In: Imbalanced Learning: Foundations, Algorithms and Applications, by He H, Ma Y. John Wiley & Sons: Chapter 6.\n[7] Lian H. (2012) On feature selection with principal component analysis for one-class SVM. Pattern Recognition Letters. Vol. 33: pages 1027-1031.\n[8] Juszczak P, Tax DJ, Dui RW. (2002) Feature scaling in support vector data descriptions. Proc. 8th Annual. Conf. Adv. School Comput. Imaging: pages 1-8.\nAccessed on link: http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.100.25 24&rep=rep1&type=pdf\n[9] Greenland S, Finkle WD. (1995) A critical look at methods for handling missing covariates in epidemiologic regression analyses. AM J Epdimiol. Vol. 142: pages 1255-1264.\n[10] Horton NJ, Kleinman KP. (2007) Much ado about nothing: A comparison of missing data methods and software to fit incomplete data regression models. Am Stat. Vol. 61: pages 79- 90.\n[11] Troyanskaya O, Cantor M, Sherlock G, Brown P, Hastie T, Tibshirani R, Botstein D, Altman RB. (2001) Missing value estimation methods for DNA microarrays. Bioinformatics. Vol. 17: pages 520-525.\n[12] Andridge RR, Little RJ. (2010) A Review of Hot Deck Imputation for Survey Non-response. Int Stat Review. Vol. 
78: pages 40-64.\n[13] Rubinsteyn A, Feldman S, O’Donnell T, Beaulieu-Jones B. (2015) fancyimpute 0.2.0. Package found on: https://github.com/hammerlab/fancyimpute.\n[14] Beretta L, Santaniello A. (2016) Nearest neighbour imputation algorithms: a critical evaluation. BMC Medical Informatics and Decision Making. Vol. 16: pages 197-208.\n[15] Barandela R, Valdovinos RM, Sanchez JS, Ferri Fj. (2004)\nThe Imbalanced Training Sample Problem: Under or Over Sampling? Spring-Verlag, Berlin: pages 806-814.\n[16] Lematre G, Nogueira F, Adrias CK. (2017) Imbalanced- learn: A Python Toolbox to Tackle the Curse of Imbalanced Datasets in Machine Learning. Journal of Machine Learning Research. Vol. 18: pages 1-5.\n[17] Chawla NV, Bowyer KW, Hall Lo, Kegelmeyer WP. (2002) SMOTE: Synthetic minority oversampling. Journal of Artifical Intelligence Research. Vol. 16: pages 321-357.\n[18] Strong DM, Lee YW, Wang RY. (1997) Data Quality in context. Communications of the ACM. Vol. 40: pages 103-110.\n[19] Blum AL, Langley P. (1997) Selection of relevant features and examples in Machine Learning. Artificial Intelligence. Vol. 97: pages 245-271.\n[20] Hira ZM, Gillies DF. (2015) A Review of Feature Selection and Feature Extraction Methods Applied on Microarray data. Advances in Bioinformatics. Vol. 2015: pages 1-13."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
marcelomiky/PythonCodes
|
.ipynb_checkpoints/Curso Introdução à Ciência da Computação com Python - Parte 2-checkpoint.ipynb
|
mit
|
[
"def cria_matriz(tot_lin, tot_col, valor):\n matriz = [] #lista vazia\n for i in range(tot_lin):\n linha = []\n for j in range(tot_col):\n linha.append(valor)\n matriz.append(linha)\n return matriz\n\nx = cria_matriz(2, 3, 99)\nx\n\ndef cria_matriz(tot_lin, tot_col, valor):\n matriz = [] #lista vazia\n for i in range(tot_lin):\n linha = []\n for j in range(tot_col):\n linha.append(valor)\n matriz.append(linha)\n return matriz\n\nx = cria_matriz(2, 3, 99)\nx",
"Este código faz com que primeiramente toda a primeira linha seja preenchida, em seguida a segunda e assim sucessivamente. Se nós quiséssemos que a primeira coluna fosse preenchida e em seguida a segunda coluna e assim por diante, como ficaria o código?\nUm exemplo: se o usuário digitasse o seguinte comando “x = cria_matriz(2,3)” e em seguida informasse os seis números para serem armazenados na matriz, na seguinte ordem: 1, 2, 3, 4, 5, 6; o x teria ao final da função a seguinte matriz: [[1, 3, 5], [2, 4, 6]].",
"def cria_matriz(num_linhas, num_colunas):\n matriz = [] #lista vazia\n for i in range(num_linhas):\n linha = []\n for j in range(num_colunas):\n linha.append(0)\n matriz.append(linha)\n\n for i in range(num_colunas):\n for j in range(num_linhas):\n matriz[j][i] = int(input(\"Digite o elemento [\" + str(j) + \"][\" + str(i) + \"]: \"))\n\n return matriz\n\nx = cria_matriz(2, 3)\n\nx\n\ndef tarefa(mat):\n dim = len(mat)\n for i in range(dim):\n print(mat[i][dim-1-i], end=\" \")\n\nmat = [[1,2,3],[4,5,6],[7,8,9]]\ntarefa(mat)\n\n# Observação: o trecho do print (end = \" \") irá mudar a finalização padrão do print\n# que é pular para a próxima linha. Com esta mudança, o cursor permanecerá na mesma \n# linha aguardando a impressão seguinte.",
"Exercício 1: Tamanho da matriz\nEscreva uma função dimensoes(matriz) que recebe uma matriz como parâmetro e imprime as dimensões da matriz recebida, no formato iXj.\nExemplos:\nminha_matriz = [[1], [2], [3]]\ndimensoes(minha_matriz)\n3X1\nminha_matriz = [[1, 2, 3], [4, 5, 6]]\ndimensoes(minha_matriz)\n2X3",
"def dimensoes(A):\n \n '''Função que recebe uma matriz como parâmetro e imprime as dimensões da matriz recebida, no formato iXj.\n \n Obs: i = colunas, j = linhas\n \n Exemplo: \n >>> minha_matriz = [[1], \n [2], \n [3]\n ]\n >>> dimensoes(minha_matriz)\n >>> 3X1\n '''\n \n lin = len(A)\n col = len(A[0])\n \n return print(\"%dX%d\" % (lin, col))\n\nmatriz1 = [[1], [2], [3]]\ndimensoes(matriz1)\n\nmatriz2 = [[1, 2, 3], [4, 5, 6]]\ndimensoes(matriz2)",
"Exercício 2: Soma de matrizes\nEscreva a função soma_matrizes(m1, m2) que recebe 2 matrizes e devolve uma matriz que represente sua soma caso as matrizes tenham dimensões iguais. Caso contrário, a função deve devolver False.\nExemplos:\nm1 = [[1, 2, 3], [4, 5, 6]]\nm2 = [[2, 3, 4], [5, 6, 7]]\nsoma_matrizes(m1, m2) => [[3, 5, 7], [9, 11, 13]]\nm1 = [[1], [2], [3]]\nm2 = [[2, 3, 4], [5, 6, 7]]\nsoma_matrizes(m1, m2) => False",
"def soma_matrizes(m1, m2):\n \n def dimensoes(A):\n lin = len(A)\n col = len(A[0])\n \n return ((lin, col))\n \n if dimensoes(m1) != dimensoes(m2):\n return False\n else:\n matriz = []\n for i in range(len(m1)):\n linha = []\n for j in range(len(m1[0])):\n linha.append(m1[i][j] + m2[i][j])\n matriz.append(linha)\n return matriz\n\nm1 = [[1, 2, 3], [4, 5, 6]]\nm2 = [[2, 3, 4], [5, 6, 7]]\nsoma_matrizes(m1, m2)\n\nm1 = [[1], [2], [3]]\nm2 = [[2, 3, 4], [5, 6, 7]]\nsoma_matrizes(m1, m2)",
"Praticar tarefa de programação: Exercícios adicionais (opcionais)\nExercício 1: Imprimindo matrizes\nComo proposto na primeira vídeo-aula da semana, escreva uma função imprime_matriz(matriz), que recebe uma matriz como parâmetro e imprime a matriz, linha por linha. Note que NÃO se deve imprimir espaços após o último elemento de cada linha!\nExemplos:\nminha_matriz = [[1], [2], [3]]\nimprime_matriz(minha_matriz)\n1\n2\n3\nminha_matriz = [[1, 2, 3], [4, 5, 6]]\nimprime_matriz(minha_matriz)\n1 2 3\n4 5 6",
"def imprime_matriz(A):\n \n for i in range(len(A)):\n for j in range(len(A[i])):\n print(A[i][j])\n\nminha_matriz = [[1], [2], [3]]\nimprime_matriz(minha_matriz)\n\nminha_matriz = [[1, 2, 3], [4, 5, 6]]\nimprime_matriz(minha_matriz)",
"Exercício 2: Matrizes multiplicáveis\nDuas matrizes são multiplicáveis se o número de colunas da primeira é igual ao número de linhas da segunda. Escreva a função sao_multiplicaveis(m1, m2) que recebe duas matrizes como parâmetro e devolve True se as matrizes forem multiplicavéis (na ordem dada) e False caso contrário.\nExemplos:\nm1 = [[1, 2, 3], [4, 5, 6]]\nm2 = [[2, 3, 4], [5, 6, 7]]\nsao_multiplicaveis(m1, m2) => False\nm1 = [[1], [2], [3]]\nm2 = [[1, 2, 3]]\nsao_multiplicaveis(m1, m2) => True",
"def sao_multiplicaveis(m1, m2):\n \n '''Recebe duas matrizes como parâmetros e devolve True se as matrizes forem multiplicáveis (número de colunas \n da primeira é igual ao número de linhs da segunda). False se não forem\n '''\n \n if len(m1) == len(m2[0]):\n return True\n else:\n return False\n\nm1 = [[1, 2, 3], [4, 5, 6]]\nm2 = [[2, 3, 4], [5, 6, 7]]\nsao_multiplicaveis(m1, m2)\n\nm1 = [[1], [2], [3]]\nm2 = [[1, 2, 3]]\nsao_multiplicaveis(m1, m2)"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
groutr/conda-tools
|
Conda-tools.ipynb
|
bsd-3-clause
|
[
"from conda_tools import (cache, environment)\nfrom conda_tools import environment_utils as eu\nfrom conda_tools import cache_utils as cu\n\nimport os\nfrom os.path import join\nfrom itertools import groupby, chain\nfrom versio.version import Version\n\n# adjust root to be your Miniconda prefix\nroot = r\"C:\\Users\\Ryan\\Miniconda3\"\nroot_envs = join(root, 'envs')\nroot_pkgs = join(root, 'pkgs')\nprint(root_envs)\nprint(root_pkgs)",
"The two core components of the conda ecosystem are the package cache and the environment subfolders. These are abstracted with PackageInfo and Environment objects respectively.\nHere we create \"pools\" of PackageInfo and Environment objects. These objects permit easy, read-only access to various bits of metadata stored in the package cache and conda-meta/ subfolders in the environment. We want to reuse the objects as much as we can to minimize disk I/O. All the disk reads are currently cached with the objects, so the more objects you work with, the more RAM will be required.",
"# Create pkg_cache and environments\npkg_cache = cache.packages(root_pkgs)\nenvs = environment.environments(root_envs)\nprint(pkg_cache[:5])\nprint()\nprint(envs[:5])",
"Packages\nConda packages all have an info/ subdirectory for storing metadata about the package. PackageInfo provide convenient access to this metadata.",
"pi = pkg_cache[0]\npi.index # info/index.json\n\n# We can access fields of index.json directly from the object.\npi.name, pi.version, pi.build\n\n# Access to info/files\npi.files\n\n# The full spec of the package. This is always \"name-version-build\"\npi.full_spec\n\n# We can queries against the information we have on packages\n# For example, I want to find all MIT licensed packages in the cache\n{pi.full_spec: pi.license for pi in pkg_cache if pi.license == 'MIT'}",
"Environments",
"e = envs[2]\ne\n\n# We can discover the currently activated environment\n{e.path: e.activated() for e in envs}\n\n# We can see all the packages that claim to be linked into the environment, keyed by name\ne.linked_packages\n\n# linked packages are either hard-linked, symlinked, or copied into environments.\nset(chain(e.hard_linked, e.soft_linked, e.copy_linked)) ^ set(e.linked_packages.values())\n\n# The origin channel of each package\ne.package_channels\n\n# We also have access to the history of the environment.\n# The history object is an adaptation of conda's history parser.\n# (note: The interface to this may change in the future)\ne.history.object_log",
"Neat stuff\nConvenient access to the package cache and environment metadata allows you to do some neat stuff relatively easily.\nBelow are a few examples of some quick ideas that can be implemented with little effort.",
"# Calculate potential collisions in environments by packages claiming the same file paths\n# Very quick and naive way of detecting file path collisions.\nfor i, p1 in enumerate(pkg_cache):\n for p2 in pkg_cache[i+1:]:\n if p1.name == p2.name:\n continue\n x = p1.files.intersection(p2.files)\n if x:\n print(\"{} collides with {}\".format(p1, p2))\n print(\"\\tCollisions: \", x)\n\n# Cache Utils has some higher order, convenience functions\n\n# See what environments a package is linked into\n# Note that this is a O(n) operation where n is the sum of the installed packages in each environment you're checking.\n# If you're running this for the first time, it has to read all the metadata for each environment.\n# Also note, that this creates new package info objects and environment objects each run, so each run\n# prompts a full scan of both the package cache and all environments.\ncu.linked_environments((pkg_cache[0],), envs)\n\n# Find which environments the latest packages are linked to.\n# This example uses Versio to parse and compare PEP440 compliant version numbers\n# This will exclude packages like packages like jpeg and openssl\n\n# This loop simple creates Version objects so we can compare them later.\nVersions = {}\nfor x in pkg_cache:\n try:\n if x.name in Versions:\n Versions[x.name].append(Version(x.version))\n else:\n Versions[x.name] = [Version(x.version)]\n except:\n print(\"Skipping \", x.name, x.version)\n \n# sort the value lists and pick the latest versions\n#pversions = {k: str(list(sorted(v))[-1]) for k, v in Versions.items()}\n\n# sort the value lists and pick the older versions\npversions = {k: list(map(str, list(sorted(v))[:-1])) for k, v in Versions.items()}\n\n# The most up-to-date packages are linked to which environments?\n#latest_pkgs = [x for x in pkg_cache if x.name in pversions and x.version == pversions[x.name]]\n\n# Find the environments that older packages are linked to\nlatest_pkgs = [x for x in pkg_cache if x.name in pversions and 
x.version in set(pversions[x.name])]\n\n# Simply print the results nicely\n{str(k): list(map(str, v)) for k, v in cu.linked_environments(latest_pkgs, envs).items()}\n\n# All packages that are not linked to any environment\ncu.unlinked_packages(pkg_cache, envs)\n\n# Environment representation of root environment\ne = environment.Environment(join(root_envs, 'env2'))\n\n# Long running. Disk intensive.\nfilter_pyc = lambda f: filter(lambda x: not x.endswith('.pyc'), f)\n\n# List all files in an environment that are not hardlinked (and should be).\n# Note that *.pyc files are filtered out.\nnot_linked = {x: tuple(filter_pyc(y)) for x, y in eu.check_hardlinked_env(envs[0]).items()}\n\n# If you wish to see all the non-existant hardlinks, including *.pyc files, remove the filter_pyc function call\n# not_linked = {x: y for x, y in eu.check_hardlinked_env(envs[0]).items()}\n\nnot_linked\n\n# We can leverage the information in the environment's history to get packages \n# that were explicitly installed by the user.\neu.explicitly_installed(e)"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
hanezu/cs231n-assignment
|
assignment2/FullyConnectedNets.ipynb
|
mit
|
[
"Fully-Connected Neural Nets\nIn the previous homework you implemented a fully-connected two-layer neural network on CIFAR-10. The implementation was simple but not very modular since the loss and gradient were computed in a single monolithic function. This is manageable for a simple two-layer network, but would become impractical as we move to bigger models. Ideally we want to build networks using a more modular design so that we can implement different layer types in isolation and then snap them together into models with different architectures.\nIn this exercise we will implement fully-connected networks using a more modular approach. For each layer we will implement a forward and a backward function. The forward function will receive inputs, weights, and other parameters and will return both an output and a cache object storing data needed for the backward pass, like this:\n```python\ndef layer_forward(x, w):\n \"\"\" Receive inputs x and weights w \"\"\"\n # Do some computations ...\n z = # ... 
some intermediate value\n # Do some more computations ...\n out = # the output\ncache = (x, w, z, out) # Values we need to compute gradients\nreturn out, cache\n```\nThe backward pass will receive upstream derivatives and the cache object, and will return gradients with respect to the inputs and weights, like this:\n```python\ndef layer_backward(dout, cache):\n \"\"\"\n Receive derivative of loss with respect to outputs and cache,\n and compute derivative with respect to inputs.\n \"\"\"\n # Unpack cache values\n x, w, z, out = cache\n# Use values in cache to compute derivatives\n dx = # Derivative of loss with respect to x\n dw = # Derivative of loss with respect to w\nreturn dx, dw\n```\nAfter implementing a bunch of layers this way, we will be able to easily combine them to build classifiers with different architectures.\nIn addition to implementing fully-connected networks of arbitrary depth, we will also explore different update rules for optimization, and introduce Dropout as a regularizer and Batch Normalization as a tool to more efficiently optimize deep networks.",
"# As usual, a bit of setup\n\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom cs231n.classifiers.fc_net import *\nfrom cs231n.data_utils import get_CIFAR10_data\nfrom cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array\nfrom cs231n.solver import Solver\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n# for auto-reloading external modules\n# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython\n%load_ext autoreload\n%autoreload 2\n\ndef rel_error(x, y):\n \"\"\" returns relative error \"\"\"\n return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))\n\n# Load the (preprocessed) CIFAR10 data.\n\ndata = get_CIFAR10_data()\nfor k, v in data.iteritems():\n print '%s: ' % k, v.shape",
"Affine layer: foward\nOpen the file cs231n/layers.py and implement the affine_forward function.\nOnce you are done you can test your implementaion by running the following:",
"# Test the affine_forward function\n\nnum_inputs = 2\ninput_shape = (4, 5, 6)\noutput_dim = 3\n\ninput_size = num_inputs * np.prod(input_shape)\nweight_size = output_dim * np.prod(input_shape)\n\nx = np.linspace(-0.1, 0.5, num=input_size).reshape(num_inputs, *input_shape)\nw = np.linspace(-0.2, 0.3, num=weight_size).reshape(np.prod(input_shape), output_dim)\nb = np.linspace(-0.3, 0.1, num=output_dim)\n\nout, _ = affine_forward(x, w, b)\ncorrect_out = np.array([[ 1.49834967, 1.70660132, 1.91485297],\n [ 3.25553199, 3.5141327, 3.77273342]])\n\n# Compare your output with ours. The error should be around 1e-9.\nprint 'Testing affine_forward function:'\nprint 'difference: ', rel_error(out, correct_out)",
"Affine layer: backward\nNow implement the affine_backward function and test your implementation using numeric gradient checking.",
"# Test the affine_backward function\n\nx = np.random.randn(10, 2, 3)\nw = np.random.randn(6, 5)\nb = np.random.randn(5)\ndout = np.random.randn(10, 5)\n\ndx_num = eval_numerical_gradient_array(lambda x: affine_forward(x, w, b)[0], x, dout)\ndw_num = eval_numerical_gradient_array(lambda w: affine_forward(x, w, b)[0], w, dout)\ndb_num = eval_numerical_gradient_array(lambda b: affine_forward(x, w, b)[0], b, dout)\n\n_, cache = affine_forward(x, w, b)\ndx, dw, db = affine_backward(dout, cache)\n\n# The error should be around 1e-10\nprint 'Testing affine_backward function:'\nprint 'dx error: ', rel_error(dx_num, dx)\nprint 'dw error: ', rel_error(dw_num, dw)\nprint 'db error: ', rel_error(db_num, db)",
"ReLU layer: forward\nImplement the forward pass for the ReLU activation function in the relu_forward function and test your implementation using the following:",
"# Test the relu_forward function\n\nx = np.linspace(-0.5, 0.5, num=12).reshape(3, 4)\n\nout, _ = relu_forward(x)\ncorrect_out = np.array([[ 0., 0., 0., 0., ],\n [ 0., 0., 0.04545455, 0.13636364,],\n [ 0.22727273, 0.31818182, 0.40909091, 0.5, ]])\n\n# Compare your output with ours. The error should be around 1e-8\nprint 'Testing relu_forward function:'\nprint 'difference: ', rel_error(out, correct_out)",
"ReLU layer: backward\nNow implement the backward pass for the ReLU activation function in the relu_backward function and test your implementation using numeric gradient checking:",
"x = np.random.randn(10, 10)\ndout = np.random.randn(*x.shape)\n\ndx_num = eval_numerical_gradient_array(lambda x: relu_forward(x)[0], x, dout)\n\n_, cache = relu_forward(x)\ndx = relu_backward(dout, cache)\n\n# The error should be around 1e-12\nprint 'Testing relu_backward function:'\nprint 'dx error: ', rel_error(dx_num, dx)",
"\"Sandwich\" layers\nThere are some common patterns of layers that are frequently used in neural nets. For example, affine layers are frequently followed by a ReLU nonlinearity. To make these common patterns easy, we define several convenience layers in the file cs231n/layer_utils.py.\nFor now take a look at the affine_relu_forward and affine_relu_backward functions, and run the following to numerically gradient check the backward pass:",
"from cs231n.layer_utils import affine_relu_forward, affine_relu_backward\n\nx = np.random.randn(2, 3, 4)\nw = np.random.randn(12, 10)\nb = np.random.randn(10)\ndout = np.random.randn(2, 10)\n\nout, cache = affine_relu_forward(x, w, b)\ndx, dw, db = affine_relu_backward(dout, cache)\n\ndx_num = eval_numerical_gradient_array(lambda x: affine_relu_forward(x, w, b)[0], x, dout)\ndw_num = eval_numerical_gradient_array(lambda w: affine_relu_forward(x, w, b)[0], w, dout)\ndb_num = eval_numerical_gradient_array(lambda b: affine_relu_forward(x, w, b)[0], b, dout)\n\nprint 'Testing affine_relu_forward:'\nprint 'dx error: ', rel_error(dx_num, dx)\nprint 'dw error: ', rel_error(dw_num, dw)\nprint 'db error: ', rel_error(db_num, db)",
"Loss layers: Softmax and SVM\nYou implemented these loss functions in the last assignment, so we'll give them to you for free here. You should still make sure you understand how they work by looking at the implementations in cs231n/layers.py.\nYou can make sure that the implementations are correct by running the following:",
"num_classes, num_inputs = 10, 50\nx = 0.001 * np.random.randn(num_inputs, num_classes)\ny = np.random.randint(num_classes, size=num_inputs)\n\n# [0] after svm_loss: indicate to get only the return of 0\ndx_num = eval_numerical_gradient(lambda x: svm_loss(x, y)[0], x, verbose=False)\nloss, dx = svm_loss(x, y)\n\n# Test svm_loss function. Loss should be around 9 and dx error should be 1e-9\nprint 'Testing svm_loss:'\nprint 'loss: ', loss\nprint 'dx error: ', rel_error(dx_num, dx)\n\ndx_num = eval_numerical_gradient(lambda x: softmax_loss(x, y)[0], x, verbose=False)\nloss, dx = softmax_loss(x, y)\n\n# Test softmax_loss function. Loss should be 2.3 and dx error should be 1e-8\nprint '\\nTesting softmax_loss:'\nprint 'loss: ', loss\nprint 'dx error: ', rel_error(dx_num, dx)",
"Two-layer network\nIn the previous assignment you implemented a two-layer neural network in a single monolithic class. Now that you have implemented modular versions of the necessary layers, you will reimplement the two layer network using these modular implementations.\nOpen the file cs231n/classifiers/fc_net.py and complete the implementation of the TwoLayerNet class. This class will serve as a model for the other networks you will implement in this assignment, so read through it to make sure you understand the API. You can run the cell below to test your implementation.",
"N, D, H, C = 3, 5, 50, 7\nX = np.random.randn(N, D)\ny = np.random.randint(C, size=N)\n\nstd = 1e-2\nmodel = TwoLayerNet(input_dim=D, hidden_dim=H, num_classes=C, weight_scale=std)\n\nprint 'Testing initialization ... '\nW1_std = abs(model.params['W1'].std() - std)\nb1 = model.params['b1']\nW2_std = abs(model.params['W2'].std() - std)\nb2 = model.params['b2']\nassert W1_std < std / 10, 'First layer weights do not seem right'\nassert np.all(b1 == 0), 'First layer biases do not seem right'\nassert W2_std < std / 10, 'Second layer weights do not seem right'\nassert np.all(b2 == 0), 'Second layer biases do not seem right'\n\nprint 'Testing test-time forward pass ... '\nmodel.params['W1'] = np.linspace(-0.7, 0.3, num=D*H).reshape(D, H)\nmodel.params['b1'] = np.linspace(-0.1, 0.9, num=H)\nmodel.params['W2'] = np.linspace(-0.3, 0.4, num=H*C).reshape(H, C)\nmodel.params['b2'] = np.linspace(-0.9, 0.1, num=C)\nX = np.linspace(-5.5, 4.5, num=N*D).reshape(D, N).T\nscores = model.loss(X)\ncorrect_scores = np.asarray(\n [[11.53165108, 12.2917344, 13.05181771, 13.81190102, 14.57198434, 15.33206765, 16.09215096],\n [12.05769098, 12.74614105, 13.43459113, 14.1230412, 14.81149128, 15.49994135, 16.18839143],\n [12.58373087, 13.20054771, 13.81736455, 14.43418138, 15.05099822, 15.66781506, 16.2846319 ]])\nscores_diff = np.abs(scores - correct_scores).sum()\nassert scores_diff < 1e-6, 'Problem with test-time forward pass'\n\nprint 'Testing training loss (no regularization)'\ny = np.asarray([0, 5, 1])\nloss, grads = model.loss(X, y)\ncorrect_loss = 3.4702243556\nassert abs(loss - correct_loss) < 1e-10, 'Problem with training-time loss'\n\nmodel.reg = 1.0\nloss, grads = model.loss(X, y)\ncorrect_loss = 26.5948426952\nassert abs(loss - correct_loss) < 1e-10, 'Problem with regularization loss'\n\nfor reg in [0.0, 0.7]:\n print 'Running numeric gradient check with reg = ', reg\n model.reg = reg\n loss, grads = model.loss(X, y)\n\n for name in sorted(grads):\n f = lambda _: model.loss(X, 
y)[0]\n grad_num = eval_numerical_gradient(f, model.params[name], verbose=False)\n print '%s relative error: %.2e' % (name, rel_error(grad_num, grads[name]))",
"Solver\nIn the previous assignment, the logic for training models was coupled to the models themselves. Following a more modular design, for this assignment we have split the logic for training models into a separate class.\nOpen the file cs231n/solver.py and read through it to familiarize yourself with the API. After doing so, use a Solver instance to train a TwoLayerNet that achieves at least 50% accuracy on the validation set.",
"model = TwoLayerNet()\nsolver = None\n\n##############################################################################\n# TODO: Use a Solver instance to train a TwoLayerNet that achieves at least #\n# 50% accuracy on the validation set. #\n##############################################################################\npass\ndata = {\n 'X_train': data['X_train'],\n 'y_train': data['y_train'],\n 'X_val': data['X_val'],\n 'y_val': data['y_val']\n}\nmodel = TwoLayerNet(hidden_dim=100, reg=1e-1) # reg is important!\nsolver = Solver(model, data,\n update_rule='sgd',\n optim_config={\n 'learning_rate': 1e-3,\n },\n # lr_decay is important\n lr_decay=0.8,\n num_epochs=10, batch_size=100,\n verbose=True,\n print_every=100)\nsolver.train()\n##############################################################################\n# END OF YOUR CODE #\n##############################################################################\n\n# Run this cell to visualize training loss and train / val accuracy\n\nplt.subplot(2, 1, 1)\nplt.title('Training loss')\nplt.plot(solver.loss_history, 'o')\nplt.xlabel('Iteration')\n\nplt.subplot(2, 1, 2)\nplt.title('Accuracy')\nplt.plot(solver.train_acc_history, '-o', label='train')\nplt.plot(solver.val_acc_history, '-o', label='val')\nplt.plot([0.5] * len(solver.val_acc_history), 'k--')\nplt.xlabel('Epoch')\nplt.legend(loc='lower right')\nplt.gcf().set_size_inches(15, 12)\nplt.show()",
"Multilayer network\nNext you will implement a fully-connected network with an arbitrary number of hidden layers.\nRead through the FullyConnectedNet class in the file cs231n/classifiers/fc_net.py.\nImplement the initialization, the forward pass, and the backward pass. For the moment don't worry about implementing dropout or batch normalization; we will add those features soon.\nInitial loss and gradient check\nAs a sanity check, run the following to check the initial loss and to gradient check the network both with and without regularization. Do the initial losses seem reasonable?\nFor gradient checking, you should expect to see errors around 1e-6 or less.",
"N, D, H1, H2, C = 2, 15, 20, 30, 10\nX = np.random.randn(N, D)\ny = np.random.randint(C, size=(N,))\n\nfor reg in [0, 3.14]:\n print 'Running check with reg = ', reg\n model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,\n reg=reg, weight_scale=5e-2, dtype=np.float64)\n\n loss, grads = model.loss(X, y)\n print 'Initial loss: ', loss\n\n for name in sorted(grads):\n f = lambda _: model.loss(X, y)[0]\n grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)\n print '%s relative error: %.2e' % (name, rel_error(grad_num, grads[name]))",
"As another sanity check, make sure you can overfit a small dataset of 50 images. First we will try a three-layer network with 100 units in each hidden layer. You will need to tweak the learning rate and initialization scale, but you should be able to overfit and achieve 100% training accuracy within 20 epochs.",
"# TODO: Use a three-layer Net to overfit 50 training examples.\n\nnum_train = 50\nsmall_data = {\n 'X_train': data['X_train'][:num_train],\n 'y_train': data['y_train'][:num_train],\n 'X_val': data['X_val'],\n 'y_val': data['y_val'],\n}\n\nweight_scale = 1e-2\nlearning_rate = 1e-2\nmodel = FullyConnectedNet([100, 100],\n weight_scale=weight_scale, dtype=np.float64)\nsolver = Solver(model, small_data,\n print_every=10, num_epochs=20, batch_size=25,\n update_rule='sgd',\n optim_config={\n 'learning_rate': learning_rate,\n }\n )\nsolver.train()\n\nplt.plot(solver.loss_history, 'o')\nplt.title('Training loss history')\nplt.xlabel('Iteration')\nplt.ylabel('Training loss')\nplt.show()",
"Now try to use a five-layer network with 100 units on each layer to overfit 50 training examples. Again you will have to adjust the learning rate and weight initialization, but you should be able to achieve 100% training accuracy within 20 epochs.",
"# TODO: Use a five-layer Net to overfit 50 training examples.\n\nnum_train = 50\nsmall_data = {\n 'X_train': data['X_train'][:num_train],\n 'y_train': data['y_train'][:num_train],\n 'X_val': data['X_val'],\n 'y_val': data['y_val'],\n}\n\nlearning_rate = 3e-4\nweight_scale = 1e-1\nmodel = FullyConnectedNet([100, 100, 100, 100],\n weight_scale=weight_scale, dtype=np.float64)\nsolver = Solver(model, small_data,\n print_every=10, num_epochs=100, batch_size=25,\n update_rule='sgd',\n optim_config={\n 'learning_rate': learning_rate,\n }\n )\nsolver.train()\n\nplt.plot(solver.loss_history, 'o')\nplt.title('Training loss history')\nplt.xlabel('Iteration')\nplt.ylabel('Training loss')\nplt.show()",
"Inline question:\nDid you notice anything about the comparative difficulty of training the three-layer net vs training the five layer net?\nAnswer:\nThe three-layer net is much more easy to train and I just don't know how to train five layer one...\nThe loss do not go down whatever learning rate & weight_scale are.\nI looked up other's parameter and plugged it back here, it seems that a too small weight_scale seemed to cause the not-decreasing loss problem.\nProbably using the correct weight init method is vital for my failure here. I should try \npython\nw = np.random.randn(n) * sqrt(2.0/n)\nUpdate rules\nSo far we have used vanilla stochastic gradient descent (SGD) as our update rule. More sophisticated update rules can make it easier to train deep networks. We will implement a few of the most commonly used update rules and compare them to vanilla SGD.\nSGD+Momentum\nStochastic gradient descent with momentum is a widely used update rule that tends to make deep networks converge faster than vanilla stochstic gradient descent.\nOpen the file cs231n/optim.py and read the documentation at the top of the file to make sure you understand the API. Implement the SGD+momentum update rule in the function sgd_momentum and run the following to check your implementation. You should see errors less than 1e-8.",
"from cs231n.optim import sgd_momentum\n\nN, D = 4, 5\nw = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D)\ndw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D)\nv = np.linspace(0.6, 0.9, num=N*D).reshape(N, D)\n\nconfig = {'learning_rate': 1e-3, 'velocity': v}\nnext_w, _ = sgd_momentum(w, dw, config=config)\n\nexpected_next_w = np.asarray([\n [ 0.1406, 0.20738947, 0.27417895, 0.34096842, 0.40775789],\n [ 0.47454737, 0.54133684, 0.60812632, 0.67491579, 0.74170526],\n [ 0.80849474, 0.87528421, 0.94207368, 1.00886316, 1.07565263],\n [ 1.14244211, 1.20923158, 1.27602105, 1.34281053, 1.4096 ]])\nexpected_velocity = np.asarray([\n [ 0.5406, 0.55475789, 0.56891579, 0.58307368, 0.59723158],\n [ 0.61138947, 0.62554737, 0.63970526, 0.65386316, 0.66802105],\n [ 0.68217895, 0.69633684, 0.71049474, 0.72465263, 0.73881053],\n [ 0.75296842, 0.76712632, 0.78128421, 0.79544211, 0.8096 ]])\n\nprint 'next_w error: ', rel_error(next_w, expected_next_w)\nprint 'velocity error: ', rel_error(expected_velocity, config['velocity'])",
"Once you have done so, run the following to train a six-layer network with both SGD and SGD+momentum. You should see the SGD+momentum update rule converge faster.",
"num_train = 4000\nsmall_data = {\n 'X_train': data['X_train'][:num_train],\n 'y_train': data['y_train'][:num_train],\n 'X_val': data['X_val'],\n 'y_val': data['y_val'],\n}\n\nsolvers = {}\n\nfor update_rule in ['sgd', 'sgd_momentum']:\n print 'running with ', update_rule\n model = FullyConnectedNet([100, 100, 100, 100, 100], weight_scale=5e-2)\n\n solver = Solver(model, small_data,\n num_epochs=5, batch_size=100,\n update_rule=update_rule,\n optim_config={\n 'learning_rate': 1e-2,\n },\n verbose=True)\n solvers[update_rule] = solver\n solver.train()\n print\n\nplt.subplot(3, 1, 1)\nplt.title('Training loss')\nplt.xlabel('Iteration')\n\nplt.subplot(3, 1, 2)\nplt.title('Training accuracy')\nplt.xlabel('Epoch')\n\nplt.subplot(3, 1, 3)\nplt.title('Validation accuracy')\nplt.xlabel('Epoch')\n\nfor update_rule, solver in solvers.iteritems():\n plt.subplot(3, 1, 1)\n plt.plot(solver.loss_history, 'o', label=update_rule)\n \n plt.subplot(3, 1, 2)\n plt.plot(solver.train_acc_history, '-o', label=update_rule)\n\n plt.subplot(3, 1, 3)\n plt.plot(solver.val_acc_history, '-o', label=update_rule)\n \nfor i in [1, 2, 3]:\n plt.subplot(3, 1, i)\n plt.legend(loc='upper center', ncol=4)\nplt.gcf().set_size_inches(15, 15)\nplt.show()",
"RMSProp and Adam\nRMSProp [1] and Adam [2] are update rules that set per-parameter learning rates by using a running average of the second moments of gradients.\nIn the file cs231n/optim.py, implement the RMSProp update rule in the rmsprop function and implement the Adam update rule in the adam function, and check your implementations using the tests below.\n[1] Tijmen Tieleman and Geoffrey Hinton. \"Lecture 6.5-rmsprop: Divide the gradient by a running average of its recent magnitude.\" COURSERA: Neural Networks for Machine Learning 4 (2012).\n[2] Diederik Kingma and Jimmy Ba, \"Adam: A Method for Stochastic Optimization\", ICLR 2015.",
"# Test RMSProp implementation; you should see errors less than 1e-7\nfrom cs231n.optim import rmsprop\n\nN, D = 4, 5\nw = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D)\ndw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D)\ncache = np.linspace(0.6, 0.9, num=N*D).reshape(N, D)\n\nconfig = {'learning_rate': 1e-2, 'cache': cache}\nnext_w, _ = rmsprop(w, dw, config=config)\n\nexpected_next_w = np.asarray([\n [-0.39223849, -0.34037513, -0.28849239, -0.23659121, -0.18467247],\n [-0.132737, -0.08078555, -0.02881884, 0.02316247, 0.07515774],\n [ 0.12716641, 0.17918792, 0.23122175, 0.28326742, 0.33532447],\n [ 0.38739248, 0.43947102, 0.49155973, 0.54365823, 0.59576619]])\nexpected_cache = np.asarray([\n [ 0.5976, 0.6126277, 0.6277108, 0.64284931, 0.65804321],\n [ 0.67329252, 0.68859723, 0.70395734, 0.71937285, 0.73484377],\n [ 0.75037008, 0.7659518, 0.78158892, 0.79728144, 0.81302936],\n [ 0.82883269, 0.84469141, 0.86060554, 0.87657507, 0.8926 ]])\n\nprint 'next_w error: ', rel_error(expected_next_w, next_w)\nprint 'cache error: ', rel_error(expected_cache, config['cache'])\n\n# Test Adam implementation; you should see errors around 1e-7 or less\nfrom cs231n.optim import adam\n\nN, D = 4, 5\nw = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D)\ndw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D)\nm = np.linspace(0.6, 0.9, num=N*D).reshape(N, D)\nv = np.linspace(0.7, 0.5, num=N*D).reshape(N, D)\n\nconfig = {'learning_rate': 1e-2, 'm': m, 'v': v, 't': 5}\nnext_w, _ = adam(w, dw, config=config)\n\nexpected_next_w = np.asarray([\n [-0.40094747, -0.34836187, -0.29577703, -0.24319299, -0.19060977],\n [-0.1380274, -0.08544591, -0.03286534, 0.01971428, 0.0722929],\n [ 0.1248705, 0.17744702, 0.23002243, 0.28259667, 0.33516969],\n [ 0.38774145, 0.44031188, 0.49288093, 0.54544852, 0.59801459]])\nexpected_v = np.asarray([\n [ 0.69966, 0.68908382, 0.67851319, 0.66794809, 0.65738853,],\n [ 0.64683452, 0.63628604, 0.6257431, 0.61520571, 0.60467385,],\n [ 0.59414753, 0.58362676, 0.57311152, 
0.56260183, 0.55209767,],\n [ 0.54159906, 0.53110598, 0.52061845, 0.51013645, 0.49966, ]])\nexpected_m = np.asarray([\n [ 0.48, 0.49947368, 0.51894737, 0.53842105, 0.55789474],\n [ 0.57736842, 0.59684211, 0.61631579, 0.63578947, 0.65526316],\n [ 0.67473684, 0.69421053, 0.71368421, 0.73315789, 0.75263158],\n [ 0.77210526, 0.79157895, 0.81105263, 0.83052632, 0.85 ]])\n\nprint 'next_w error: ', rel_error(expected_next_w, next_w)\nprint 'v error: ', rel_error(expected_v, config['v'])\nprint 'm error: ', rel_error(expected_m, config['m'])",
"Once you have debugged your RMSProp and Adam implementations, run the following to train a pair of deep networks using these new update rules:",
"learning_rates = {'rmsprop': 1e-4, 'adam': 1e-3}\nfor update_rule in ['adam', 'rmsprop']:\n print 'running with ', update_rule\n model = FullyConnectedNet([100, 100, 100, 100, 100], weight_scale=5e-2)\n\n solver = Solver(model, small_data,\n num_epochs=5, batch_size=100,\n update_rule=update_rule,\n optim_config={\n 'learning_rate': learning_rates[update_rule]\n },\n verbose=True)\n solvers[update_rule] = solver\n solver.train()\n print\n\nplt.subplot(3, 1, 1)\nplt.title('Training loss')\nplt.xlabel('Iteration')\n\nplt.subplot(3, 1, 2)\nplt.title('Training accuracy')\nplt.xlabel('Epoch')\n\nplt.subplot(3, 1, 3)\nplt.title('Validation accuracy')\nplt.xlabel('Epoch')\n\nfor update_rule, solver in solvers.iteritems():\n plt.subplot(3, 1, 1)\n plt.plot(solver.loss_history, 'o', label=update_rule)\n \n plt.subplot(3, 1, 2)\n plt.plot(solver.train_acc_history, '-o', label=update_rule)\n\n plt.subplot(3, 1, 3)\n plt.plot(solver.val_acc_history, '-o', label=update_rule)\n \nfor i in [1, 2, 3]:\n plt.subplot(3, 1, i)\n plt.legend(loc='upper center', ncol=4)\nplt.gcf().set_size_inches(15, 15)\nplt.show()",
"Train a good model!\nTrain the best fully-connected model that you can on CIFAR-10, storing your best model in the best_model variable. We require you to get at least 50% accuracy on the validation set using a fully-connected net.\nIf you are careful it should be possible to get accuracies above 55%, but we don't require it for this part and won't assign extra credit for doing so. Later in the assignment we will ask you to train the best convolutional network that you can on CIFAR-10, and we would prefer that you spend your effort working on convolutional nets rather than fully-connected nets.\nYou might find it useful to complete the BatchNormalization.ipynb and Dropout.ipynb notebooks before completing this part, since those techniques can help you train powerful models.",
"best_model = None\n################################################################################\n# TODO: Train the best FullyConnectedNet that you can on CIFAR-10. You might #\n# batch normalization and dropout useful. Store your best model in the #\n# best_model variable. #\n################################################################################\npass\nhidden_dims = [600, 500, 400, 300, 200, 100]\n\ndata = {\n 'X_train': data['X_train'],\n 'y_train': data['y_train'],\n 'X_val': data['X_val'],\n 'y_val': data['y_val']\n}\n\nweight_scale = 2.5e-2\nmodel = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, dtype=np.float64, use_batchnorm=True, dropout=0.25, reg=1e-2)\n\nsolver = Solver(model, data,\n num_epochs=30, batch_size=100,\n update_rule='adam',\n optim_config={\n 'learning_rate': 3.1e-4,\n },\n lr_decay=0.9,\n verbose=True, print_every=500)\nsolver.train()\n\nscores = model.loss(data['X_test'])\ny_pred = np.argmax(scores, axis = 1)\nacc = np.mean(y_pred == data['y_test'])\nprint 'test acc: %f' %(acc)\nbest_model = model\n\nplt.subplot(2, 1, 1)\nplt.plot(solver.loss_history)\nplt.title('Loss history')\nplt.xlabel('Iteration')\nplt.ylabel('Loss')\n\nplt.subplot(2, 1, 2)\nplt.plot(solver.train_acc_history, label='train')\nplt.plot(solver.val_acc_history, label='val')\nplt.title('Classification accuracy history')\nplt.xlabel('Epoch')\nplt.ylabel('Clasification accuracy')\nplt.show() \n################################################################################\n# END OF YOUR CODE #\n################################################################################",
"Test you model\nRun your best model on the validation and test sets. You should achieve above 50% accuracy on the validation set.",
"y_test_pred = np.argmax(best_model.loss(X_test), axis=1)\ny_val_pred = np.argmax(best_model.loss(X_val), axis=1)\nprint 'Validation set accuracy: ', (y_val_pred == y_val).mean()\nprint 'Test set accuracy: ', (y_test_pred == y_test).mean()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
borja876/Thinkful-DataScience-Borja
|
DRILL+Mo%27+blobs%2C+mo%27+problems.ipynb
|
mit
|
[
"import numpy as np\nimport pandas as pd\nimport scipy\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline\nfrom sklearn.datasets.samples_generator import make_blobs\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.cluster import MeanShift, estimate_bandwidth\nfrom sklearn.cluster import SpectralClustering\nfrom sklearn.cluster import AffinityPropagation\nfrom sklearn import metrics\nfrom itertools import cycle\nfrom sklearn.cluster import KMeans\nfrom sklearn.preprocessing import normalize\nfrom sklearn.decomposition import PCA\nfrom sklearn.cluster import MiniBatchKMeans",
"Blobs 1",
"# Create blobs\n# The coordinates of the centers of our blobs.\ncenters = [[0, 0], [-10, -10], [10, 10], [5,-5], [-5,5]]\n\n# Make 10,000 rows worth of data with two features representing three\n# clusters, each having a standard deviation of 1.\nX, y = make_blobs(\n n_samples=10000,\n centers=centers,\n cluster_std=1,\n n_features=2,\n random_state=42)\n\nplt.scatter(X[:, 0], X[:, 1], c=y)\nplt.show()\n\n#Divide into training and test sets.\nX_train, X_test, y_train, y_test = train_test_split(\n X,\n y,\n test_size=0.9,\n random_state=42)",
"K-Means",
"# Normalize the data.\nX_norm = normalize(X_train)\n\n# Reduce it to two components.\nX_pca = PCA(2).fit_transform(X_train)\n\n# Calculate predicted values.\ny_pred = KMeans(n_clusters=4, random_state=42).fit_predict(X_pca)\n\n# Plot the solution.\nplt.scatter(X_pca[:, 0], X_pca[:, 1], c=y_pred)\nplt.show()\n\n# Check the solution against the data.\nprint('Comparing k-means clusters against the data:')\nprint(pd.crosstab(y_pred, y_train))",
"Mean Shift Clustering",
"# Here we set the bandwidth. This function automatically derives a bandwidth\n# number based on an inspection of the distances among points in the data.\nbandwidth = estimate_bandwidth(X_train, quantile=0.2, n_samples=500)\n\n# Declare and fit the model.\nms = MeanShift(bandwidth=bandwidth, bin_seeding=True)\nms.fit(X_train)\n\n# Extract cluster assignments for each data point.\nlabels = ms.labels_\n\n# Coordinates of the cluster centers.\ncluster_centers = ms.cluster_centers_\n\n# Count our clusters.\nn_clusters_ = len(np.unique(labels))\n\nprint(\"Number of estimated clusters: {}\".format(n_clusters_))\n\n\nplt.scatter(X_train[:, 0], X_train[:, 1], c=labels)\nplt.show()\n\nprint('Comparing the assigned categories to the ones in the data:')\nprint(pd.crosstab(y_train,labels))",
"Spectral Clustering",
"# We know we're looking for three clusters.\nn_clusters=4\n\n# Declare and fit the model.\nsc = SpectralClustering(n_clusters=n_clusters)\nsc.fit(X_train)\n\n#Predicted clusters.\npredict=sc.fit_predict(X_train)\n\n#Graph results.\nplt.scatter(X_train[:, 0], X_train[:, 1], c=predict)\nplt.show()\n\nprint('Comparing the assigned categories to the ones in the data:')\nprint(pd.crosstab(y_train,predict))",
"Affinity Propagation",
"# Declare the model and fit it in one statement.\n# Note that you can provide arguments to the model, but we didn't.\naf = AffinityPropagation().fit(X_train)\nprint('Done')\n\n# Pull the number of clusters and cluster assignments for each data point.\ncluster_centers_indices = af.cluster_centers_indices_\nn_clusters_ = len(cluster_centers_indices)\nlabels = af.labels_\n\nprint('Estimated number of clusters: {}'.format(n_clusters_))\n\n\n# Plot the clusters\nplt.figure(1)\nplt.clf()\n\n# Cycle through each cluster and graph them with a center point for the\n# exemplar and lines from the exemplar to each data point in the cluster.\ncolors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')\nfor k, col in zip(range(n_clusters_), colors):\n class_members = labels == k\n cluster_center = X_train[cluster_centers_indices[k]]\n plt.plot(X_train[class_members, 0], X_train[class_members, 1], col + '.')\n plt.plot(cluster_center[0],\n cluster_center[1],\n 'o',\n markerfacecolor=col,\n markeredgecolor='k')\n for x in X_train[class_members]:\n plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)\n\nplt.title('Estimated number of clusters: {}'.format(n_clusters_))\nplt.show()",
"Blobs 2",
"# Create blobs\n# The coordinates of the centers of our blobs.\ncenters = [[0, 0], [-10, 0], [10, 0]]\n\n# Make 10,000 rows worth of data with two features representing three\n# clusters, each having a standard deviation of 1.\nX, y = make_blobs(\n n_samples=10000,\n centers=centers,\n cluster_std=1.5,\n n_features=2,\n random_state=123)\n\nplt.scatter(X[:, 0], X[:, 1], c=y)\nplt.show()\n\n#Divide into training and test sets.\nX_train, X_test, y_train, y_test = train_test_split(\n X,\n y,\n test_size=0.9,\n random_state=42)",
"K-Means",
"# Normalize the data.\n#X_norm = normalize(X_train)\n\n# Reduce it to two components.\nX_pca = PCA(2).fit_transform(X_train)\n\n# Calculate predicted values.\ny_pred = KMeans(n_clusters=3, random_state=42).fit_predict(X_pca)\n\n# Plot the solution.\nplt.scatter(X_pca[:, 0], X_pca[:, 1], c=y_pred)\nplt.show()\n\n# Check the solution against the data.\nprint('Comparing k-means clusters against the data:')\nprint(pd.crosstab(y_pred, y_train))",
"Mean Shift Clustering",
"# Here we set the bandwidth. This function automatically derives a bandwidth\n# number based on an inspection of the distances among points in the data.\nbandwidth = estimate_bandwidth(X_train, quantile=0.2, n_samples=500)\n\n# Declare and fit the model.\nms = MeanShift(bandwidth=bandwidth, bin_seeding=True)\nms.fit(X_train)\n\n# Extract cluster assignments for each data point.\nlabels = ms.labels_\n\n# Coordinates of the cluster centers.\ncluster_centers = ms.cluster_centers_\n\n# Count our clusters.\nn_clusters_ = len(np.unique(labels))\n\nprint(\"Number of estimated clusters: {}\".format(n_clusters_))\n\n\nplt.scatter(X_train[:, 0], X_train[:, 1], c=labels)\nplt.show()\n\nprint('Comparing the assigned categories to the ones in the data:')\nprint(pd.crosstab(y_train,labels))",
"Spectral Clustering",
"# We know we're looking for three clusters.\nn_clusters=3\n\n# Declare and fit the model.\nsc = SpectralClustering(n_clusters=n_clusters)\nsc.fit(X_train)\n\n#Predicted clusters.\npredict=sc.fit_predict(X_train)\n\n#Graph results.\nplt.scatter(X_train[:, 0], X_train[:, 1], c=predict)\nplt.show()\n\nprint('Comparing the assigned categories to the ones in the data:')\nprint(pd.crosstab(y_train,predict))",
"Affinity Propagation",
"# Declare the model and fit it in one statement.\n# Note that you can provide arguments to the model, but we didn't.\naf = AffinityPropagation().fit(X_train)\nprint('Done')\n\n# Pull the number of clusters and cluster assignments for each data point.\ncluster_centers_indices = af.cluster_centers_indices_\nn_clusters_ = len(cluster_centers_indices)\nlabels = af.labels_\n\nprint('Estimated number of clusters: {}'.format(n_clusters_))\n\n\n# Plot the clusters\nplt.figure(1)\nplt.clf()\n\n# Cycle through each cluster and graph them with a center point for the\n# exemplar and lines from the exemplar to each data point in the cluster.\ncolors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')\nfor k, col in zip(range(n_clusters_), colors):\n class_members = labels == k\n cluster_center = X_train[cluster_centers_indices[k]]\n plt.plot(X_train[class_members, 0], X_train[class_members, 1], col + '.')\n plt.plot(cluster_center[0],\n cluster_center[1],\n 'o',\n markerfacecolor=col,\n markeredgecolor='k')\n for x in X_train[class_members]:\n plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)\n\nplt.title('Estimated number of clusters: {}'.format(n_clusters_))\nplt.show()",
"Blobs 3",
"# Create blobs\n# The coordinates of the centers of our blobs.\ncenters = [[0, 0], [-3, 3], [5, 5]]\n\n# Make 10,000 rows worth of data with two features representing three\n# clusters, each having a standard deviation of 1.\nX, y = make_blobs(\n n_samples=10000,\n centers=centers,\n cluster_std=1,\n n_features=2,\n random_state=123)\n\nplt.scatter(X[:, 0], X[:, 1], c=y)\nplt.show()\n\n#Divide into training and test sets.\nX_train, X_test, y_train, y_test = train_test_split(\n X,\n y,\n test_size=0.9,\n random_state=42)",
"K-Means",
"# Normalize the data.\n#X_norm = normalize(X_train)\n\n# Reduce it to two components.\nX_pca = PCA(2).fit_transform(X_train)\n\n# Calculate predicted values.\ny_pred = KMeans(n_clusters=3, random_state=42).fit_predict(X_pca)\n\n# Plot the solution.\nplt.scatter(X_pca[:, 0], X_pca[:, 1], c=y_pred)\nplt.show()\n\n# Check the solution against the data.\nprint('Comparing k-means clusters against the data:')\nprint(pd.crosstab(y_pred, y_train))",
"Mean Shift Clustering",
"# Here we set the bandwidth. This function automatically derives a bandwidth\n# number based on an inspection of the distances among points in the data.\nbandwidth = estimate_bandwidth(X_train, quantile=0.2, n_samples=500)\n\n# Declare and fit the model.\nms = MeanShift(bandwidth=bandwidth, bin_seeding=True)\nms.fit(X_train)\n\n# Extract cluster assignments for each data point.\nlabels = ms.labels_\n\n# Coordinates of the cluster centers.\ncluster_centers = ms.cluster_centers_\n\n# Count our clusters.\nn_clusters_ = len(np.unique(labels))\n\nprint(\"Number of estimated clusters: {}\".format(n_clusters_))\n\n\nplt.scatter(X_train[:, 0], X_train[:, 1], c=labels)\nplt.show()\n\nprint('Comparing the assigned categories to the ones in the data:')\nprint(pd.crosstab(y_train,labels))",
"Spectral Clustering",
"# We know we're looking for three clusters.\nn_clusters=3\n\n# Declare and fit the model.\nsc = SpectralClustering(n_clusters=n_clusters)\nsc.fit(X_train)\n\n#Predicted clusters.\npredict=sc.fit_predict(X_train)\n\n#Graph results.\nplt.scatter(X_train[:, 0], X_train[:, 1], c=predict)\nplt.show()\n\nprint('Comparing the assigned categories to the ones in the data:')\nprint(pd.crosstab(y_train,predict))",
"Affinity Propagation",
"# Declare the model and fit it in one statement.\n# Note that you can provide arguments to the model, but we didn't.\naf = AffinityPropagation().fit(X_train)\nprint('Done')\n\n# Pull the number of clusters and cluster assignments for each data point.\ncluster_centers_indices = af.cluster_centers_indices_\nn_clusters_ = len(cluster_centers_indices)\nlabels = af.labels_\n\nprint('Estimated number of clusters: {}'.format(n_clusters_))\n\n\n# Plot the clusters\nplt.figure(1)\nplt.clf()\n\n# Cycle through each cluster and graph them with a center point for the\n# exemplar and lines from the exemplar to each data point in the cluster.\ncolors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')\nfor k, col in zip(range(n_clusters_), colors):\n class_members = labels == k\n cluster_center = X_train[cluster_centers_indices[k]]\n plt.plot(X_train[class_members, 0], X_train[class_members, 1], col + '.')\n plt.plot(cluster_center[0],\n cluster_center[1],\n 'o',\n markerfacecolor=col,\n markeredgecolor='k')\n for x in X_train[class_members]:\n plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)\n\nplt.title('Estimated number of clusters: {}'.format(n_clusters_))\nplt.show()",
"Conclusions\nWhen applying kmeans to different blobs, we see that overlapping exists when the centres that are given are close to each other. It can be seen how k-means assumes that clusters are radially symmetrical and that the edges curve outwards in all cases.\nMean shift clustering improves the classification of each datapoint assigning it to the \"true\" taking more computing time to do so.\nIn the case of blobbs 1, where the std is high compared to the proximity of the clusters it can be seen how the clusters that it produces come close to each other not having an outwards edge and with some overlap (they are not clearly separated) that can also be seen in the classification accuracy.\nThere is a significant difference between the clusters that k-means produces and the clusters that mean shift produces, being the latter better from a classification standpoint.\nIn the Spectral cluster, it can be seen how points from one of the clusters are scattered within the rest, mosr probably due tio the similarity measures that is using. In all cases, independently of where the initial blobs are (centers) of their size (std) the Spectral Clusters finds similarities between clusters, expect for the last clase in which the blobs are originally overlapping but the spectral Clustering separated the clusters.\nThe affinity propagation increases exponentially the number of clusters but the aggregation of clusters remain within the shape and size of the clusters that have been calculated using the opther methodologies. In this case time to execute the algorithm grows depending on the number of datapoints included. As no preference parameter is set, the medians of the similarity values are used as in all theses cases there is no reason to chose certain datapoints as exemplars."
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
jdhp-docs/python_notebooks
|
nb_dev_jupyter/notebook_snippets_en.ipynb
|
mit
|
[
"Notebook snippets, tips and tricks\nTODO:\n* Read https://www.dataquest.io/blog/jupyter-notebook-tips-tricks-shortcuts/\n* Read http://blog.juliusschulz.de/blog/ultimate-ipython-notebook\n* howto avoid loosing matplotlib interactive rendering when a document is converted to HTML ?\n * https://www.reddit.com/r/IPython/comments/36p360/try_matplotlib_notebook_for_interactive_plots/\n * http://stackoverflow.com/questions/36151181/exporting-interactive-jupyter-notebook-to-html\n * https://jakevdp.github.io/blog/2013/12/05/static-interactive-widgets/\n* table of contents (JS)\n* matplotlib / D3.js interaction\n* matplotlib animations: how to make it faster\n* inspiration\n * http://louistiao.me/posts/notebooks/embedding-matplotlib-animations-in-jupyter-notebooks/\n * https://github.com/ltiao/notebooks\n * https://blog.dominodatalab.com/lesser-known-ways-of-using-notebooks/\n* Howto make (personalized) Reveal.js slides from this notebook: https://forum.poppy-project.org/t/utiliser-jupyter-pour-des-presentations-etape-par-etape-use-jupyter-to-present-step-by-step/2271/2\n* See https://blog.dominodatalab.com/lesser-known-ways-of-using-notebooks/\nExtension wishlist and todo:\n- Table of content\n- Hide some blocks in the HTML export\n - See https://github.com/jupyter/notebook/issues/534\n- Customize CSS in HTML export\n- Add disqus in HTML export\n - See: https://github.com/jupyter/nbviewer/issues/80\n - Example: http://nbviewer.jupyter.org/gist/tonyfast/977184c1243287e7e55e\n- Add metadata header/footer (initial publication date, last revision date, author, email, website, license, ...)\n- Vim like editor/navigation shortcut keys (search, search+edit, ...)\n- Spell checking\n - See https://github.com/ipython/ipython/issues/3216#issuecomment-59507673 and http://www.simulkade.com/posts/2015-04-07-spell-checking-in-jupyter-notebooks.html\nInspiration:\n- https://github.com/jupyter/jupyter/wiki/A-gallery-of-interesting-Jupyter-Notebooks\nImport directives",
"%matplotlib notebook\n\n# As an alternative, one may use: %pylab notebook\n\n# For old Matplotlib and Ipython versions, use the non-interactive version:\n# %matplotlib inline or %pylab inline\n\n# To ignore warnings (http://stackoverflow.com/questions/9031783/hide-all-warnings-in-ipython)\nimport warnings\nwarnings.filterwarnings('ignore')\n\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport ipywidgets\nfrom ipywidgets import interact",
"Useful keyboard shortcuts\n\nEnter edit mode: Enter\nEnter command mode: Escape\n\nIn command mode:\n\n\nShow keyboard shortcuts: h\n\n\nFind and replace: f\n\n\nInsert a cell above the selection: a\n\nInsert a cell below the selection: b\n\nSwitch to Markdown: m\n\n\nDelete the selected cells: dd (type twice 'd' quickly)\n\n\nUndo cell deletion: z\n\n\nExecute the selected cell: Ctrl + Enter\n\nExecute the selected cell and select the next cell: Shift + Enter\n\nExecute the selected cell and insert below: Alt + Enter\n\n\nToggle output: o\n\n\nToggle line number: l\n\n\nCopy selected cells: c\n\n\nPaste copied cells below: v\n\n\nSelect the previous cell: k\n\n\nSelect the next cell: j\n\n\nMerge selected cells, or current cell with cell below if only one cell selected: Shift + m\n\n\nIn edit mode:\n\nCode completion or indent: Tab\n\nTooltip: Shift + Tab\n\nType \"Shift + Tab\" twice to see the online documentation of the selected element\nType \"Shift + Tab\" 4 times to the online documentation in a dedicated frame\n\n\n\nIndent: ⌘] (on MacOS)\n\n\nDedent: ⌘[ (on MacOS)\n\n\nExecute the selected cell: Ctrl + Enter\n\nExecute the selected cell and select the next cell: Shift + Enter\n\nExecute the selected cell and insert below: Alt + Enter\n\n\nCut a cell at the current cursor position: Ctrl + Shift + -\n\n\nMatplotlib\nTo plot a figure within a notebook, insert the\n%matplotlib notebook (or %pylab notebook)\ndirective at the begining of the document.\nAs an alternative, one may use\n%matplotlib inline (or %pylab inline)\nfor non-interactive plots on old Matplotlib/Ipython versions.\n2D plots",
"x = np.arange(-2 * np.pi, 2 * np.pi, 0.1)\ny = np.sin(x)\nplt.plot(x, y)",
"3D plots",
"from mpl_toolkits.mplot3d import axes3d\n\n# Build datas ###############\n\nx = np.arange(-5, 5, 0.25)\ny = np.arange(-5, 5, 0.25)\n\nxx,yy = np.meshgrid(x, y)\nz = np.sin(np.sqrt(xx**2 + yy**2))\n\n# Plot data #################\n\nfig = plt.figure()\nax = axes3d.Axes3D(fig)\nax.plot_wireframe(xx, yy, z)\n\nplt.show()",
"Animations",
"from matplotlib.animation import FuncAnimation\n\n# Plots\nfig, ax = plt.subplots()\n\ndef update(frame):\n x = np.arange(frame/10., frame/10. + 2. * math.pi, 0.1)\n ax.clear()\n ax.plot(x, np.cos(x))\n\n # Optional: save plots\n filename = \"img_{:03}.png\".format(frame)\n plt.savefig(filename)\n\n# Note: \"interval\" is in ms\nanim = FuncAnimation(fig, update, interval=100)\n\nplt.show()",
"Interactive plots with Plotly\nTODO: https://plot.ly/ipython-notebooks/\nInteractive plots with Bokeh\nTODO: http://bokeh.pydata.org/en/latest/docs/user_guide/notebook.html\nEmbedded HTML and Javascript",
"%%html\n<div id=\"toc\"></div>\n\n%%javascript\nvar toc = document.getElementById(\"toc\");\ntoc.innerHTML = \"<b>Table of contents:</b>\";\ntoc.innerHTML += \"<ol>\"\n\nvar h_list = $(\"h2, h3\"); //$(\"h2\"); // document.getElementsByTagName(\"h2\");\nfor(var i = 0 ; i < h_list.length ; i++) {\n var h = h_list[i];\n var h_str = h.textContent.slice(0, -1); // \"slice(0, -1)\" remove the last character\n if(h_str.length > 0) {\n if(h.tagName == \"H2\") { // https://stackoverflow.com/questions/10539419/javascript-get-elements-tag\n toc.innerHTML += \"<li><a href=\\\"#\" + h_str.replace(/\\s+/g, '-') + \"\\\">\" + h_str + \"</a></li>\";\n } else if(h.tagName == \"H3\") { // https://stackoverflow.com/questions/10539419/javascript-get-elements-tag\n toc.innerHTML += \"<li> <a href=\\\"#\" + h_str.replace(/\\s+/g, '-') + \"\\\">\" + h_str + \"</a></li>\";\n }\n }\n}\n\ntoc.innerHTML += \"</ol>\"",
"IPython built-in magic commands\nSee http://ipython.readthedocs.io/en/stable/interactive/magics.html\nExecute an external python script",
"%run ./notebook_snippets_run_test.py\n\n%run ./notebook_snippets_run_mpl_test.py",
"Load an external python script\nLoad the full script",
"# %load ./notebook_snippets_run_mpl_test.py\n#!/usr/bin/env python3\n\n# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"\nThis module has been written to illustrate the ``%run`` magic command in\n``notebook_snippets.ipynb``.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef main():\n x = np.arange(-10, 10, 0.1)\n y = np.cos(x)\n\n plt.plot(x, y)\n plt.grid(True)\n\n plt.show()\n\nif __name__ == '__main__':\n main()\n",
"Load a specific symbol (funtion, class, ...)",
"# %load -s main ./notebook_snippets_run_mpl_test.py\ndef main():\n x = np.arange(-10, 10, 0.1)\n y = np.cos(x)\n\n plt.plot(x, y)\n plt.grid(True)\n\n plt.show()\n",
"Load specific lines",
"# %load -r 22-41 ./notebook_snippets_run_mpl_test.py\n\n\"\"\"\nThis module has been written to illustrate the ``%run`` magic command in\n``notebook_snippets.ipynb``.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef main():\n x = np.arange(-10, 10, 0.1)\n y = np.cos(x)\n\n plt.plot(x, y)\n plt.grid(True)\n\n plt.show()\n\nif __name__ == '__main__':\n main()",
"Time measurement\n%time",
"%%time\nplt.hist(np.random.normal(loc=0.0, scale=1.0, size=100000), bins=50)",
"%timeit",
"%%timeit\nplt.hist(np.random.normal(loc=0.0, scale=1.0, size=100000), bins=50)",
"ipywidget\nOn jupyter lab, you should install widgets extension first (see https://ipywidgets.readthedocs.io/en/latest/user_install.html#installing-the-jupyterlab-extension):\njupyter labextension install @jupyter-widgets/jupyterlab-manager",
"#help(ipywidgets)\n#dir(ipywidgets)\n\nfrom ipywidgets import IntSlider\nfrom IPython.display import display\n\nslider = IntSlider(min=1, max=10)\ndisplay(slider)",
"ipywidgets.interact\nDocumentation\nSee http://ipywidgets.readthedocs.io/en/latest/examples/Using%20Interact.html",
"#help(ipywidgets.interact)",
"Using interact as a decorator with named parameters\nTo me, this is the best option for single usage functions...\nText",
"@interact(text=\"IPython Widgets\")\ndef greeting(text):\n print(\"Hello {}\".format(text))",
"Integer (IntSlider)",
"@interact(num=5)\ndef square(num):\n print(\"{} squared is {}\".format(num, num*num))\n\n@interact(num=(0, 100))\ndef square(num):\n print(\"{} squared is {}\".format(num, num*num))\n\n@interact(num=(0, 100, 10))\ndef square(num):\n print(\"{} squared is {}\".format(num, num*num))",
"Float (FloatSlider)",
"@interact(num=5.)\ndef square(num):\n print(\"{} squared is {}\".format(num, num*num))\n\n@interact(num=(0., 10.))\ndef square(num):\n print(\"{} squared is {}\".format(num, num*num))\n\n@interact(num=(0., 10., 0.5))\ndef square(num):\n print(\"{} squared is {}\".format(num, num*num))",
"Boolean (Checkbox)",
"@interact(upper=False)\ndef greeting(upper):\n text = \"hello\"\n if upper:\n print(text.upper())\n else:\n print(text.lower())",
"List (Dropdown)",
"@interact(name=[\"John\", \"Bob\", \"Alice\"])\ndef greeting(name):\n print(\"Hello {}\".format(name))",
"Dictionnary (Dropdown)",
"@interact(word={\"One\": \"Un\", \"Two\": \"Deux\", \"Three\": \"Trois\"})\ndef translate(word):\n print(word)\n\nx = np.arange(-2 * np.pi, 2 * np.pi, 0.1)\n\n@interact(function={\"Sin\": np.sin, \"Cos\": np.cos})\ndef plot(function):\n y = function(x)\n plt.plot(x, y)",
"Using interact as a decorator\nText",
"@interact\ndef greeting(text=\"World\"):\n print(\"Hello {}\".format(text))",
"Integer (IntSlider)",
"@interact\ndef square(num=2):\n print(\"{} squared is {}\".format(num, num*num))\n\n@interact\ndef square(num=(0, 100)):\n print(\"{} squared is {}\".format(num, num*num))\n\n@interact\ndef square(num=(0, 100, 10)):\n print(\"{} squared is {}\".format(num, num*num))",
"Float (FloatSlider)",
"@interact\ndef square(num=5.):\n print(\"{} squared is {}\".format(num, num*num))\n\n@interact\ndef square(num=(0., 10.)):\n print(\"{} squared is {}\".format(num, num*num))\n\n@interact\ndef square(num=(0., 10., 0.5)):\n print(\"{} squared is {}\".format(num, num*num))",
"Boolean (Checkbox)",
"@interact\ndef greeting(upper=False):\n text = \"hello\"\n if upper:\n print(text.upper())\n else:\n print(text.lower())",
"List (Dropdown)",
"@interact\ndef greeting(name=[\"John\", \"Bob\", \"Alice\"]):\n print(\"Hello {}\".format(name))",
"Dictionnary (Dropdown)",
"@interact\ndef translate(word={\"One\": \"Un\", \"Two\": \"Deux\", \"Three\": \"Trois\"}):\n print(word)\n\nx = np.arange(-2 * np.pi, 2 * np.pi, 0.1)\n\n@interact\ndef plot(function={\"Sin\": np.sin, \"Cos\": np.cos}):\n y = function(x)\n plt.plot(x, y)",
"Using interact as a function\nTo me, this is the best option for multiple usage functions...\nText",
"def greeting(text):\n print(\"Hello {}\".format(text))\n \ninteract(greeting, text=\"IPython Widgets\")",
"Integer (IntSlider)",
"def square(num):\n print(\"{} squared is {}\".format(num, num*num))\n\ninteract(square, num=5)\n\ndef square(num):\n print(\"{} squared is {}\".format(num, num*num))\n\ninteract(square, num=(0, 100))\n\ndef square(num):\n print(\"{} squared is {}\".format(num, num*num))\n\ninteract(square, num=(0, 100, 10))",
"Float (FloatSlider)",
"def square(num):\n print(\"{} squared is {}\".format(num, num*num))\n\ninteract(square, num=5.)\n\ndef square(num):\n print(\"{} squared is {}\".format(num, num*num))\n\ninteract(square, num=(0., 10.))\n\ndef square(num):\n print(\"{} squared is {}\".format(num, num*num))\n\ninteract(square, num=(0., 10., 0.5))",
"Boolean (Checkbox)",
"def greeting(upper):\n text = \"hello\"\n if upper:\n print(text.upper())\n else:\n print(text.lower())\n\ninteract(greeting, upper=False)",
"List (Dropdown)",
"def greeting(name):\n print(\"Hello {}\".format(name))\n\ninteract(greeting, name=[\"John\", \"Bob\", \"Alice\"])",
"Dictionnary (Dropdown)",
"def translate(word):\n print(word)\n\ninteract(translate, word={\"One\": \"Un\", \"Two\": \"Deux\", \"Three\": \"Trois\"})\n\nx = np.arange(-2 * np.pi, 2 * np.pi, 0.1)\n\ndef plot(function):\n y = function(x)\n plt.plot(x, y)\n\ninteract(plot, function={\"Sin\": np.sin, \"Cos\": np.cos})",
"Example of using multiple widgets on one function",
"@interact(upper=False, name=[\"john\", \"bob\", \"alice\"])\ndef greeting(upper, name):\n text = \"hello {}\".format(name)\n if upper:\n print(text.upper())\n else:\n print(text.lower())",
"Display images (PNG, JPEG, GIF, ...)\nWithin a code cell (using IPython.display)",
"from IPython.display import Image\nImage(\"fourier.gif\")",
"Within a Markdown cell\n\nSound player widget\nSee: https://ipython.org/ipython-doc/dev/api/generated/IPython.display.html#IPython.display.Audio",
"from IPython.display import Audio",
"Generate a sound",
"framerate = 44100\nt = np.linspace(0, 5, framerate*5)\ndata = np.sin(2*np.pi*220*t) + np.sin(2*np.pi*224*t)\n\nAudio(data, rate=framerate)",
"Generate a multi-channel (stereo or more) sound",
"data_left = np.sin(2 * np.pi * 220 * t)\ndata_right = np.sin(2 * np.pi * 224 * t)\n\nAudio([data_left, data_right], rate=framerate)",
"From URL",
"Audio(\"http://www.nch.com.au/acm/8k16bitpcm.wav\")\n\nAudio(url=\"http://www.w3schools.com/html/horse.ogg\")",
"From file",
"#Audio('/path/to/sound.wav')\n\n#Audio(filename='/path/to/sound.ogg')",
"From bytes",
"#Audio(b'RAW_WAV_DATA..)\n\n#Audio(data=b'RAW_WAV_DATA..)",
"Youtube widget\nClass for embedding a YouTube Video in an IPython session, based on its video id.\ne.g. to embed the video from https://www.youtube.com/watch?v=0HlRtU8clt4 , you would do:\nSee https://ipython.org/ipython-doc/dev/api/generated/IPython.display.html#IPython.display.YouTubeVideo",
"from IPython.display import YouTubeVideo\n\nvid = YouTubeVideo(\"0HlRtU8clt4\")\ndisplay(vid)",
"Convert a Reveal.js presentation written with Markdown to a Jupyter notebook\nThis is a quick and dirty hack to have one cell per slide in the notebook; it assumes the string \"---\" is used to separate slides within the markdown file.\n\ncopy the markdown document within the Jupyter notebook (in a Markdown cell), save it and close it;\nto split this cell at each \"---\", open the ipynb notebook with vim and enter the following command and save the file:\n\n:%s/,\\n \"---\\\\n\",/\\r ]\\r },\\r {\\r \"cell_type\": \"markdown\",\\r \"metadata\": {},\\r \"source\": [/gc"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
jaalonso/AFV
|
Programacion_imperativa_en_Python.ipynb
|
gpl-3.0
|
[
"<a href=\"https://colab.research.google.com/github/jaalonso/AFV/blob/master/Programacion_imperativa_en_Python.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>\nProgramación imperativa en Python con Colaboratory\nPython como calculadora",
"2 * 5 + 6 - (100 + 3)\n\n8/2, 8/3\n\n7 // 2, 7 % 2\n\n2 ** 10",
"Variables y asignaciones",
"a = 8\nb = 2 * a\nb\n\na + b\n\na = a + 1\na\n\na, b = 2, 5\na + 2 * b",
"Definición de funciones\nEjercicio 1. Definir la función suma tal que suma(x, y) es la suma de x e y. . Por ejemplo,\n~~~\n\n\n\nsuma(2,3)\n5\n~~~",
"def suma(x, y):\n return x+y\n\nsuma(2, 3)",
"Escritura y lectura\nEjercicio 2. Definir el procedimiento suma que lea dos números y escriba su suma. Por ejemplo,\n~~~\n\n\n\nsuma()\nEscribe el primer número: 2\nEscribe el segundo número: 3\nLa suma es: 5\n~~~",
"def suma():\n a = eval(input(\"Escribe el primer número: \"))\n b = eval(input(\"Escribe el segundo número: \"))\n print(\"La suma es:\",a+b)\n\nsuma()",
"La estructura condicional\nCondicionales simples\nEjercicio 3. Definir, usando condicionales, la función maximo tal que maximo(x,y) es el máximo de x e y. Por ejemplo,\n~~~\n\n\n\nmaximo(2, 5)\n5\nmaximo(2, 1)\n2\n~~~",
"def maximo(x, y) : \n if x > y:\n return x\n else:\n return y\n\nmaximo(2, 5)\n\nmaximo (2, 1)",
"Condicionales múltiples\nEjercicio 4. Definir la función signo tal que signo(x) es el signo de x. Por ejemplo,\n~~~\n\n\n\nsigno(5)\n1\nsigno(-7)\n- 1\nsigno(0)\n0\n~~~",
"def signo(x): \n if x > 0: \n return 1\n elif x < 0:\n return -1\n else:\n return 0\n\nsigno(5)\n\nsigno(-7)\n\nsigno(0)",
"Estructuras iterativas\nBucles mientras\nEjercicio 5. Definir, con un bucle while, la función sumaImpares tal que sumaImpares(n) es la suma de los n primeros números impares. Por ejemplo,\n~~~\n\n\n\nsumaImpares(3)\n9\nsumaImpares(4)\n16\n~~~",
"def sumaImpares(n):\n s, k = 0, 0 \n while k < n:\n s = s + 2*k + 1\n k = k + 1\n return s\n\nsumaImpares(3)\n\nsumaImpares(4)",
"Ejercicio 6. Definir la función mayorExponente tal que mayorExponente(a,n) es el mayor k tal que a^k divide a n. Por ejemplo,\n~~~\n\n\n\nmayorExponente(2,40);\n3\n~~~",
"def mayorExponente(a, n):\n k = 0\n while (n % a == 0):\n n = n/a\n k = k + 1\n return k\n\nmayorExponente(2, 40)",
"Bucle para\nEjercicio 7. Definir, por iteración con for, la función fact tal que fact(n) es el factorial de n. Por ejemplo,\n~~~\n\n\n\nfact 4\n24\n~~~",
"def fact(n): \n f = 1\n for k in range(1,n+1):\n f = f * k\n return f\n\nfact(4)",
"Bucle para sobre listas\nEjercicio 8. Definir, por iteración, la función suma tal que suma(xs) es a suma de los números de la lista xs. Por ejemplo,\n~~~\n\n\n\nsuma([3,2,5])\n10\n~~~",
"def suma(xs): \n r = 0\n for x in xs: \n r = x + r\n return r\n\nsuma([3, 2, 5])",
"Recursión\nEjercicio 9. Definir, por recursión, la función fact tal que factR(n) es el factorial de n. Por ejemplo,\n~~~\n\n\n\nfact 4\n24\n~~~",
"def fact(n):\n if n == 0: \n return 1\n else: \n return n * fact(n-1)\n\nfact(4)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
mhdella/pattern_classification
|
machine_learning/decision_trees/decision-tree-cheatsheet.ipynb
|
gpl-3.0
|
[
"%load_ext watermark\n%watermark -a 'Sebastian Raschka' -d -v",
"Cheatsheet for Decision Tree Classification\nAlgorithm\n\nStart at the root node as parent node\nSplit the parent node at the feature a to minimize the sum of the child node impurities (maximize information gain)\nAssign training samples to new child nodes\nStop if leave nodes are pure or early stopping criteria is satisfied, else repeat steps 1 and 2 for each new child node\n\nStopping Rules\n\na maximal node depth is reached\nsplitting a note does not lead to an information gain\n\nCriterion\nSplitting criterion: Information Gain (IG), sum of node impurities\nObjective function: Maximize IG at each split, eqiv. minimize the the impurity criterion\nInformation Gain (IG)\nExamples below are given for binary splits.\n$$IG(D_{p}, a) = I(D_{p}) - \\frac{N_{left}}{N_p} I(D_{left}) - \\frac{N_{right}}{N_p} I(D_{right})$$\n\n$IG$: Information Gain\n$a$: feature to perform the split\n$N_p$: number of samples in the parent node\n$N_{left}$: number of samples in the left child node\n$N_{right}$: number of samples in the right child node\n$I$: impurity\n$D_{p}$: training subset of the parent node\n$D_{left}$: training subset of the left child node\n$D_{right}$: training subset of the right child node\n\nImpurity (I) Indices\nEntropy\nThe entropy is defined as\n$$I_H(t) = - \\sum_{i =1}^{C} p(i \\mid t) \\;log_2 \\,p(i \\mid t)$$\nfor all non-empty classes ($p(i \\mid t) \\neq 0$), where $p(i \\mid t)$ is the proportion (or frequency or probability) of the samples that belong to class $i$ for a particular node $t$; $C$ is the number of unique class labels.\nThe entropy is therefore 0 if all samples at a node belong to the same class, and the entropy is maximal if we have an uniform class distribution. For example, in a binary class setting, the entropy is 0 if $p(i =1 \\mid t) =1$ or $p(i =0 \\mid t) =1$. 
And if the classes are distributed uniformly with $p(i =1 \\mid t) = 0.5$ and $p(i =0 \\mid t) =0.5$ the entropy is 1 (maximal), which we can visualize by plotting the entropy for binary class setting below.",
"import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\n\ndef entropy(p):\n return - p*np.log2(p) - (1 - p)*np.log2((1 - p))\nx = np.arange(0.0, 1.0, 0.01)\nent = [entropy(p) if p != 0 else None for p in x]\nplt.plot(x, ent)\nplt.ylim([0,1.1])\nplt.xlabel('p(i=1)')\nplt.axhline(y=1.0, linewidth=1, color='k', linestyle='--')\nplt.ylabel('Entropy')\nplt.show()",
"Gini Impurity\n$$I_G(t) = \\sum_{i =1}^{C}p(i \\mid t) \\big(1-p(i \\mid t)\\big)$$",
"def gini(p):\n return (p)*(1 - (p)) + (1-p)*(1 - (1-p))\n\nx = np.arange(0.0, 1.0, 0.01)\nplt.plot(x, gini(x))\nplt.ylim([0,1.1])\nplt.xlabel('p(i=1)')\nplt.axhline(y=0.5, linewidth=1, color='k', linestyle='--')\nplt.ylabel('Gini Impurity')\nplt.show()",
"Misclassification Error\n$$I_M(t) = 1 - max{{p_i}}$$",
"def error(p):\n return 1 - np.max([p, 1-p])\n\nx = np.arange(0.0, 1.0, 0.01)\nerr = [error(i) for i in x]\nplt.plot(x, err)\nplt.ylim([0,1.1])\nplt.xlabel('p(i=1)')\nplt.axhline(y=0.5, linewidth=1, color='k', linestyle='--')\nplt.ylabel('Misclassification Error')\nplt.show()",
"Comparison",
"fig = plt.figure()\nax = plt.subplot(111)\n\nfor i, lab in zip([ent, gini(x), err], \n ['Entropy', 'Gini Impurity', 'Misclassification Error']):\n line, = ax.plot(x, i, label=lab)\n\nax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15),\n ncol=3, fancybox=True, shadow=False)\n\nax.axhline(y=0.5, linewidth=1, color='k', linestyle='--')\nax.axhline(y=1.0, linewidth=1, color='k', linestyle='--')\nplt.ylim([0,1.1])\nplt.xlabel('p(i=1)')\nplt.ylabel('Impurity Index')\nplt.tight_layout()\nplt.show()"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
mlamoureux/PIMS_YRC
|
PDE_Solve_widget.ipynb
|
mit
|
[
"WaveSolver - slider\nM. Lamoureux May 31, 2016. Pacific Insitute for the Mathematical Sciences\nUpdated June 2017, to remove all reference to Bokeh, cuz it doesn't work now. (deprecated = broken)\n(Bokeh = Brokehn)\nThis code does some numerical simulation of wave propagation in 1D, suitable for demoing in a class. \nThis is a rewrite of some of my old Julia code, translated to run in Python. My belief is that Python is a more mature system and I will likely be more productive by sticking to Python instead of Julia. Let's see if this is true.\nThe code is based on formulas from the textbook Linear Partial Differential Equations for Scientists and Engineers, by Myint-U and Debnath. It's a good book -- you should buy it. Some extracts are also available online. \nIn this Jupyter notebook, animation has been added, using GUI sliders to advance the waveform in time.\nThis was also written as a test of how far we can push Bokeh and the Jupyter Hub in making some calculations and graphics. There is some dangerous code at the bottom, that can bring down our Jupyter Hub, probably because it eats up too much memory. Interesting for debugging purposes, but you have been warned.\nIntroduction to the 1D wave equation.\nThe basic idea is to represent the vibrations of a (horizonal) string under tension by a function $u(x,t)$ where $x$ is the horizontal displacement along the string, $t$ is the time parameter, and $y=u(x,t)$ indicates the (vertical) displacement of the string at point $x$ along the string, at time $t$ \nA typical wave would look something like\n$$u(x,t) = \\sin(x-ct)$$\nwhich represents a sinusoidal wave moving to the right at velocity $c$.\nFrom Newon's law of motion (mass times acceleration equals force), we can derive the wave equation for the string in the form \n$$ \\rho \\cdot u_{tt} = k \\cdot u_{xx}$$\nwhere $\\rho$ is a mass density, $u_{tt}$ is an acceleration (the second derivative w.r.t. 
time), $k$ is the modulus of elasticity and $u_{xx}$ is a measure of curvature (the second derivative w.r.t. $x$), causing the bending force. \nIncluding boundary and initial conditions, while setting the parameter $c^2 = k/\\rho$, we obtain the usual 1D wave equation on an interval as this:\n$$u_{tt} = c^2 u_{xx} \\mbox{ on interval } [x_0,x_1]$$\nsubject to boundary conditions\n$$u(x_0,t) = u(x_1, t) = 0 \\mbox{ for all $t$. }$$\nand initial conditions\n$$u(x,0) = u_0(x), \\quad u_t(x,t) = u_1(x) \\mbox{ on interval } [x_0,x_1].$$\nLet's write some PDE solvers for this 1D wave equation.\nStart with some simple solvers, then get more complex. \nThe function to solve the PDE needs to know $c^2, u_0, u_1$, the sampling intervals $dx$ and $dt$, and the number of time steps to execute. Everything else can be inferred from those inputs. The output should be a 2D array, indexed in x and t steps. \nMaybe we could actually output the solution $u(x,t)$ as well as the vector of indices for $x$ and $t$.\nSoftware tools\nWe import some tools for numerical work (NumPy) and plotting (Matplotlib), using Matplotlib because Boken seems to be broken. At least, it broke my code with deprecations.",
"%matplotlib inline\nimport numpy as np\nfrom matplotlib.pyplot import *\n",
"We also include some code from SciPy for numerical calculations",
"from scipy.linalg import solve_toeplitz # a matrix equation solver\nfrom scipy.integrate import cumtrapz # a numerical integrator, using trapezoid rule\n",
"And we include some code to create the graphical user interface -- namely, sliders. You can read about them here:\nhttp://bokeh.pydata.org/en/0.10.0/_images/notebook_interactors.png",
"from ipywidgets import interact",
"Explicit method of solution in finite differences\nThe finite difference method is probably the most common numerical method for solving PDEs. The derivatives are approximated by Newton difference ratios, and you step through in time to progress the solution from $t=0$ to some ending time.\nThe following defines a function that solves the wave equation, using an explicit finite difference method. We follow the notes in the referenced book by Myint-U and Debnath. \nThe differencing in spatial variable $x$ is done by a convolution, as this will be fast. \nWe print out the value of the CFL constant as a sanity check. The method is stable and convergent provided the CFL value is less than one. (This means $dt$ needs to be small enough.) See the book for details. \nThe input parameters are velocity squared $c2$, spatial step size $dx$, temporal step size $dt$, number of time steps $t$_$len$, initial position $u0$ and initial velocity $u1$. $u0$ and $u1$ are arrays of some length $N$, which the code will use to deduce everything it needs to know.\nI tend to think of these as dimensionless variables, but you can use real physical values if you like, so long as you are consistent. For instance $dx$ in meters, $dt$ in seconds, and $c2$ in (meters/second) squared.",
"# Based on Section 14.4 in Myint-U and Debnath's book\ndef w_solve1(c2,dx,dt,t_len,u0,u1):\n x_len = np.size(u0) # the length of u0 implicitly defines the num of points in x direction\n u = np.zeros((x_len,t_len),order='F') # output array initialized to zero \n e2 = c2*dt*dt/(dx*dx) # Courant parameter squared (test for convergence!)\n print(\"CFL value is \",np.sqrt(e2))\n kern = np.array([e2, 2*(1-e2), e2]) # the convolution kernel we need for laplacian solver\n u[:,0] = u0 # t=0 initial condition\n u[:,1] = np.convolve(u0,kern/2)[1:x_len+1] + dt*u1 # t=0 derivative condition, eqn 14.4.6\n for j in range(2,t_len):\n u[:,j] = np.convolve(u[:,j-1],kern)[1:x_len+1] - u[:,j-2] # eqn 14.4.3\n # let's produce the x and t vectors, in case we need them\n x = np.linspace(0,dx*x_len,x_len,endpoint=False)\n t = np.linspace(0,dt*t_len,t_len,endpoint=False)\n return u,x,t",
"Let's try a real wave equation solution. \nWe start with a simple triangle waveform.",
"x_len = 1000\nt_len = 1000\ndx = 1./x_len\ndt = 1./t_len\nx = np.linspace(0,1,x_len)\nt = np.linspace(0,1,t_len)\ntriangle = np.maximum(0.,.1-np.absolute(x-.4))",
"Now we call up our wave equation solver, using the parameters above",
"# Here we solve the wave equation, with initial position $u(x,0)$ set to the triangle waveform\n(u,x,t)=w_solve1(.5,dx,dt,t_len,triangle,0*triangle)",
"We can plot the inital waveform, just to see what it looks like.",
"plot(x,u[:,0])\n\ndef update(k=0):\n plot(x,u[:,k])\n show()",
"And the next cell sets up a slider which controls the above graphs (it moves time along)",
"# This runs an animation, controlled by a slider which advances time\ninteract(update,k=(0,t_len-1))",
"Derivative initial condition test",
"# We try again, but this time with the $u_t$ initial condition equal to the triangle impulse\n(u,x,t)=w_solve1(.5,dx,dt,3*t_len,0*triangle,1*triangle)",
"We can use the same update function, since nothing has changed.",
"interact(update,k=(0,3*t_len-1))",
"Implicit method\nHere we try an implicit method for solving the wave equation. Again from Myint-U and Debnath's book, in Section 14.5, part (B) on Hyperbolic equations. \nWe need to use scipy libraries as we have to solve a system of linear equation. In fact the system is tridiagonal and Toepliz, so this should be fast. I see how to use Toepliz in scipy, but I don't know how to tell it that the system is only tridiagonal. It should be possible to speed this up.",
"# Based on Section 14.5 (B) in Myint-U and Debnath's book\ndef w_solve2(c2,dx,dt,t_len,u0,u1):\n x_len = np.size(u0) # the length of u0 implicitly defines the num of points in x direction\n u = np.zeros((x_len,t_len),order='F') # output array initialized to zero \n e2 = c2*dt*dt/(dx*dx) # Courant parameter squared (test for convergence!)\n print(\"CFL value is \",np.sqrt(e2))\n kern = np.array([e2, 2*(1-e2), e2]) # the convolution kernel we need for laplacian solver\n u[:,0] = u0 # t=0 initial condition\n u[:,1] = np.convolve(u0,kern/2)[1:x_len+1] + dt*u1 # t=0 derivative condition, eqn 14.4.6\n # Note the above is a cheat, we are using the explicit method to find u[:,1], Should do this implicitly\n kern2 = np.array([e2, -2*(1+e2), e2]) # the convolution kernel we need for implicit solver. It is different.\n toepk = np.zeros(x_len) # this will hold the entries for the tridiagonal Toeplitz matrix\n toepk[0] = 2*(1+e2);\n toepk[1] = -e2\n for j in range(2,t_len):\n rhs = np.convolve(u[:,j-2],kern2)[1:x_len+1] + 4*u[:,j-1] # eqn 14.5.17\n u[:,j] = solve_toeplitz(toepk, rhs) # here is a linear system solver (hence an implicit method)\n # let's produce the x and t vectors, in case we need them\n x = np.linspace(0,dx*x_len,x_len,endpoint=False)\n t = np.linspace(0,dt*t_len,t_len,endpoint=False)\n return u,x,t\n\n(u,x,t)=w_solve2(.5,dx,dt,t_len,1*triangle,0*triangle)\n\ninteract(update,k=(0,3*t_len-1))",
"Derivative initial condition",
"(u,x,t)=w_solve2(.5,dx,dt,3*t_len,0*triangle,1*triangle)\n\ninteract(update,k=(0,3*t_len-1))",
"D'Alembert's solution\nSince the velocity $c$ is constant in these examples, we can get the exact solution via D'Alembert. The general solution will be of the form \n$$u(x,t) = \\phi(x+ct) + \\psi(x-ct). $$\nInitial conditions tell use that\n$$u(x,0) = \\phi(x) + \\psi(x) = f(x), $$ and\n$$u_t(x,0) = c\\phi'(x) - c\\psi'(x) = g(x). $$\nWith $G(x)$ the antiderivative of $g(x)$ with appropriate zero at zero, we get a 2x2 system\n$$\\phi(x) + \\psi(x) = f(x), \\ c(\\phi(x) - \\psi(x)) = G(x),$$\nwhich we solve as\n$$\\phi(x) = \\frac{1}{2}\\left( f(x) + \\frac{1}{c} G(x) \\right), \\\n \\psi(x) = \\frac{1}{2}\\left( f(x) - \\frac{1}{c} G(x) \\right).$$\nNow $f(x)$ is given as the argument $u0$ in the code. $G(x)$ can be computed using scipy. The arguments $x+ct$ and $x-ct$ must be converted to integer indices. They have to wrap around. And with the zero boundary condition, we need to wrap around with a negative reflection. \nThere is the messy question as to whether we require $u(0,t)$ to actually equal zero, or do we require it to be zero one index \"to the left\" of x=0. Let's not think too much about that just yet.",
"# Based on D'Alembert's solution, as described above\ndef w_solve3(c2,dx,dt,t_len,u0,u1):\n x_len = np.size(u0) # the length of u0 implicitly defines the num of points in x direction\n u = np.zeros((x_len,t_len),order='F') # output array initialized to zero \n c = np.sqrt(c2) # the actual velocity parameter is needed\n f = u0 # use notation from above notes\n G = cumtrapz(u1,dx=dx,initial=0) # the antiderivative, using cumulative trapezoidal rule\n f2 = np.append(f,-f[::-1]) # odd symmetry\n G2 = np.append(G,G[::-1]) # even symmetry\n phi2 = (f2 + G2/c)/2\n psi2 = (f2 - G2/c)/2\n x = np.linspace(0,dx*x_len,x_len,endpoint=False)\n t = np.linspace(0,dt*t_len,t_len,endpoint=False)\n # in the loop, we convert x+ct to index's into vectors phi2, psi2, modulo the vector length\n for j in range(t_len):\n ii1 = np.mod( np.round((x+c*t[j])/dx), 2*x_len)\n ii2 = np.mod( np.round((x-c*t[j])/dx), 2*x_len)\n u[:,j] = phi2[ii1.astype(int)] + psi2[ii2.astype(int)]\n return u,x,t\n\n(u,x,t)=w_solve3(.5,dx,dt,t_len,1*triangle,0*triangle)\n\ninteract(update,k=(0,t_len-1))\n",
"Derivative initial condition",
"(u,x,t)=w_solve3(.5,dx,dt,3*t_len,0*triangle,1*triangle)\n\ninteract(update,k=(0,3*t_len-1))",
"Comparing solutions\nIn principle, we want these different solution methods to be directly comparable.\nSo let's try this out, by computing the difference of two solution. \nHere we compare the explicit f.d. method with d'Alambert's method.",
"(u_exp,x,t)=w_solve1(.5,dx,dt,t_len,1*triangle,0*triangle)\n(u_dal,x,t)=w_solve3(.5,dx,dt,t_len,1*triangle,0*triangle)\n\ndef update2(k=0):\n plot(x,u_dal[:,k]-u_exp[:,k])\n show() \n\ninteract(update2,k=(0,t_len-1))",
"A moving wavefront\nLet's try an actual wave. We want something like\n$$u(x,t) = \\exp(-(x -x_a-ct)^2/w^2), $$\nwhere $x_a$ is the center of the Gaussian at $t=0$, $w$ is the width of the Gaussian, $c$ is the velocity of the wave.\nThis gives\n$$u_0(x) = \\exp(-(x -x_a)^2/w^2) \\\n u_1(x) = \\frac{2c(x-x_a)}{w^2}\\exp(-(x -x_a)^2/w^2) = \\frac{2c(x-x_a)}{w^2}u_0(x).$$",
"c = .707 # velocity\nx_len = 1000\nt_len = 1000\ndx = 1./x_len\ndt = 1./t_len\nx = np.linspace(0,1,x_len)\nt = np.linspace(0,1,t_len)\nu0 = np.exp(-(x-.5)*(x-.5)/.01)\nu1 = 2*c*u0*(x-.5)/.01\n\n(u,x,t)=w_solve3(c*c,dx,dt,t_len,u0,u1) # notice we input the velocity squared!\n\ninteract(update,k=(0,t_len-1))"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
LSSTC-DSFP/LSSTC-DSFP-Sessions
|
Sessions/Session09/Day1/gps/02-Inference.ipynb
|
mit
|
[
"Inference with GPs\nThe dataset needed for this worksheet can be downloaded. Once you have downloaded s9_gp_dat.tar.gz, and moved it to this folder, execute the following cell:",
"!tar -zxvf s9_gp_dat.tar.gz\n!mv *.txt data/",
"Here are the functions we wrote in the previous tutorial to compute and draw from a GP:",
"import numpy as np\nfrom scipy.linalg import cho_factor\n\n\ndef ExpSquaredKernel(t1, t2=None, A=1.0, l=1.0):\n \"\"\"\n Return the ``N x M`` exponential squared\n covariance matrix between time vectors `t1`\n and `t2`. The kernel has amplitude `A` and\n lengthscale `l`.\n \n \"\"\"\n if t2 is None:\n t2 = t1\n T2, T1 = np.meshgrid(t2, t1)\n return A ** 2 * np.exp(-0.5 * (T1 - T2) ** 2 / l ** 2)\n\n\ndef draw_from_gaussian(mu, S, ndraws=1, eps=1e-12):\n \"\"\"\n Generate samples from a multivariate gaussian\n specified by covariance ``S`` and mean ``mu``.\n \n (We derived these equations in Day 1, Notebook 01, Exercise 7.)\n \"\"\"\n npts = S.shape[0]\n L, _ = cho_factor(S + eps * np.eye(npts), lower=True)\n L = np.tril(L)\n u = np.random.randn(npts, ndraws)\n x = np.dot(L, u) + mu[:, None]\n return x.T\n\n\ndef compute_gp(t_train, y_train, t_test, sigma=0, A=1.0, l=1.0):\n \"\"\"\n Compute the mean vector and covariance matrix of a GP\n at times `t_test` given training points `y_train(t_train)`.\n The training points have uncertainty `sigma` and the\n kernel is assumed to be an Exponential Squared Kernel\n with amplitude `A` and lengthscale `l`.\n \n \"\"\"\n # Compute the required matrices\n kernel = ExpSquaredKernel\n Stt = kernel(t_train, A=1.0, l=1.0)\n Stt += sigma ** 2 * np.eye(Stt.shape[0])\n Spp = kernel(t_test, A=1.0, l=1.0)\n Spt = kernel(t_test, t_train, A=1.0, l=1.0)\n\n # Compute the mean and covariance of the GP\n mu = np.dot(Spt, np.linalg.solve(Stt, y_train))\n S = Spp - np.dot(Spt, np.linalg.solve(Stt, Spt.T))\n \n return mu, S",
"The Marginal Likelihood\nIn the previous notebook, we learned how to construct and sample from a simple GP. This is useful for making predictions, i.e., interpolating or extrapolating based on the data you measured. But the true power of GPs comes from their application to regression and inference: given a dataset $D$ and a model $M(\\theta)$, what are the values of the model parameters $\\theta$ that are consistent with $D$? The parameters $\\theta$ can be the hyperparameters of the GP (the amplitude and time scale), the parameters of some parametric model, or all of the above.\nA very common use of GPs is to model things you don't have an explicit physical model for, so quite often they are used to model \"nuisances\" in the dataset. But just because you don't care about these nuisances doesn't mean they don't affect your inference: in fact, unmodelled correlated noise can often lead to strong biases in the parameter values you infer. In this notebook, we'll learn how to compute likelihoods of Gaussian Processes so that we can marginalize over the nuisance parameters (given suitable priors) and obtain unbiased estimates for the physical parameters we care about.\nGiven a set of measurements $y$ distributed according to\n$$\n\\begin{align}\n y \\sim \\mathcal{N}(\\mathbf{\\mu}(\\theta), \\mathbf{\\Sigma}(\\alpha))\n\\end{align}\n$$\nwhere $\\theta$ are the parameters of the mean model $\\mu$ and $\\alpha$ are the hyperparameters of the covariance model $\\mathbf{\\Sigma}$, the marginal likelihood of $y$ is\n$$\n\\begin{align}\n \\ln P(y | \\theta, \\alpha) = -\\frac{1}{2}(y-\\mu)^\\top \\mathbf{\\Sigma}^{-1} (y-\\mu) - \\frac{1}{2}\\ln |\\mathbf{\\Sigma}| - \\frac{N}{2} \\ln 2\\pi\n\\end{align}\n$$\nwhere $||$ denotes the determinant and $N$ is the number of measurements. 
The term marginal refers to the fact that this expression implicitly integrates over all possible values of the Gaussian Process; this is not the likelihood of the data given one particular draw from the GP, but given the ensemble of all possible draws from $\\mathbf{\\Sigma}$.\n<div style=\"background-color: #D6EAF8; border-left: 15px solid #2E86C1;\">\n <h1 style=\"line-height:2.5em; margin-left:1em;\">Exercise 1</h1>\n</div>\n\nDefine a function ln_gp_likelihood(t, y, sigma, A=1, l=1) that returns the log-likelihood defined above for a vector of measurements y at a set of times t with uncertainty sigma. As before, A and l should get passed direcetly to the kernel function. Note that you're going to want to use np.linalg.slogdet to compute the log-determinant of the covariance instead of np.log(np.linalg.det). (Why?)",
"def ln_gp_likelihood(t, y, sigma=0, A=1.0, l=1.0):\n \"\"\"\n \n \"\"\"\n # do stuff in here\n pass",
"<div style=\"background-color: #D6EAF8; border-left: 15px solid #2E86C1;\">\n <h1 style=\"line-height:2.5em; margin-left:1em;\">Exercise 2</h1>\n</div>\n\nThe following dataset was generated from a zero-mean Gaussian Process with a Squared Exponential Kernel of unity amplitude and unknown timescale. Compute the marginal log likelihood of the data over a range of reasonable values of $l$ and find the maximum. Plot the likelihood (not log likelihood) versus $l$; it should be pretty Gaussian. How well are you able to constrain the timescale of the GP?",
"import matplotlib.pyplot as plt\nt, y, sigma = np.loadtxt(\"data/sample_data.txt\", unpack=True)\nplt.plot(t, y, \"k.\", alpha=0.5, ms=3)\nplt.xlabel(\"time\")\nplt.ylabel(\"data\");",
"<div style=\"background-color: #D6EAF8; border-left: 15px solid #2E86C1;\">\n <h1 style=\"line-height:2.5em; margin-left:1em;\">Exercise 3a</h1>\n</div>\n\nThe timeseries below was generated by a linear function of time, $y(t)= mt + b$. In addition to observational uncertainty $\\sigma$ (white noise), there is a fair bit of correlated (red) noise, which we will assume is well described\nby the squared exponential covariance with a certain (unknown) amplitude $A$ and timescale $l$.\nYour task is to estimate the values of $m$ and $b$, the slope and intercept of the line, respectively. In this part of the exercise, assume there is no correlated noise. Your model for the $n^\\mathrm{th}$ datapoint is thus\n$$\n\\begin{align}\n y_n \\sim \\mathcal{N}(m t_n + b, \\sigma_n\\mathbf{I})\n\\end{align}\n$$\nand the probability of the data given the model can be computed by calling your GP likelihood function:\npython\ndef lnprob(params):\n m, b = params\n model = m * t + b\n return ln_gp_likelihood(t, y - model, sigma, A=0, l=1)\nNote, importantly, that we are passing the residual vector, $y - (mt + b)$, to the GP, since above we coded up a zero-mean Gaussian process. We are therefore using the GP to model the residuals of the data after applying our physical model (the equation of the line).\nTo estimate the values of $m$ and $b$ we could generate a fine grid in those two parameters and compute the likelihood at every point. But since we'll soon be fitting for four parameters (in the next part), we might as well upgrade our inference scheme and use the emcee package to do Markov Chain Monte Carlo (MCMC). If you haven't used emcee before, check out the first few tutorials on the documentation page. 
The basic setup for the problem is this:\n```python\nimport emcee\nsampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)\ninitial = [4.0, 15.0]\np0 = initial + 1e-3 * np.random.randn(nwalkers, ndim)\nprint(\"Running burn-in...\")\np0, _, _ = sampler.run_mcmc(p0, nburn) # nburn = 500 should do\nsampler.reset()\nprint(\"Running production...\")\nsampler.run_mcmc(p0, nsteps); # nsteps = 1000 should do\n```\nwhere nwalkers is the number of walkers (something like 20 or 30 is fine), ndim is the number of dimensions (2 in this case), and lnprob is the log-probability function for the data given the model. Finally, p0 is a list of starting positions for each of the walkers. Above we picked some fiducial/eyeballed value for $m$ and $b$, then added a small random number to each to generate different initial positions for each walker. This will initialize all walkers in a ball centered on some point, and as the chain progresses they'll diffuse out and begin to explore the posterior.\nOnce you have sampled the posterior, plot several draws from it on top of the data. You can access a random draw from the posterior by doing\npython\nm, b = sampler.flatchain[np.random.randint(len(sampler.flatchain))]\nAlso plot the true line that generated the dataset (given by the variables m_true and b_true below). Do they agree, or is there bias in your inferred values? Use the corner package to plot the joint posterior. How many standard deviations away from the truth are your inferred values?",
"t, y, sigma = np.loadtxt(\"data/sample_data_line.txt\", unpack=True)\nm_true, b_true, A_true, l_true = np.loadtxt(\"data/sample_data_line_truths.txt\", unpack=True)\nplt.errorbar(t, y, yerr=sigma, fmt=\"k.\", label=\"observed\")\nplt.plot(t, m_true * t + b_true, color=\"C0\", label=\"truth\")\nplt.legend(fontsize=12)\nplt.xlabel(\"time\")\nplt.ylabel(\"data\");",
"<div style=\"background-color: #D6EAF8; border-left: 15px solid #2E86C1;\">\n <h1 style=\"line-height:2.5em; margin-left:1em;\">Exercise 3b</h1>\n</div>\n\nThis time, let's actually model the correlated noise. Re-define your lnprob function to accept four parameters (slope, intercept, amplitude, and timescale). If you didn't before, it's a good idea to enforce some priors to keep the parameters within reasonable (and physical) ranges. If any parameter falls outside this range, have lnprob return negative infinity (i.e., zero probability).\nYou'll probably want to run your chains for a bit longer this time, too. As before, plot some posterior samples for the line, as well as the corner plot. How did you do this time? Is there any bias in your inferred values? How does the variance compare to the previous estimate?\n<div style=\"background-color: #D6EAF8; border-left: 15px solid #2E86C1;\">\n <h1 style=\"line-height:2.5em; margin-left:1em;\">Exercise 3c</h1>\n</div>\n\nIf you didn't do this already, re-plot the posterior samples on top of the data, but this time draw them from the GP, conditioned on the data. How good is the fit?"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
ernestyalumni/servetheloop
|
passiveSkis/passiveSkis.ipynb
|
mit
|
[
"%matplotlib inline\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom scipy.optimize import curve_fit\n\nimport os, sys\n\nSUBDIR='./data/'\n\n# get the current directory and files inside \nprint(os.getcwd()); print(os.listdir( SUBDIR ));\n\nUpdatedHalbach040202ends = pd.read_excel(SUBDIR+\"Updated Halbach 4x2x2 ski at each end.xls\")\n\nskieval_edit = pd.read_excel(SUBDIR+\"ski-eval.xlsx.xlsx\")\n\nskieval = pd.read_excel(SUBDIR+\"ski-eval.xlsx\")\n\nskieval2d = pd.read_excel(SUBDIR+\"ski-eval-2d.xlsx\")\n\nskieval_edit",
"I believe some preprocessing to remove elements or comments that don't \"fit\" a table would help. \nManually, I copied and pasted spreadsheet cells with only tables and saved them as new files. Then I will load those.",
"skieval2d_periodic = pd.read_excel(SUBDIR+\"ski-eval_2d_periodic_abridged.xlsx\")\n\nskieval2d_periodic",
"Then I can immediately make some quick plots. For instance, for each width in inch, I can plot drag or lift vs. velocity (m/s):",
"ax1 = skieval2d_periodic.ix[skieval2d_periodic['inch']==1].plot.area(x=\"m/s\",y=\"drag\",color=\"Red\",label=\"1 in\")\n\nax2 = skieval2d_periodic.ix[skieval2d_periodic['inch']==2].plot.area(x=\"m/s\",y=\"drag\",color=\"Green\",label=\"2 in\",ax=ax1)\n\nax3 = skieval2d_periodic.ix[skieval2d_periodic['inch']==3].plot.area(x=\"m/s\",y=\"drag\",color=\"Blue\",label=\"3 in\",ax=ax2)\n\nax5 = skieval2d_periodic.ix[skieval2d_periodic['inch']==5].plot.area(x=\"m/s\",y=\"drag\",color=\"Purple\",label=\"5 in\",ax=ax3)\n\nskieval2d_periodic.ix[skieval2d_periodic['inch']==1][\"lift\"]"
] |
[
"code",
"markdown",
"code",
"markdown",
"code"
] |
joelagnel/lisa
|
ipynb/examples/trace_analysis/TraceAnalysis_FunctionsProfiling.ipynb
|
apache-2.0
|
[
"Trace Analysis Examples\nKernel Functions Profiling\nDetails on functions profiling are given in Plot Functions Profiling Data below.",
"import logging\nfrom conf import LisaLogging\nLisaLogging.setup()",
"Import required modules",
"# Generate plots inline\n%matplotlib inline\n\nimport json\nimport os\n\n# Support to access the remote target\nimport devlib\nfrom env import TestEnv\nfrom executor import Executor\n\n# RTApp configurator for generation of PERIODIC tasks\nfrom wlgen import RTA, Ramp\n\n# Support for trace events analysis\nfrom trace import Trace",
"Target Configuration\nThe target configuration is used to describe and configure your test environment.\nYou can find more details in examples/utils/testenv_example.ipynb.",
"# Setup target configuration\nmy_conf = {\n\n # Target platform and board\n \"platform\" : 'linux',\n \"board\" : 'juno',\n \"host\" : '192.168.0.1',\n \"password\" : 'juno',\n\n # Folder where all the results will be collected\n \"results_dir\" : \"TraceAnalysis_FunctionsProfiling\",\n\n # Define devlib modules to load\n \"exclude_modules\" : [ 'hwmon' ],\n\n # FTrace events to collect for all the tests configuration which have\n # the \"ftrace\" flag enabled\n \"ftrace\" : {\n \"functions\" : [\n \"pick_next_task_fair\",\n \"select_task_rq_fair\",\n \"enqueue_task_fair\",\n \"update_curr_fair\",\n \"dequeue_task_fair\",\n ],\n \n \"buffsize\" : 100 * 1024,\n },\n\n # Tools required by the experiments\n \"tools\" : [ 'trace-cmd', 'rt-app' ],\n \n # Comment this line to calibrate RTApp in your own platform\n \"rtapp-calib\" : {\"0\": 360, \"1\": 142, \"2\": 138, \"3\": 352, \"4\": 352, \"5\": 353},\n}\n\n# Initialize a test environment using:\nte = TestEnv(my_conf, wipe=False, force_new=True)\ntarget = te.target",
"Workload Execution and Functions Profiling Data Collection\nDetailed information on RTApp can be found in examples/wlgen/rtapp_example.ipynb.",
"def experiment(te):\n\n # Create and RTApp RAMP task\n rtapp = RTA(te.target, 'ramp', calibration=te.calibration())\n rtapp.conf(kind='profile',\n params={\n 'ramp' : Ramp(\n start_pct = 60,\n end_pct = 20,\n delta_pct = 5,\n time_s = 0.5).get()\n })\n\n # FTrace the execution of this workload\n te.ftrace.start()\n rtapp.run(out_dir=te.res_dir)\n te.ftrace.stop()\n\n # Collect and keep track of the trace\n trace_file = os.path.join(te.res_dir, 'trace.dat')\n te.ftrace.get_trace(trace_file)\n \n # Collect and keep track of the Kernel Functions performance data\n stats_file = os.path.join(te.res_dir, 'trace.stats')\n te.ftrace.get_stats(stats_file)\n\n # Dump platform descriptor\n te.platform_dump(te.res_dir)\n\nexperiment(te)",
"Parse Trace and Profiling Data",
"# Base folder where tests folder are located\nres_dir = te.res_dir\nlogging.info('Content of the output folder %s', res_dir)\n!tree {res_dir}\n\nwith open(os.path.join(res_dir, 'platform.json'), 'r') as fh:\n platform = json.load(fh)\nprint json.dumps(platform, indent=4)\nlogging.info('LITTLE cluster max capacity: %d',\n platform['nrg_model']['little']['cpu']['cap_max'])\n\ntrace = Trace(platform, res_dir, events=[])",
"Report Functions Profiling Data",
"# Get the DataFrame for the specified list of kernel functions\ndf = trace.data_frame.functions_stats(['enqueue_task_fair', 'dequeue_task_fair'])\ndf\n\n# Get the DataFrame for the single specified kernel function\ndf = trace.data_frame.functions_stats('select_task_rq_fair')\ndf",
"Plot Functions Profiling Data\nThe only method of the FunctionsAnalysis class that is used for functions profiling is plotProfilingStats. This method is used to plot functions profiling metrics for the specified kernel functions. For each speficied metric a barplot is generated which reports the value of the metric when the kernel function has been executed on each CPU.\nThe default metric is avg if not otherwise specified. A list of kernel functions to plot can also be passed to plotProfilingStats. Otherwise, by default, all the kernel functions are plotted.",
"# Plot Average and Total execution time for the specified\n# list of kernel functions\ntrace.analysis.functions.plotProfilingStats(\n functions = [\n 'select_task_rq_fair',\n 'enqueue_task_fair',\n 'dequeue_task_fair'\n ],\n metrics = [\n # Average completion time per CPU\n 'avg',\n # Total execution time per CPU\n 'time',\n ]\n)\n\n# Plot Average execution time for the single specified kernel function\ntrace.analysis.functions.plotProfilingStats(\n functions = 'update_curr_fair',\n)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
csaladenes/blog
|
kendo romania/scripts/.ipynb_checkpoints/cleanerÜold-checkpoint.ipynb
|
mit
|
[
"Romania Kendo Stats\n25 years of Kendo History in Romania, visualized\nData cleaning workbook\nCreated by Dénes Csala | 2019 | MIT License \nFor any improvement suggestions and spotted processing mistakes drop me a message on Facebook.\nIf you would like to have your country/club data visualized in a similar manner, or any other data visualization and analytics consultancy inquiries contact me at mail@csaladen.es\nThis workbook guides you through the data cleaning stage for the Romania Kendo Stats visualization. This is a multi-stage process, you will need access to the raw data (liaise with Secretary or other member in charge of data the Romanian Kendo Association), Python and Excel installed. Any Python packages will also be installed on the way, but we recommend using the Anaconda distribution of Python 3. If you would like to edit the visualization part, then you will need PowerBI Desktop.\nThe general structure of the repository is the following:\n - /data\n - /raw: this where you place the downloaded data from the official data source, sorted by years and competitions, only keep those that have relevant data for matches only\n - /ocr: this is where the data gets saved after an OCR has been performed - this is necessary for some older files in image format \n - /manual: this is where manually extracted matches from old image files get placed - they should follow the 2018 CN format, i.e. all matches in one sheet\n - /export: this is where we save the dataformatted for loading into the viz\n - /clean: this is where all the processed, cleaned data ends up - they should follow the 2018 CN format, i.e. all matches in one sheet\n - /scripts: this is the main code repository for all data processing scripts\n - /viz: this is where the visualization files get saved - they are created using PowerBI and load data from /data/clean\n1. Load and clean members\nThis section reads and clean the RKA members list. Save as baseline.",
"import pandas as pd, numpy as np, json\nimport members_loader, matches_loader, clubs_loader, point_utils, save_utils",
"First, download members data (Evidenta membrilor.xlsx) from the official data source, and create a macro-enabled Excel file from the Google Sheet. Then write a simple macro to extract the cell comments from the Club column in order to get info about club Transfers. Follow the instructions here. Save the new file as Evidenta membrilor.xlsm in the /data/manual folder. Use the members_loader module to process this file.",
"members=members_loader.get_members('../data/manual/Evidenta membrilor.xlsm')",
"Members are loaded but a bit messy.",
"members.head(2)\n\nmembers_clean=members_loader.cleaner(members).reset_index(drop=False)\n\nmembers_clean.to_csv('../data/clean/members.csv')",
"2. Load and clean matches\nMatches are loaded from excel sheets in the /data folder, organized by year and competition. We are always looking for match list data,the cleaner the better, the more concentrated the better. While this is not possible all the time, we have several demo import routines. These are stored in the matches_loader.py function library. While not all matches have textual data available, these will need to be processed through OCR first. Raw excel data that can be processed right away can be found in the /data/raw folder, while the processed ones in /data/ocr. We use a separate workbook, ocr.ipynb to walk you through the OCR process.",
"matches={i:{} for i in range(1993,2019)}\ncompetitions={\n 2018:['CR','CN','SL'],\n 2017:['CR','CN','SL'],\n 2016:['CR','CN','SL'],\n 2015:['CR','CN','SL'],\n 2014:['CR','CN','SL'],\n 2013:['CR','CN','SL'],\n 2012:['CR','CN'],\n 2011:['CR','CN'],\n 2010:['CR','CN'],\n 2009:['CR','CN'],\n 1998:['CR'],\n 1997:['CR'],\n 1993:['CR']\n}",
"2.1. Load matches",
"for year in competitions:\n for competition in competitions[year]:\n matches[year][competition]=matches_loader.get_matches(year,competition)",
"2.2. Standardize names\nNames in name_exceptions get replaced with their right hand side values before processing.",
"name_exceptions={'Atanasovski':'Atanasovski A. (MAC)',\n 'Dobrovicescu (SON)':'Dobrovicescu T. (SON)',\n 'Ianăș':'Ianăș F.',\n 'Crăciun (Tamang) Sujata':'Crăciun S.',\n 'Abe (Carțiș) Emilia':'Abe E.',\n 'Dinu (Ioniță) Claudia-Andreea':'Dinu A.',\n 'Mureșan (Egri) Melinda':'Mureșan M.',\n 'Grădișteanu (Gușu) Rebeca':'Grădișteanu R.',\n 'Józsa (Gușu) Rodiana':'Józsa R.',\n 'Arabadjiyski': 'Arabadjiyski A.',\n 'Dudaș Francisc Andrei':'Dudaș F.', \n 'Dudaș Francisc':'Dudaș F.',\n 'Mandia':'Mandia F.',\n 'Stanev':'Stanev A.',\n 'Mochalov':'Mochalov O.',\n 'Sozzi':'Sozzi A.',\n 'Crăciunel':'Crăciunel I.',\n 'Craciunel':'Crăciunel I.',\n 'Sagaev':'Sagaev L.',\n 'Buzás':'Búzás C.',\n 'Csala':'Csala T.',\n 'Dimitrov':'Dimitrov M.',\n 'Józsa':'Józsa L.',\n 'Creangă':'Creangă A.',\n 'Duțescu':'Duțescu M.', \n 'Furtună':'Furtună G.',\n 'Gârbea':'Gârbea I.',\n 'Stupu':'Stupu I.',\n 'Mahika-Voiconi':'Mahika-Voiconi S.',\n 'Mahika':'Mahika-Voiconi S.',\n 'Stanciu':'Stanciu F.',\n 'Vrânceanu':'Vrânceanu R.',\n 'Wolfs':'Wolfs J.',\n 'Ducarme':'Ducarme A.',\n 'Sbârcea':'Sbârcea B.',\n 'Mocian':'Mocian A.',\n 'Hatvani':'Hatvani L.',\n 'Dusan':'Dusan N.',\n 'Borota':'Borota V.',\n 'Tsushima':'Tsushima K.',\n 'Tráser':'Tráser T.',\n 'Colțea':'Colțea A.',\n 'Brîcov':'Brîcov A.',\n 'Yamamoto':'Yamamoto M.',\n 'Crăciun':'Crăciun D.'}",
"Names in name_equals get replaced with their right hand side values after processing.",
"name_equals={'Chirea M.':'Chirea A.',\n 'Ghinet C.':'Ghineț C.',\n 'Anghelescu A.':'Anghelescu M.',\n 'Domnița M.':'Domniță M.',\n 'Bejgu N.':'Beygu N.',\n 'Canceu A.':'Canceu Ad.',\n 'Dinu C.':'Dinu A.',\n 'Grapa D.':'Grapă D.',\n 'Cristea C.':'Cristea Că.',\n 'Cismas O.':'Cismaș O.',\n 'Garbea I.':'Gârbea I.',\n 'Vitali O.':'Oncea V.',\n 'Ah-hu W.':'Ah-hu S.',\n 'Horvát M.':'Horváth M.',\n 'Ionita A.':'Ioniță A.',\n 'Medvedschi I.':'Medvețchi I.',\n 'Mahika S.':'Mahika-Voiconi S.',\n 'Mate L.':'Máté L.',\n 'Hentea L.':'Hentea A.',\n 'Stupu I.':'Stupu A.',\n 'Ah-Hu S.':'Ah-hu S.',\n 'Alexa I.':'Alexa A.',\n 'Albert V.':'Albert J.',\n 'Angelescu M.':'Angelescu M.',\n 'Apostu D.':'Apostu T.',\n 'Brâcov A.':'Brîcov A.',\n 'Zaporojan R.':'Zaporojan O.',\n 'Vasile C.':'Vasile I.',\n 'Dițu I.':'Dițu A.',\n 'Tudor-Duicu C.':'Tudor D.',\n 'Sandu M.':'Sandu Mar.',\n 'Radulescu A.':'Rădulescu An.',\n 'Péter C.':'Péter Cso.',\n 'Movatz E.':'Movatz V.',\n 'Molinger B.':'Molinger P.',\n 'Mitelea C.':'Mițelea C.',\n 'Macavei I.':'Macaveiu A.',\n 'Macavei A.' : 'Macaveiu A.',\n 'Macaveiu I.' : 'Macaveiu A.',\n 'Luca T.':'Luca Tr.',\n 'Leca L.':'Leca F.',\n 'Gutu E.':'Guțu E.',\n 'Angelescu A.':'Angelescu M.',\n 'Mehelean L.':'Mahalean L.',\n 'Catoriu D.':'Cantoriu D.',\n 'Călina A.':'Călina C.',\n 'Ștefu I.' : 'Ștefu L.',\n 'Țarălungă A.' 
: 'Țarălungă D.',\n 'Buzás C.':'Búzás C.',\n 'Korenshi E.':'Korenschi E.',\n 'Pleșa R.':'Pleșea R.',\n 'Galos A.':'Galoș A.',\n 'Győrfi G.':'Györfi G.',\n 'Győrfi S.':'Györfi S.',\n 'Ghineț G.':'Ghineț C.',\n 'Hostina E.':'Hoștină E.', \n 'Hostină E.':'Hoștină E.', \n 'Ianăs F.':'Ianăș F.',\n 'Ianas F.':'Ianăș F.',\n 'Tamang S.':'Crăciun S.',\n 'Taralunga D.':'Țarălungă D.',\n 'Lacatus M.':'Lăcătuș M.',\n 'Máthé L.':'Máté L.',\n 'Burinaru A.':'Burinaru Al.',\n 'Nastase M.':'Năstase E.',\n 'Oprisan A.':'Oprișan A.',\n 'Pârlea A.':'Pîrlea A.',\n 'Parlea A.':'Pîrlea A.',\n 'Sabau D.':'Sabău D.',\n 'Spriu C.':'Spiru C.',\n 'Crețiu T.':'Crețiu-Codreanu T.',\n 'Crețiu M.':'Crețiu-Codreanu M.',\n 'Bíró S.':'Biró S.',\n 'Oprișan B.':'Oprișan A.',\n 'Székely J.':'Székely P.',\n 'Bărbulescu M.' : 'Bărbulescu E.',\n 'Bejenariu G.' : 'Bejenaru G.', \n 'Bojan V.' : 'Bojan Vl.',\n 'Moise A.' : 'Moise Ad.',\n 'Măgirdicean R.' : 'Magirdicean Ră.',\n 'Pall D.':'Páll D.',\n 'Stănculascu C.':'Stănculescu C.',\n 'Vrânceanu M.': 'Vrânceanu L.',\n 'Georgescu A.':'Georgescu An.', \n 'Wasicek V.':'Wasicheck W.',\n 'Wasicsec W.':'Wasicheck W.',\n 'Wasichek W.' : 'Wasicheck W.',\n 'Wasicsek W.':'Wasicheck W.',\n 'Zolfoghari A.':'Zolfaghari A.'}",
"Names in name_doubles handle situation where the default name abbreviation might lead to duplicates.",
"name_doubles={\n 'Cristea Cristina':'Cristea Cr.', \n 'Cristea Călin-Ștefan':'Cristea Că.',\n 'Sandu Marius-Cristian':'Sandu Mar.', \n 'Sandu Matei-Serban':'Sandu Mat.',\n 'Sandu Matei':'Sandu Mat.',\n 'Georgescu Andrei':'Georgescu An.', \n 'Georgescu Alexandra':'Georgescu Al.',\n 'Péter Csongor':'Péter Cso.', \n 'Péter Csanád':'Péter Csa.',\n 'Luca Mihnea':'Luca Mihn.', \n 'Luca Mihai-Cătălin':'Luca Miha.',\n 'Luca':'Luca Miha.',\n 'Luca M':'Luca Miha.',\n 'Luca M.':'Luca Miha.',\n 'Luca Mihai':'Luca Miha.',\n 'Luca Traian-Dan':'Luca Tr.', \n 'Luca Tudor':'Luca Tu.',\n 'Canceu Anamaria':'Canceu An.', \n 'Canceu Adriana-Maria':'Canceu Ad.',\n 'Cioată Daniel-Mihai':'Cioată M.', \n 'Cioată Dragoș':'Cioată D.',\n 'Burinaru Alexandra':'Burinaru Al.', \n 'Burinaru Andreea':'Burinaru An.',\n 'Kovács Andrei':'Kovács An.',\n 'Kovács Alexandru':'Kovács Al.',\n 'Cristea Adrian':'Cristea Ad.',\n 'Cristea Andrei':'Cristea An.',\n 'Cristea A.':'Cristea An.',\n 'Ungureanu Nicolae Marius':'Ungureanu M.',\n 'Ungureanu Nicoleta':'Ungureanu N.',\n 'Vincze Vlad':'Vincze Vl.',\n 'Vincze Valentina':'Vincze Va.',\n 'Bojan Vladimir':'Bojan Vl.',\n 'Bojan Voicu':'Bojan Vo.',\n 'Crețiu Codreanu Matei':'Crețiu-Codreanu M.',\n 'Crețiu Codreanu Tudor':'Crețiu-Codreanu T.',\n 'Pop Mugurel Voicu':'Pop-Mugurel V.',\n 'Pop Mihai':'Pop M.',\n 'Moise Alexandru':'Moise Al.',\n 'Moise Adrian':'Moise Ad.',\n 'Rădulescu Andrei-Savin':'Rădulescu An.',\n 'Rădulescu Adrian':'Rădulescu Ad.',\n 'Magirdicean Romeo':'Magirdicean Ro.',\n 'Magirdicean Răzvan Ionuț':'Magirdicean Ră.'}",
"Normalize Romanian characters, define name cleaner function to get Name IDs. Name ID are unique competitor names in the form of: Surname, First letter of Name. If the First Letter of Name leads to a non-unique ID, the second letter is taken, and so forth, until a unique ID is found. It gets contructed as follows:\n 1. If name in doubles return the solution directly\n 2. Normalize characters\n 3. If name is in exceptions, clean\n 4. Replace any double spaces, then split at ( (to split away club, if embedded in the name)\n 5. Split into Surname and Name, store in rnames\n 6. Store Surname N. in sname\n 7. If sname is in equals, clean\n 8. Retrun sname",
"letter_norm={'ţ':'ț','ş':'ș','Ş':'Ș'}\ndef name_cleaner(name):\n name=str(name)\n if name in name_doubles:\n return name_doubles[name]\n else:\n for letter in letter_norm:\n name=name.replace(letter,letter_norm[letter])\n if name in name_exceptions:\n name=name_exceptions[name]\n nc=name.replace(' ',' ').split('(') \n \n rname=nc[0].strip()\n rnames=rname.split(' ')\n sname=rnames[0]+' '+rnames[1][0]+'.'\n if sname in name_equals:\n sname=name_equals[sname]\n if sname in name_doubles:\n print(name,sname)\n return sname",
"Names equalling any string in redflags_names get thrown out of the final dataset.\nNames containing any string in redflags_names2 get thrown out of the final dataset.",
"redflags_names=['-','—','—',np.nan,'. ()','— ','- -.','- -. (-)','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','R','S',\n 'Kashi','Sankon','București','Victorii:','Sakura','Taiken','Ikada','Sonkei','CRK','Museido',\n 'Ichimon','Bushi Tokukai 1','Competitori – Shiai-sha','Echipa - roşu','Numele şi prenumele',\n 'Victorii:','Victorii: 0','Victorii: 1','Victorii: 2','Victorii: 3','Victorii: 4',\n 'Victorii: 5','?','Kyobukan','2/5','2/6','3/8','Finala','Kyobukan (0/0/0)','―',\n '(clasament final după meci de baraj)','CRK (Bucuresti)','Kaybukan','Isshin (Cluj)',\n 'Ikada (Bucureşti)','Kyobukan (Braşov)','Puncte:','KASHI','Budoshin','Isshin',\n '— (—)','4. B.','4. Baraj: Stupu M - Hostina','4. Baraj: Moise KM - Korenschi M',\n 'Bushi Tokukai (2/8/17)','CRK 2 (1/6/14)', 'CRK 2','CRK 1','Loc I.:','Loc',\n 'Bushi Tokukai 2 (M Ciuc)','Echipa suport']\nredflags_names2=['Bushi Tokukai','Eliminatoriu','finala','Finala','Fianala','Ikada','Ichimon','Pool',\n 'Locul ','Lotul ','Loc ','Grupa ','Isshin','Meciul ','Victorii:','L1','1','2','3','4','5','6','7','8','9','0']",
"Check is name is not in redflags. Ignore these entries.",
"def name_ok(name):\n name=str(name)\n if name=='nan': return False\n if name not in redflags_names:\n if np.array([i not in name for i in redflags_names2]).all():\n return True\n return False",
"Process all names for standardization. Create 3 variables:\n1. all_players: forward relationship: unclean name -> cleaned name\n2. all_players_r: reverse relationship\n3. all_players_unsorted: unique set of all names processed \nProcess both competitor and shinpan names.",
"all_players={}\nall_players_r={}\nall_players_unsorted=set()\nfor year in matches:\n for competition in matches[year]:\n for match in matches[year][competition]:\n for color in ['aka','shiro']:\n name=match[color]['name']\n all_players_unsorted.add(name)\n if name_ok(name):\n name=name_cleaner(name)\n rname=match[color]['name']\n if rname not in all_players_r:all_players_r[rname]=name\n if name not in all_players: all_players[name]={}\n if year not in all_players[name]:all_players[name][year]={'names':set()}\n all_players[name][year]['names'].add(rname)\n if 'shinpan' in match:\n for color in ['fukushin1','shushin','fukushin2']:\n aka=match['aka']['name']\n shiro=match['shiro']['name']\n if (name_ok(aka)) and\\\n (name_ok(shiro)) and\\\n (name_cleaner(aka) in all_players) and\\\n (name_cleaner(shiro) in all_players):\n rname=match['shinpan'][color]\n all_players_unsorted.add(rname)\n if name_ok(rname):\n name=name_cleaner(rname)\n if rname not in all_players_r:all_players_r[rname]=name\n if name not in all_players: all_players[name]={}\n if year not in all_players[name]:all_players[name][year]={'names':set()}\n all_players[name][year]['names'].add(rname)",
"Link procesed to names in members. The name_linker dictionary contains Name IDs (short names) as keys and sets of long names as values. Ideally, this set should contain only one element, so that the mapping is unique.",
"name_linker={}\nfor i in members_clean.index:\n name=members_clean.loc[i]['name']\n try:\n cname=name_cleaner(name)\n except:\n print(name)\n if cname not in name_linker:name_linker[cname]=set()\n name_linker[cname].add(name)",
"Do the opposite mapping in names_abbr: long->short. Create exceptions for duplicate names.",
"names_abbr={}\nfor name in name_linker:\n if len(name_linker[name])>1:\n #only for dev to create exceptions for duplicate person names.\n print(name,name_linker[name])\n for i in name_linker[name]:\n names_abbr[i]=name",
"Save club mappings by short name, by year.",
"names_abbr_list=[]\nname_abbr2long={}\nname_abbr2club={}\nfor i in members_clean.index:\n name=members_clean.loc[i]['name']\n club=members_clean.loc[i]['club']\n year=members_clean.loc[i]['year']\n names_abbr_list.append(names_abbr[name])\n name_abbr2long[names_abbr[name]]=name\n if names_abbr[name] not in name_abbr2club:name_abbr2club[names_abbr[name]]={}\n if year not in name_abbr2club[names_abbr[name]]:\n name_abbr2club[names_abbr[name]][year]=club",
"Add short names to members_clean.",
"members_clean['name_abbr']=names_abbr_list",
"Some names appear in the short form, we need to add them manually to the long list. We parse through all forms in which the name appears, and choose the longest. We call this the inferred name.",
"for name in all_players:\n if name not in name_abbr2long:\n #infer using longest available name\n names={len(j):j for i in all_players[name] for j in all_players[name][i]['names']}\n if len(names)>0:\n inferred_name=names[max(names.keys())]\n if '(' in inferred_name:\n inferred_name=inferred_name[:inferred_name.find('(')-1]\n name_abbr2long[name]=inferred_name",
"Infer duplicates",
"def levenshteinDistance(s1, s2):\n if len(s1) > len(s2):\n s1, s2 = s2, s1\n\n distances = range(len(s1) + 1)\n for i2, c2 in enumerate(s2):\n distances_ = [i2+1]\n for i1, c1 in enumerate(s1):\n if c1 == c2:\n distances_.append(distances[i1])\n else:\n distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1])))\n distances = distances_\n return distances[-1]\n\nnkeys=np.sort(list(name_abbr2long.keys()))\nfor ii in range(len(name_abbr2long)):\n i=nkeys[ii]\n for jj in range(ii):\n j=nkeys[jj]\n if levenshteinDistance(name_abbr2long[i],name_abbr2long[j])<4:\n print(name_abbr2long[i],':',name_abbr2long[j],' - ',i,':',j)\n\nnkeys=np.sort(list(name_abbr2long.keys()))\nfor ii in range(len(name_abbr2long)):\n i=nkeys[ii]\n for jj in range(ii):\n j=nkeys[jj]\n if levenshteinDistance(i,j)<3:\n print(i,':',j,' - ',name_abbr2long[i],':',name_abbr2long[j])",
"2.3. Infer clubs\nInfer clubs from name if club is part of name in the competition. Club names in redflags_clubs get ignored. Clubs in club_equals get replaced after processing. The convention is to have 3 letter all-caps club names for Romanian clubs, 3 letter club names followed by a / and a two letter country code for foreign clubs.",
"redflags_clubs=['','N/A','RO1','RO2']\nclub_equals=clubs_loader.club_equals",
"Attach clubs to all_players who have it in their competition name data, but we don't already known from members.",
"for name in all_players:\n #if we dont already know the club for this year from the members register\n if name not in name_abbr2club: \n for year in all_players[name]:\n for name_form in all_players[name][year]['names']:\n if '(' in name_form:\n club=name_form.split('(')[1].strip()[:-1]\n if club not in redflags_clubs:\n if name not in name_abbr2club:name_abbr2club[name]={}\n name_abbr2club[name][year]=club\n else:\n for year in all_players[name]:\n #else if no club info for particular year\n if year not in name_abbr2club[name]:\n for name_form in all_players[name][year]['names']:\n if '(' in name_form:\n club=name_form.split('(')[1].strip()[:-1]\n if club not in redflags_clubs:\n name_abbr2club[name][year]=club",
"Normalize club names and long names.",
"for name in name_abbr2club:\n for year in name_abbr2club[name]:\n if name_abbr2club[name][year] in club_equals: \n name_abbr2club[name][year]=club_equals[name_abbr2club[name][year]]\nfor name in name_abbr2long:\n name_abbr2long[name]=name_abbr2long[name].replace(' ',' ').strip()",
"If club still not found, fill the gaps between years. Forward fill first, then backward fill, if necessary.",
"manual_club_needed=set()\nfor name in all_players:\n if name in name_abbr2club:\n years=np.sort(list(all_players[name].keys()))\n minyear1=min(years)\n maxyear1=max(years)\n minyear2=min(name_abbr2club[name].keys())\n maxyear2=min(name_abbr2club[name].keys())\n \n if len(years)>1:\n for year in range(min(minyear1,minyear2),max(maxyear1,maxyear2)+1):\n if year not in name_abbr2club[name]:\n #get club from previous year\n for y in range(years[0],year):\n if y in name_abbr2club[name]:\n name_abbr2club[name][year]=str(name_abbr2club[name][y])\n break\n if year not in name_abbr2club[name]:\n #if still not found, get club from next year\n for y in np.arange(years[-1],year,-1):\n if y in name_abbr2club[name]:\n name_abbr2club[name][year]=str(name_abbr2club[name][y])\n break\n if year not in name_abbr2club[name]:\n #if still not found, get first known year\n if year<minyear2:\n name_abbr2club[name][year]=str(name_abbr2club[name][minyear2])\n else:\n name_abbr2club[name][year]=str(name_abbr2club[name][maxyear2])\n else:\n manual_club_needed.add(name)",
"We have extracted what was possible from the data. Now we do a save of short name to long name and club mappings (by year). We then edit this file manually, if necessary.\n2.4. Manual club and long name overrides",
"manual_name_needed=set()\n#check if we dont have first name information, then flag for manual additions\nfor name in name_abbr2long:\n names=name_abbr2long[name].split(' ')\n if len(names)<2:\n manual_name_needed.add(name)\n elif len(names[1])<3:\n manual_name_needed.add(name) \n\nmanual_data_override=pd.read_excel('../data/manual/members_manual.xlsx').set_index('name')\n\ncommon_manual=set(manual_club_needed).intersection(set(manual_data_override.index))\nmanual_data_override=manual_data_override.loc[common_manual]\n\nmanual_data_needed=[]\nfor i in manual_name_needed.union(manual_club_needed):\n if i not in list(manual_data_override.index):\n dummy={'name':i,'long_name':'','club':''}\n if i in name_abbr2club:\n dummy['club']=name_abbr2club[name][max(list(name_abbr2club[name].keys()))]\n if i in manual_club_needed:\n if i in name_abbr2long:\n dummy['long_name']=name_abbr2long[i]\n manual_data_needed.append(dummy)\n\ndf=pd.DataFrame(manual_data_needed).set_index('name')\ndf=pd.concat([manual_data_override,df]).drop_duplicates().sort_index()\n\ndf.to_excel('../data/manual/members_manual.xlsx')",
"Extend with manual data",
"for i in df['long_name'].replace('',np.nan).dropna().index:\n name_abbr2long[i]=df.loc[i]['long_name']\n all_players_r[name_abbr2long[i]]=i\n\nmanual_club_needed=set()\nfor name in all_players:\n years=np.sort(list(all_players[name].keys()))\n minyear=min(years)\n maxyear=max(years)\n for year in range(minyear,maxyear+1):\n if name not in name_abbr2club:name_abbr2club[name]={}\n if year not in name_abbr2club[name]:\n if name in df['club'].replace('',np.nan).dropna().index:\n name_abbr2club[name][year]=df.loc[name]['club']\n else:\n name_abbr2club[name][year]='XXX'",
"Update and overwrite with club existence data\n3. Update members\nExtend members data with data mined from matches\nExtend members with unregistered members. Probably inactive now, or from abroad. Only that one year when he appared in competition. But we only register them as known to be active that year. This is in ontrast with the Inactive members from the registry, for whom we know when did they go inactive.",
"unregistered_members=[]\nfor name in all_players:\n if name not in set(members_clean['name_abbr'].values):\n years=np.sort(list(name_abbr2club[name].keys()))\n for year in range(min(years),max(years)+1):\n if year in all_players[name]:\n iyear=year\n else:\n iyear=max(years)\n club,country=clubs_loader.club_cleaner(name_abbr2club[name][year])\n if country=='RO':\n activ='Active'\n dan=''#dan=0\n else:\n activ='Abroad'\n dan=''\n unregistered_members.append({'name':name_abbr2long[name],'name_abbr':name,\n 'club':club,'active':activ,'year':year,'dan':dan,'country':country,'source':'matches'})\n\nmembers_clean['country']='RO'\nmembers_clean['source']='member list'\n\nmembers_updated=pd.concat([members_clean,pd.DataFrame(unregistered_members)]).reset_index(drop=True)",
"Extend 0 dan down to starting year.",
"members_mu_dan_extensions=[]\nmembers_by_name=members_updated.set_index(['name_abbr'])\nfor year in matches:\n members_by_year=members_updated.set_index(['year']).loc[year]\n for competition in matches[year]:\n print(year,competition)\n for k in matches[year][competition]:\n aka=k['aka']['name']\n shiro=k['shiro']['name']\n if (name_ok(aka)) and\\\n (name_ok(shiro)) and\\\n (name_cleaner(aka) in all_players) and\\\n (name_cleaner(shiro) in all_players):\n for a in ['aka','shiro']:\n for h in k[a]:\n if h=='name':\n name=k[a][h]\n rname=all_players_r[name]\n if rname in list(members_by_name.index):\n if rname not in members_by_year['name_abbr'].values:\n dummy=members_by_name.loc[[rname]]\n minyear=min(dummy['year'])\n maxyear=max(dummy['year'])\n if year>maxyear:\n dummy=dummy[dummy['year']==maxyear]\n yeardiff=min(dummy['year'])-year\n else:\n dummy=dummy[dummy['year']==minyear]\n yeardiff=year-max(dummy['year'])\n dummy=dummy.reset_index()\n dummy['year']=year\n dummy['dan']=0\n dummy['age']=dummy['age']+yeardiff\n dummy['source']='matches, mu dan'\n members_mu_dan_extensions.append(dummy)\n #if only appears in competition in one year, then not in members table\n else:\n print(rname,year)\n #fix in unregistered_members",
"Update members",
"members_mu_dan_extensions=pd.concat(members_mu_dan_extensions)\nmembers_updated=pd.concat([members_updated,members_mu_dan_extensions]).reset_index(drop=True)",
"Prettify club names, and IDs",
"clubs=[]\npclubs=[]\ncountries=[]\nfor i in members_updated.index:\n club=members_updated.loc[i]['club']\n country=members_updated.loc[i]['country']\n year=members_updated.loc[i]['year']\n club,country=clubs_loader.club_cleaner(club,country)\n club,pclub=clubs_loader.club_year(club,country,year)\n clubs.append(club)\n pclubs.append(pclub)\n countries.append(country)\n\nmembers_updated['club']=clubs\nmembers_updated['pretty_club']=pclubs\nmembers_updated['country']=countries",
"Fix unknwown genders",
"manual_mf_data_override=pd.read_excel('../data/manual/members_mf_manual.xlsx')\n\nmanual_mf_data_needed=members_updated[(members_updated['gen']!='M')&(members_updated['gen']!='F')][['name_abbr','name']]\\\n .drop_duplicates()\n\ndf=manual_mf_data_needed#.merge(manual_mf_data_override[['name_abbr','gen']],'outer').drop_duplicates()\ndf.to_excel('../data/manual/members_mf_manual.xlsx')",
"Update members with manual gender data.",
"members_updated=members_updated.reset_index(drop=True).drop_duplicates()\n\ngens=[]\nfor i in members_updated.index:\n name=members_updated.loc[i]['name_abbr']\n if name in list(df.index):\n gens.append(df.loc[name])\n else:\n gens.append(members_updated.loc[i]['gen'])\n\nmembers_updated['gen']=gens",
"Save to /data/export.",
"members_updated.to_csv('../data/export/members.csv')\n\nclubs_updated=members_updated.groupby(['club','country','pretty_club','year'])[['name_abbr']].count()\nclubs_updated=clubs_updated.reset_index().set_index('club').join(clubs_loader.club_year_df['Oraș'])\nclubs_updated.to_csv('../data/export/clubs.csv')",
"4. Update matches\nUpdate and save cleaned match data",
"master_matches=[]\nfor year in matches:\n members_by_year=members_updated.set_index(['year']).loc[year].drop_duplicates()\n for competition in matches[year]:\n print(year,competition)\n for k in matches[year][competition]:\n good=True\n match={'year':year,'competition':competition}\n match['match_category'],match['match_teams'],match['match_phase']=point_utils.match_cleaner(year,k['match_type'])\n if 'shinpan' in k:\n for color in ['fukushin1','shushin','fukushin2']:\n if color in k['shinpan']:\n if k['shinpan'][color] in all_players_r:\n #normalize shinpan names\n match[color]=name_abbr2long[all_players_r[k['shinpan'][color]]]\n aka=k['aka']['name']\n shiro=k['shiro']['name']\n if (name_ok(aka)) and\\\n (name_ok(shiro)) and\\\n (name_cleaner(aka) in all_players) and\\\n (name_cleaner(shiro) in all_players):\n for a in ['aka','shiro']:\n points=''\n for h in k[a]:\n if h=='name':\n name=k[a][h]\n #normalize competitor names\n rname=all_players_r[name]\n df=members_by_year[members_by_year['name_abbr']==rname]\n match[a+' name']=name_abbr2long[rname]\n else:\n point=k[a][h]\n if str(point)=='nan': point=''\n points=points+point\n good=point_utils.point_redflags(points)\n if good:\n match[a+' point1'],match[a+' point2'],match[a+' points'],\\\n match[a+' hansoku'],match['encho']=point_utils.points_cleaner(points)\n else:\n good=False \n if good:\n if 'outcome' in k:\n match['encho']=point_utils.outcome_cleaner(k['outcome'])\n else: \n match['encho']=False\n match['winner'],match['difference']=point_utils.outcome_from_points(match['aka points'],match['shiro points'])\n\n master_matches.append(match)",
"Clean up and save matches for display",
"data=pd.DataFrame(master_matches).reset_index(drop=True)\n\nsave_utils.save(data)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
Chemcy/vnpy
|
vn.tutorial/performance/Performance of Receiving Tick Data.ipynb
|
mit
|
[
"vnpy接收行情数据性能测试与改进优化\nby Jerry He, 2016.12,\n讨论:https://zhuanlan.zhihu.com/p/24662087\n近来,量化交易平台vnpy因其开源、功能强大、开发容易、可定制性强的特点,目前已经被广泛应用在量化交易中。\n行情数据落地是量化交易平台必须解决的一个基础问题,它有两个方面的作用:一是供策略开发时进行分析、回测;二是为实盘程序时提供近期的历史数据。前者可以通过传统效率更高的实现方式(比如我们有基于C++和leveldb实现的行情数据接收、转发、历史数据获取程序)实现,也可以通过向数据提供方购买获取。但是对于后者,直接基于vnpy落地近期的数据是更为简易的方式。\nvnpy包含行情落地模块dataRecorder,已经实现了tick数据、分钟bar数据保存功能。\n本工作主要包括:\n- vnpy原落地函数的性能考查\n- 针对CTP接口,原落地函数的修正与优化\n以下所有性能测试时间单位均为毫秒。\n测试基于windows 7, i7 3.4GHz.",
"from datetime import datetime, time\nimport time as gtime\nimport pymongo\nfrom dateutil.parser import parse",
"重构vnpy接收行情数据代码,以用于测试",
"TICK_DB_NAME='Test'\n\nEMPTY_STRING = ''\nEMPTY_UNICODE = u''\nEMPTY_INT = 0\nEMPTY_FLOAT = 0.0\n\nclass DrTickData(object):\n \"\"\"Tick数据\"\"\"\n\n #----------------------------------------------------------------------\n def __init__(self):\n \"\"\"Constructor\"\"\" \n self.vtSymbol = EMPTY_STRING # vt系统代码\n self.symbol = EMPTY_STRING # 合约代码\n self.exchange = EMPTY_STRING # 交易所代码\n\n # 成交数据\n self.lastPrice = EMPTY_FLOAT # 最新成交价\n self.volume = EMPTY_INT # 最新成交量\n self.openInterest = EMPTY_INT # 持仓量\n \n self.upperLimit = EMPTY_FLOAT # 涨停价\n self.lowerLimit = EMPTY_FLOAT # 跌停价\n \n # tick的时间\n self.date = EMPTY_STRING # 日期\n self.time = EMPTY_STRING # 时间\n self.datetime = None # python的datetime时间对象\n \n # 五档行情\n self.bidPrice1 = EMPTY_FLOAT\n self.bidPrice2 = EMPTY_FLOAT\n self.bidPrice3 = EMPTY_FLOAT\n self.bidPrice4 = EMPTY_FLOAT\n self.bidPrice5 = EMPTY_FLOAT\n \n self.askPrice1 = EMPTY_FLOAT\n self.askPrice2 = EMPTY_FLOAT\n self.askPrice3 = EMPTY_FLOAT\n self.askPrice4 = EMPTY_FLOAT\n self.askPrice5 = EMPTY_FLOAT \n \n self.bidVolume1 = EMPTY_INT\n self.bidVolume2 = EMPTY_INT\n self.bidVolume3 = EMPTY_INT\n self.bidVolume4 = EMPTY_INT\n self.bidVolume5 = EMPTY_INT\n \n self.askVolume1 = EMPTY_INT\n self.askVolume2 = EMPTY_INT\n self.askVolume3 = EMPTY_INT\n self.askVolume4 = EMPTY_INT\n self.askVolume5 = EMPTY_INT \n \ndef insertData(db,collection,data):\n client[db][collection].insert(data.__dict__)\n\ndef procecssTickEvent(tick, insertDB=False):\n \"\"\"处理行情推送\"\"\"\n vtSymbol = tick.vtSymbol\n\n # 转化Tick格式\n drTick = DrTickData()\n d = drTick.__dict__\n for key in d.keys():\n if key != 'datetime':\n d[key] = tick.__dict__[key]\n drTick.datetime = datetime.strptime(' '.join([tick.date, tick.time]), '%Y%m%d %H:%M:%S.%f') \n \n # 更新Tick数据\n if insertDB:\n insertData(TICK_DB_NAME, vtSymbol, drTick) ",
"创建一个用于测试的Tick数据",
"client=pymongo.MongoClient()\ndata=client['VnTrader_Tick_Db']['rb1705'].find_one({})\ndel data['_id']\n\nclass InputTick: pass\ntick=InputTick()\ntick.__dict__.update(data)\nprint tick.__dict__",
"测试原版函数性能",
"def profiling(count,func=None):\n if func==None: func=lambda: procecssTickEvent(tick)\n t0=gtime.time()\n for i in range(count):\n func()\n total_time=(gtime.time()-t0)\n return total_time*1000/count\n\ntest_count=10000\n\noriginal_nodb=profiling(test_count)\nclient.drop_database(TICK_DB_NAME)\noriginal_db=profiling(test_count,func=lambda: procecssTickEvent(tick,insertDB=True))\nprint '原版不保存数据到mongodb单次耗时:%.4f' %original_nodb\nprint '原版含保存数据到mongodb单次耗时:%.4f' %original_db",
"改进版本\n原版程序使用CTP接口保存期货数据时,存在几个问题:\n- 非交易时间收到的野数据没有被过滤掉\n- 当前各交易所提供的date字段混乱,有的使用真实日期,有的使用交易日,导致计算的datetime字段也是有问题的\n针对以上问题的改进版本如下:",
"#过滤掉的时间区间,注意集合竞价tick被过滤了。\ninvalid_sections=[(time(2,30,59),time(9,0,0)),\n (time(11,30,59),time(13,0,0)),\n (time(15,15,0),time(21,0,0))]\n\n#本地时间在此区间时对收到的Tick数据不处理,避免有时期货公司会抽风把数据重推一次。\ninvalid_local_section=(time(5,0,0),time(8,30,0))\n\ndef procecssTickEvent(tick, insertDB=False):\n \"\"\"处理行情推送\"\"\"\n # 1. 本地时间检查\n local_datetime=datetime.now()\n local_time=local_datetime.time()\n if local_time>invalid_local_section[0] and local_time<invalid_local_section[1]:\n return\n\n # 2. 转化Tick格式\n drTick = DrTickData()\n d = drTick.__dict__\n for key in d.keys():\n if key != 'datetime':\n d[key] = tick.__dict__[key]\n\n #防御时间格式变为 ”9:00:00.5\"\n if tick.time[2] != ':': \n tick.time = '0' + tick.time\n \n tick_hour = int(tick.time[0:2]) \n local_hour = local_time.hour\n real_date=local_datetime\n if tick_hour == 23 and local_hour == 0:#行情时间慢于系统时间\n real_date+=timedelta(-1)\n elif tick_hour == 0 and local_hour == 23:#系统时间慢于行情时间\n real_date+=timedelta(1)\n\n tick.time = tick.time.ljust(12,'0')\n drTick.datetime = datetime(real_date.year,real_date.month,real_date.day,\n int(tick.time[0:2]), int(tick.time[3:5]), int(tick.time[6:8]), int(tick.time[9:12])*1000)\n\n tmpTime=drTick.datetime.time()\n for sec in invalid_sections:\n if tmpTime>sec[0] and tmpTime<sec[1]:\n return\n \n # 3. 更新Tick数据\n if insertDB:\n insertData(TICK_DB_NAME, tick.vtSymbol, drTick) \n\nprocecssTickEvent(tick)\n\nnew_nodb=profiling(test_count)\nclient.drop_database(TICK_DB_NAME)\nnew_db=profiling(test_count,func=lambda: procecssTickEvent(tick,insertDB=True))\nprint '新版不保存数据到mongodb单次耗时:%.4f' %original_nodb\nprint '新版含保存数据到mongodb单次耗时:%.4f' %original_db",
"保存为文本文件效率",
"def insertData(db,collection,data):\n for key in data.__dict__:\n fout.write(str(data.__dict__[key])+',')\n\nfout=open('D:/test.txt','w')\nnew_db_text=profiling(test_count,func=lambda: procecssTickEvent(tick,insertDB=True))\nprint '新版含保存数据到text file单次耗时:%.4f' %original_db\nfout.close()",
"时间类型转化效率\n注意到不保存数据到数据的版本中,新版相比老版耗时反而降低了,这主要是由于时间转化函数的改写。\n如下三种时间转化方法效率差别巨大:",
"time_convert1=profiling(10000,lambda:parse('20161212 21:21:21.5'))\ntime_convert2=profiling(10000,lambda:datetime.strptime('20161212 21:21:21.5', '%Y%m%d %H:%M:%S.%f'))\ndef customized_parse(s):\n s=s.ljust(21,'0')\n return datetime(int(s[0:4]),int(s[4:6]),int(s[6:8]),int(s[9:11]), int(s[12:14]), int(s[15:17]), int(s[18:21])*1000 )\ntime_convert3=profiling(10000,lambda:customized_parse('20161212 21:21:21.5')) \nprint '转化方法1耗时:%.4f' %time_convert1\nprint '转化方法2耗时:%.4f' %time_convert2\nprint '转化方法3耗时:%.4f' %time_convert3",
"总结",
"import pandas as pd\ndf=pd.DataFrame([{u'无数据写入':original_nodb,u'mongodb写入':original_db},\n {u'无数据写入': new_nodb, u'mongodb写入': new_db, u'text文件写入':new_db_text}\n ],index=['原版','新版'])\ndf",
"总的来看,行情数据落地原版与新版一次落地耗时都为0.2ms左右。函数中,耗时主要来源于mongodb的插入,占约为90%的耗时。通过尝试简单的text写入作为数据存储方式,耗时得到了大幅降低,获得了单次0.04ms耗时的效果,采取其它更高效的格式有望进一步降低耗时。但考虑到无数据写入时的耗时为约0.02ms,所以期望的最优耗时也就在0.02ms左右。\n总的来说,基于mongodb的方案能够实时存储的条目数在每秒几百条量级;进一步优化可能达到几千条量级。此水平应该己能满足绝大多数的需求。"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
Adamage/python-training
|
Lesson_01_variables_and_data_types.ipynb
|
apache-2.0
|
[
"Python Training - Lesson 1 - Variables and Data Types\nVariables\nA variable refers to a certain value with specific type. For example, we may want to store a number, a fraction, or a name, date, maybe a list of numbers. All those need to be reachable using some name, some reference, which we create when we create a variable.\nAfter we create a variable with a value, we can peek at what's inside using \"print\" method.",
"my_name = 'Adam'\nprint my_name\n\nmy_age = 92\nyour_age = 23\nage_difference = my_age - your_age\nprint age_difference",
"How to assign values to variables?\nSingle assignment",
"a = 1",
"Multiple assignment",
"a, b, c = 1, 2, 3\nprint a, b, c\n\na = b = c = d = \"The same string\"\nprint a, b, c, d",
"What is a reference? What is a value?\nYou could ask: does Python use call-by-value, or call-by-reference? Neither of those, actually. Variables in Python are \"names\", that ALWAYS bind to some object, because mostly everything in Python is an object, a complex type. So assigning a variable means, binding this \"name\" to an object.\nActually, each time you create a number, you are not using a classic approach, like for example in C++:\nint my_integer = 1;\nWhen we look at an integer in Python, it's actually an object of type 'int'. To check the type of an object, use the \"type\" method.",
"type(my_age)",
"To be completely precise, let's look at creating two variables that store some names. To see where in memory does the object go, we can use method \"id\". To see the hex representation of this memory, as you will usually see, we can use the method \"id\".",
"some_person = \"Andrew\"\nperson_age = 22\nprint some_person, type(some_person), hex(id(some_person))\nprint person_age, type(person_age), hex(id(person_age))",
"Now, let's change this name to something else.",
"some_person = \"Jamie\"\nperson_age = 24\nprint some_person, type(some_person), hex(id(some_person))\nprint person_age, type(person_age), hex(id(person_age))",
"The important bit is that, even though we use the same variable \"person_age\", the memory address changed. The object holding integer '22' is still living somewhere on the process heap, but is no longer bound to any name, and probably will be deleted by the \"Garbage Collector\". The binding that exists now, if from name \"person_age\" to the int object \"24\".\nThe same can be said about variable 'some_person'.\nMutability and immutability\nThe reason we need to talk about this, is that when you use variables in Python, you have to understand that such a \"binding\" can be shared! When you modify one, the other shared bindings will be modified as well! This is true for \"mutable\" objects. There are also \"immutable\" objects, that behave in a standard, standalone, not-changeable way.\nImmutable types: int, float, decimal, complex, bool, string, tuple, range, frozenset, bytes\nMutable types: list, dict, set, bytearray, user-defined classes",
"shared_list = [11,22]\nmy_list = shared_list\nyour_list = shared_list\nprint shared_list, my_list, your_list",
"Now, when we modify the binding of 'shared_list' variable, both of our variables will change also!",
"shared_list.append(33)\nprint shared_list, my_list, your_list",
"This can be very confusing later on, if you do not grasp this right now. Feel free to play around :)\nData types\nWhat is a data type? It is a way of telling our computer, that we want to store a specific kind of information in a particular variable. This allows us to access tools and mechanisms that are allowed for that type.\nWe already mentioned that actually every time we create a variable, we create a complex type variable, or an object.\nThis is called creating an object, or instantiating an object. Each object comes from a specific template, or how we call it in Object Oriented Programming, from a class.\nSo when you assign a variable, you instantiate an object from a class.\nIn Python, every data type is a class!\nAlso, we will use some built-in tools for inspection - type() and isinstance() functions. The function type() will just say from which class does this object come from. THe function isinstance() will take an object reference, and then a class name, and will tell you if this is an instance of this class.\nLet's review data types used in Python (most of them).\nNumeric types\nThese types allow you to store numbers. Easy.\nint\nIntegers. If you create a really big integer, it will become a 'long integer', or 'long'.",
"a = 111\nprint a, type(a)\n\nb = 111111111111111111111111111111111\nprint b, type(b)",
"float\nFloating decimal point numbers. Used usually for everything that is not an 'int'.",
"c = 11.33333\nd = 11111.33\nprint c, type(c)\nprint d, type(d)",
"complex\nComplex numbers. Advanced sorceries of mathematicians. In simple terms, numbers that have two components. Historically, they were named 'real' component (regular numbers) and 'imaginary' component - marked in Python using the 'j' letter.",
"c = 2 + 3j\nprint c, type(c)",
"Numeric operations",
"# Addition\nprint(1+1)\n\n# Multiplication\nprint(2*2)\n\n# Division\nprint(4/2)\n\n# Remainder of division\nprint(5%2)\n\n# Power\nprint(2**4)",
"Strings\nRepresents text, or to be more specific, sequences of 'Unicode' characters. To let Python know we are using strings, put them in quotes, either single, or double.",
"a = \"Something\"\nb = 'Something else'\nprint type(a), type(b)",
"Even though strings are not numbers, you can do a lot of operations on them using the usual operators.",
"name = 'Adam'\nprint name + name\nprint name * 3",
"Actually, strings are 'lists' of characters. We will explore lists in just a moment, but I want you to become familiar with a new notation. It is based on the order of sequence. When I say, \"Give me the second character of this string\", I can write is as such:",
"print 'Second character is: ' + name[1]",
"Since we are counting from 0, the second character has index = 1.\nNow, say I want characters from second, to fourth.",
"print 'From second to fourth: ' + name[1:4]\n\nprint 'The last character (or first counting from the end) is: ' + name[-1]\n\nprint 'All characters, but skip every second: ' + name[0:4:2]",
"These operations are called 'slicing'.\nWe can also find substrings in other substrings. THe result is the index, at which this substring occurs.",
"some_string = \"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAxxAAAAAAAAAAAAAAAAAAAA\"\nsubstring = \"xx\"\nlocation = some_string.find(substring)\n\nprint(\"Lets see what we found:\")\nprint(some_string[location:location+len(substring)])",
"We can also replace substrings in a bigger string. Very convenient. But more complex replacements or searches are done using regular expressions, which we will cover later",
"some_string = \"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAxxAAAAAAAAAAAAAAAAAAAA\"\nsubstring = \"xx\"\n\nprint(some_string.replace( substring , \"___REPLACED___\"))",
"Boolean\nIt represents the True and False values. Variables of this type, can be only True or False.\nIt is useful to know, that in Python we can check any variable to be True or False, even lists!\nWe use the bool() function.",
"a = True\nb = False\n\nprint(\"Is a equal to b ?\")\nprint(a==b)\n\nprint(\"Logical AND\")\nprint(a and b)\n\nprint(\"Logical OR\")\nprint(a or b)\n\nprint(\"Logical value of True\")\nprint( bool(a) )\n\nprint(\"Logical value of an empty list\")\nprint( bool([]) )\n\nprint(\"Logical value of an empty string\")\nprint( bool(\"\") )\n\nprint(\"Logical value of integer 0\")\nprint( bool(0) )",
"List\nPrepare to use this data type A LOT. Lists can store any objects, and have as many elements, as you like. The most important thing about lists, is that their elements are ordered. You can create a list by making an empty list, converting something else to a list, or defining elements of a list right there, when you declare it.\nCreating lists.",
"empty_list = []\nlist_from_something_else = list('I feel like Im going to explode')\nlist_elements_defined_when_list_is_created = [1, 2, 3, 4]\nprint empty_list\nprint list_from_something_else\nprint list_elements_defined_when_list_is_created",
"Selecting from lists",
"l = [\"a\", \"b\", \"c\", \"d\", \"e\"]\nprint l[0]\nprint l[-1]\nprint l[1:3]",
"Adding and removing from a list",
"l = []\nl.append(1)\nprint l\nl[0] = 222\nprint l\n\nl.remove(1)\nprint l\n\nl = [1,2,3,3,4,5,3,2,3,2]\n\n# Make a new list from a part of that list\nnew = l[4:7]\nprint new",
"Iterating over a list\nBut lists are not only used to hold some sequences! You can iterate over a list. This means no more, no less, then doing something for each of the elements in a given range, or for all of them. We will cover the so-called 'for' loop in next lessons, but I guess you can easily imagine what this minimal example would do.",
"# Do something for all of elements.\nfor element in [1, 2, 3]:\n print element + 20\n\n# Do something for numbers coming from a range of numbers.\nfor number in range(0,3):\n print number + 20\n\n# Do something for all of elements, but written in a short way.\nsome_list = ['a', 'b', 'c']\nprint [element*2 for element in some_list]",
"Even though the short notation is a more advanced topic, it is very elegant and 'pythonic'. This way of writing down the process of iteration is called 'list comprehensions'.\nTuple\nA tuple is a simple data structure - it behaves pretty much like a list, except for one fact - you can not change elements of tuple after it is created! You create it the same as a list, but using normal brackets.",
"some_tuple = (1,3,4)\nprint some_tuple\nprint type(some_tuple)\nprint len(some_tuple)\nprint some_tuple[0]\nprint some_tuple[-1]\nprint some_tuple[1:2]\n\n\nother_tuple = 1, 2, 3\nprint other_tuple\nprint type(other_tuple)\n\n# This will cause an error! You can not modify a tuple.\nsome_tuple[1] = 22",
"Dictionary\nThis data structure is very useful. In essence, it stores pairs of values, first of which is always a \"key\", a unique identifier, and the \"value\", which is the connected object.\nA dictionary performs a mapping between keys and values. Because the key is always unique (has to be, we will find out in a minute), there is always exactly one key with specific content.\nA dictionary is also very efficient - finding a value in a dictionary takes only one operation, whereas searching through a list one by one could require going through the whole list. \nThis means that for any situation, where you need to store lot's of values, that will be often used, it is much better to store them in a dictionary.\nAlso, I recommend to read on Wikipedia on \"hash maps\".\nCreating dictionaries",
"empty_dictionary = {}\nprint empty_dictionary\nprint type(empty_dictionary)\n\ndictionary_from_direct_definition = {\"key1\": 1, \"key2\": 33}\nprint dictionary_from_direct_definition\n\n# Let's create a dictionary from a list of tuples\ndictionary_from_a_collection = dict([(\"a\", 1), (\"b\", 2)])\nprint dictionary_from_a_collection\n\n# Let's create a dictionary from two lists\nsome_list_with_strings = [\"a\", \"b\", \"c\"]\nsome_list_with_numbers = [1,2,3]\ndictionary_from_two_lists = dict(zip(some_list_with_strings, some_list_with_numbers))\nprint dictionary_from_two_lists\nprint type(dictionary_from_two_lists)\n\n# Let's create a dictionary from a dictionary comprehension\ndict_from_comprehension = {key:value for key, value in zip(some_list_with_strings, some_list_with_numbers)}\nprint dict_from_comprehension",
"Using dictionaries\nAdd key-value pairs",
"d = {}\nd[\"a\"] = 1\nd[\"bs\"] = 22\nd[\"ddddd\"] = 31\nprint d\n\nd.update({\"b\": 2, \"c\": 3})\nprint d",
"Remove items",
"del d[\"b\"]\nprint d\n\nd.pop(\"c\")\nprint d",
"Inspect a dictionary",
"# How many keys?\nprint d.keys()\nprint len(d)\nprint len(d.keys())\n\n# How many values?\nprint d.values()\nprint len(d.values())",
"Iterate over dictionary",
"for key, value in d.items():\n print key, value",
"Example of looking for a specific thing in a list, and in a dictionary:",
"l = [\"r\", \"p\", \"s\", \"t\"]\nd = {a: a for a in l}\n\n# Find \"t\" in list.\nfor letter in l:\n if letter == \"t\":\n print \"Found it!\"\n else:\n print \"Not yet!\"\n\n# Find \"t\" in dictionary keys.\nprint \"In dictionary - found it! \" + d[\"t\"]\n",
"Sets\nA set behaves pretty much like a mixture of a dictionary and a list. It has two features:\n- it only has unique values\n- it does not respect order of things - it has no order, like a dictionary",
"some_sequence = [1,1,1,1,2,2,2,3,3,3]\nsome_set = set(some_sequence)\nprint some_set\n\nsome_string = \"What's going ooooon?\"\nanother_set = set(some_string)\nprint another_set\n\nsome_dictionary = {\"a\": 2, \"b\": 2}\nprint some_dictionary\n\n\nyet_another_set = set(some_dictionary)\nprint yet_another_set\n\nprint set(some_dictionary.values())"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
d00d/quantNotebooks
|
Notebooks/quantopian_research_public/tutorials/pipeline/pipeline_tutorial_lesson_10.ipynb
|
unlicense
|
[
"from quantopian.pipeline import Pipeline\nfrom quantopian.research import run_pipeline\nfrom quantopian.pipeline.data.builtin import USEquityPricing\nfrom quantopian.pipeline.factors import SimpleMovingAverage, AverageDollarVolume",
"Custom Factors\nWhen we first looked at factors, we explored the set of built-in factors. Frequently, a desired computation isn't included as a built-in factor. One of the most powerful features of the Pipeline API is that it allows us to define our own custom factors. When a desired computation doesn't exist as a built-in, we define a custom factor.\nConceptually, a custom factor is identical to a built-in factor. It accepts inputs, window_length, and mask as constructor arguments, and returns a Factor object each day.\nLet's take an example of a computation that doesn't exist as a built-in: standard deviation. To create a factor that computes the standard deviation over a trailing window, we can subclass quantopian.pipeline.CustomFactor and implement a compute method whose signature is:\ndef compute(self, today, asset_ids, out, *inputs):\n ...\n\n*inputs are M x N numpy arrays, where M is the window_length and N is the number of securities (usually around ~8000 unless a mask is provided). *inputs are trailing data windows. Note that there will be one M x N array for each BoundColumn provided in the factor's inputs list. The data type of each array will be the dtype of the corresponding BoundColumn.\nout is an empty array of length N. out will be the output of our custom factor each day. The job of compute is to write output values into out.\nasset_ids will be an integer array of length N containing security ids corresponding to the columns in our *inputs arrays.\ntoday will be a pandas Timestamp representing the day for which compute is being called.\n\nOf these, *inputs and out are most commonly used.\nAn instance of CustomFactor that’s been added to a pipeline will have its compute method called every day. For example, let's define a custom factor that computes the standard deviation of the close price over the last 5 days. To start, let's add CustomFactor and numpy to our import statements.",
"from quantopian.pipeline import CustomFactor\nimport numpy",
"Next, let's define our custom factor to calculate the standard deviation over a trailing window using numpy.nanstd:",
"class StdDev(CustomFactor):\n def compute(self, today, asset_ids, out, values):\n # Calculates the column-wise standard deviation, ignoring NaNs\n out[:] = numpy.nanstd(values, axis=0)",
"Finally, let's instantiate our factor in make_pipeline():",
"def make_pipeline():\n std_dev = StdDev(inputs=[USEquityPricing.close], window_length=5)\n\n return Pipeline(\n columns={\n 'std_dev': std_dev\n }\n )",
"When this pipeline is run, StdDev.compute() will be called every day with data as follows:\n- values: An M x N numpy array, where M is 20 (window_length), and N is ~8000 (the number of securities in our database on the day in question).\n- out: An empty array of length N (~8000). In this example, the job of compute is to populate out with an array storing of 5-day close price standard deviations.",
"result = run_pipeline(make_pipeline(), '2015-05-05', '2015-05-05')\nresult",
"Default Inputs\nWhen writing a custom factor, we can set default inputs and window_length in our CustomFactor subclass. For example, let's define the TenDayMeanDifference custom factor to compute the mean difference between two data columns over a trailing window using numpy.nanmean. Let's set the default inputs to [USEquityPricing.close, USEquityPricing.open] and the default window_length to 10:",
"class TenDayMeanDifference(CustomFactor):\n # Default inputs.\n inputs = [USEquityPricing.close, USEquityPricing.open]\n window_length = 10\n def compute(self, today, asset_ids, out, close, open):\n # Calculates the column-wise mean difference, ignoring NaNs\n out[:] = numpy.nanmean(close - open, axis=0)",
"<i>Remember in this case that close and open are each 10 x ~8000 2D numpy arrays.</i>\nIf we call TenDayMeanDifference without providing any arguments, it will use the defaults.",
"# Computes the 10-day mean difference between the daily open and close prices.\nclose_open_diff = TenDayMeanDifference()",
"The defaults can be manually overridden by specifying arguments in the constructor call.",
"# Computes the 10-day mean difference between the daily high and low prices.\nhigh_low_diff = TenDayMeanDifference(inputs=[USEquityPricing.high, USEquityPricing.low])",
"Further Example\nLet's take another example where we build a momentum custom factor and use it to create a filter. We will then use that filter as a screen for our pipeline.\nLet's start by defining a Momentum factor to be the division of the most recent close price by the close price from n days ago where n is the window_length.",
"class Momentum(CustomFactor):\n # Default inputs\n inputs = [USEquityPricing.close]\n\n # Compute momentum\n def compute(self, today, assets, out, close):\n out[:] = close[-1] / close[0]",
"Now, let's instantiate our Momentum factor (twice) to create a 10-day momentum factor and a 20-day momentum factor. Let's also create a positive_momentum filter returning True for securities with both a positive 10-day momentum and a positive 20-day momentum.",
"ten_day_momentum = Momentum(window_length=10)\ntwenty_day_momentum = Momentum(window_length=20)\n\npositive_momentum = ((ten_day_momentum > 1) & (twenty_day_momentum > 1))",
"Next, let's add our momentum factors and our positive_momentum filter to make_pipeline. Let's also pass positive_momentum as a screen to our pipeline.",
"def make_pipeline():\n\n ten_day_momentum = Momentum(window_length=10)\n twenty_day_momentum = Momentum(window_length=20)\n\n positive_momentum = ((ten_day_momentum > 1) & (twenty_day_momentum > 1))\n\n std_dev = StdDev(inputs=[USEquityPricing.close], window_length=5)\n\n return Pipeline(\n columns={\n 'std_dev': std_dev,\n 'ten_day_momentum': ten_day_momentum,\n 'twenty_day_momentum': twenty_day_momentum\n },\n screen=positive_momentum\n )",
"Running this pipeline outputs the standard deviation and each of our momentum computations for securities with positive 10-day and 20-day momentum.",
"result = run_pipeline(make_pipeline(), '2015-05-05', '2015-05-05')\nresult",
"Custom factors allow us to define custom computations in a pipeline. They are frequently the best way to perform computations on partner datasets or on multiple data columns. The full documentation for CustomFactors is available here.\nIn the next lesson, we'll use everything we've learned so far to create a pipeline for an algorithm."
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
diana-hep/carl
|
examples/Likelihood ratios of mixtures of normals.ipynb
|
bsd-3-clause
|
[
"Likelihood ratios of mixtures of normals\nKyle Cranmer, Juan Pavez, Gilles Louppe, March 2016.",
"%matplotlib inline\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport theano\nimport theano.tensor as T",
"Toy problem\nLet us consider two 1D distributions $p_0$ and $p_1$ for which we want to approximate the ratio $r(x) = \\frac{p_0(x)}{p_1(x)}$ of their densities.\n\n$p_1$ is defined as a mixture of two gaussians;\n$p_0$ is defined as a mixture of the same two gaussians + a bump.",
"from carl.distributions import Normal\nfrom carl.distributions import Mixture\n\ncomponents = [\n Normal(mu=-2.0, sigma=0.75), # c0\n Normal(mu=0.0, sigma=2.0), # c1\n Normal(mu=1.0, sigma=0.5) # c2 (bump)\n]\n\nbump_coefficient = 0.05\ng = theano.shared(bump_coefficient) \np0 = Mixture(components=components, weights=[0.5 - g / 2., 0.5 - g / 2., g])\np1 = Mixture(components=components[:2], weights=[0.5, 0.5])",
"Note: for $p_0$, weights are all tied together through the Theano shared variable g. This means that changes to the value stored in g also automatically change the weight values and the resulting mixture.\nNext we generate an artificial observed dataset X_true.",
"X_true = p0.rvs(5000, random_state=777)\n\nreals = np.linspace(-5, 5, num=1000)\nplt.plot(reals, p0.pdf(reals.reshape(-1, 1)), label=r\"$p(x|\\gamma=0.05)$\", color=\"b\")\nplt.plot(reals, p1.pdf(reals.reshape(-1, 1)), label=r\"$p(x|\\gamma=0)$\", color=\"r\")\nplt.hist(X_true[:, 0], bins=100, normed=True, label=\"data\", alpha=0.2, color=\"b\")\nplt.xlim(-5, 5)\nplt.legend(loc=\"best\", prop={'size': 8})\n#plt.savefig(\"fig1a.pdf\")\nplt.show()",
"Density ratio estimation\nThe density ratio $r(x)$ can be approximated using calibrated classifiers, either directly by learning to classify $x \\sim p_0$ from $x \\sim p_1$, calibrating the resulting classifier, or by decomposing the ratio of the two mixtures as pairs of simpler density ratios and calibrating each corresponding pair-wise classifier.",
"from sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.neural_network import MLPRegressor\nfrom carl.ratios import ClassifierRatio\nfrom carl.ratios import DecomposedRatio\nfrom carl.learning import CalibratedClassifierCV\n\nn_samples = 200000\nclf = MLPRegressor(tol=1e-05, activation=\"logistic\", \n hidden_layer_sizes=(10, 10), learning_rate_init=1e-07, \n learning_rate=\"constant\", algorithm=\"l-bfgs\", random_state=1, \n max_iter=75) \n\n# No calibration\ncc_none = ClassifierRatio(base_estimator=clf, random_state=1)\ncc_none.fit(numerator=p0, denominator=p1, n_samples=n_samples)\n\n# Calibration + Direct approximation \ncv = StratifiedShuffleSplit(n_iter=1, test_size=0.5, random_state=1)\ncc_direct = ClassifierRatio(\n base_estimator=CalibratedClassifierCV(clf, bins=15, cv=cv), \n random_state=0)\ncc_direct.fit(numerator=p0, denominator=p1, n_samples=n_samples)\n\n# Calibration + Decomposition of the mixture\ncc_decomposed = DecomposedRatio(ClassifierRatio(\n base_estimator=CalibratedClassifierCV(clf, bins=20, cv=cv), \n random_state=0))\ncc_decomposed.fit(numerator=p0, denominator=p1, n_samples=n_samples)",
"Note: CalibratedClassifierRatio takes three arguments for controlling its execution:\n\nbase_estimator specifying the classifier to be used (note commented ExtraTreesRegressor),\ncalibration specifying the calibration algorithm (\"kde\", \"histogram\", or a user-defined distribution-like object),\ncv specifying how to allocate data for training and calibration.\n\nNext we plot $\\log r(x)$ vs. $x$ for the different cases.",
"plt.plot(reals, -p0.nll(reals.reshape(-1, 1)) \n +p1.nll(reals.reshape(-1, 1)), label=\"Exact ratio\")\n\nplt.plot(reals, cc_none.predict(reals.reshape(-1, 1), log=True), label=\"No calibration\")\nplt.plot(reals, cc_direct.predict(reals.reshape(-1, 1), log=True), label=\"Calibration\")\nplt.plot(reals, cc_decomposed.predict(reals.reshape(-1, 1), log=True), label=\"Calibration + Decomposition\")\n\nplt.xlim(-5, 5)\nplt.ylim(-0.5, 0.5)\nplt.legend(loc=\"best\", prop={'size': 8})\n#plt.savefig(\"fig1c.pdf\")\nplt.show()",
"Below is an alternative plot (that works in higher dimensions when the true likleihood is known) to check if the uncalibrated classifier is monotonically related to the true likelihood ratio.",
"plt.scatter(-p0.nll(reals.reshape(-1, 1)) + p1.nll(reals.reshape(-1, 1)), \n cc_none.classifier_.predict_proba(reals.reshape(-1, 1))[:, 0], alpha=0.5)\nplt.xlabel(\"r(x)\")\nplt.ylabel(\"s(x)\")\nplt.show()",
"Now we inspect the distribution of the exact $\\log {r}(x)$ and approximate $\\log \\hat{r}(x)$.",
"g.set_value(bump_coefficient)\nX0 = p0.rvs(200000)\nplt.hist(-p0.nll(X0) + p1.nll(X0), bins=100, histtype=\"step\", label=\"Exact\", normed=1)\nplt.hist(cc_decomposed.predict(X0, log=True), bins=100, histtype=\"step\", label=\"Approx.\", normed=1)\nplt.yscale(\"log\")\nplt.legend()\n#plt.savefig(\"fig1e.pdf\")\nplt.show()",
"Using density ratios for maximum likelihood fit\nNext let us construct the log-likelihood curve for the artificial dataset.",
"def nll_true(theta, X):\n g.set_value(theta[0])\n return (p0.nll(X) - p1.nll(X)).sum()\n\ndef nll_approx(theta, X):\n g.set_value(theta[0])\n return -np.sum(cc_decomposed.predict(X, log=True))\n\ng_scan = np.linspace(0.0, 2 * bump_coefficient, 50)\nnll_true_scan = np.array([nll_true([t], X_true) for t in g_scan])\nnll_approx_scan = np.array([nll_approx([t], X_true) for t in g_scan])\nplt.plot(g_scan, nll_true_scan-nll_true_scan.min(), label=\"Exact\")\nplt.plot(g_scan, nll_approx_scan-nll_approx_scan.min(), label=\"Approx.\")\nplt.legend()\n#plt.savefig(\"fig1f.pdf\")\nplt.show()",
"A nice approximation of the exact likelihood. \nEnsemble tests\nNow let us perform an ensemble test with 1000 repeated experiments. We will use this to check bias of the maximum likelihood estimator and the asymptotic distribution of $-2\\log \\Lambda(\\gamma)$ (ie. Wilks's theorem).",
"from sklearn.utils import check_random_state\nfrom scipy.optimize import minimize\n\nn_trials = 1000\n\ntrue_mles = []\ntrue_nll = []\napprox_mles = []\napprox_nll = []\n\nfor i in range(n_trials): \n # Generate new data\n g.set_value(bump_coefficient)\n X_true = p0.rvs(5000, random_state=i)\n \n # True MLE\n results = minimize(nll_true, x0=[0.1], args=(X_true,),\n constraints=[{'type':'ineq', 'fun': lambda x: x[0]},\n {'type':'ineq', 'fun': lambda x: 1. - x[0]},])\n\n true_mles.append(results.x[0])\n true_nll.append(2. * (nll_true([bump_coefficient], X_true) - results.fun))\n \n # Inference with ratios\n results = minimize(nll_approx, x0=[0.1], args=(X_true,),\n constraints=[{'type':'ineq', 'fun': lambda x: x[0]},\n {'type':'ineq', 'fun': lambda x: 1. - x[0]},])\n\n approx_mles.append(results.x[0])\n approx_nll.append(2. * (nll_approx([bump_coefficient], X_true) - results.fun))\n\ntrue_mles = np.array(true_mles)\ntrue_mles = true_mles[np.isfinite(true_mles)]\napprox_mles = np.array(approx_mles)\napprox_mles = approx_mles[np.isfinite(approx_mles)]\nnp.mean(true_mles), np.mean(approx_mles)\n\nh = plt.hist(true_mles, bins=30, normed=1, alpha=0.2, color=\"b\", label=\"Exact MLEs\")\nh = plt.hist(approx_mles, bins=30, normed=1, alpha=0.2, color=\"g\", label=\"Approx. MLEs\")\nplt.vlines(bump_coefficient, 0, h[0].max()+5, linestyles=\"dashed\", label=r\"$\\gamma = 0.5$\")\nplt.legend()\n#plt.savefig(\"fig2a.pdf\")\nplt.show()\n\nfrom scipy.stats import chi2\n\nbins = np.linspace(0, 9, 50)\ntest = np.linspace(0, 9, 100)\ntrue_counts, _, _ = plt.hist(2 * true_nll, bins=bins, normed=1, alpha=0.2, label=\"Exact\")\napprox_counts, _, _ = plt.hist(2 * approx_nll, bins=bins, normed=1, alpha=0.2, label=\"Approx.\")\nplt.plot(test, chi2.pdf(test, df=1), lw=2)\nplt.legend()\n#plt.savefig(\"fig2b.pdf\")\nplt.show()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
jupyter/jupyterlab
|
galata/test/galata/notebooks/simple_test.ipynb
|
bsd-3-clause
|
[
"Upload Notebook for Examples\nThis notebook is designed to provide examples of different types of outputs that can be used to test the JupyterLab frontend and other Jupyter frontends.",
"from IPython.display import display\nfrom IPython.display import (\n HTML, Image, Latex, Math, Markdown, SVG\n)",
"Text\nPlain text:",
"text = \"\"\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam urna\nlibero, dictum a egestas non, placerat vel neque. In imperdiet iaculis fermentum. \nVestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia \nCurae; Cras augue tortor, tristique vitae varius nec, dictum eu lectus. Pellentesque \nid eleifend eros. In non odio in lorem iaculis sollicitudin. In faucibus ante ut \narcu fringilla interdum. Maecenas elit nulla, imperdiet nec blandit et, consequat \nut elit.\"\"\"\nprint(text)",
"Text as output:",
"text",
"Standard error:",
"import sys; print('this is stderr', file=sys.stderr)",
"HTML",
"div = HTML('<div style=\"width:100px;height:100px;background:grey;\" />')\ndiv\n\nfor i in range(3):\n print(7**10)\n display(div)",
"Markdown",
"md = Markdown(\"\"\"\n### Subtitle\n\nThis is some *markdown* text with math $F=ma$.\n\n\"\"\")\nmd\n\ndisplay(md)",
"LaTeX\nExamples LaTeX in a markdown cell:\n\\begin{align}\n\\nabla \\times \\vec{\\mathbf{B}} -\\, \\frac1c\\, \\frac{\\partial\\vec{\\mathbf{E}}}{\\partial t} & = \\frac{4\\pi}{c}\\vec{\\mathbf{j}} \\ \\nabla \\cdot \\vec{\\mathbf{E}} & = 4 \\pi \\rho \\\n\\nabla \\times \\vec{\\mathbf{E}}\\, +\\, \\frac1c\\, \\frac{\\partial\\vec{\\mathbf{B}}}{\\partial t} & = \\vec{\\mathbf{0}} \\\n\\nabla \\cdot \\vec{\\mathbf{B}} & = 0\n\\end{align}",
"math = Latex(\"$F=ma$\")\nmath\n\nmaxwells = Latex(r\"\"\"\n\\begin{align}\n\\nabla \\times \\vec{\\mathbf{B}} -\\, \\frac1c\\, \\frac{\\partial\\vec{\\mathbf{E}}}{\\partial t} & = \\frac{4\\pi}{c}\\vec{\\mathbf{j}} \\\\ \\nabla \\cdot \\vec{\\mathbf{E}} & = 4 \\pi \\rho \\\\\n\\nabla \\times \\vec{\\mathbf{E}}\\, +\\, \\frac1c\\, \\frac{\\partial\\vec{\\mathbf{B}}}{\\partial t} & = \\vec{\\mathbf{0}} \\\\\n\\nabla \\cdot \\vec{\\mathbf{B}} & = 0\n\\end{align}\n\"\"\")\nmaxwells",
"SVG",
"svg_source = \"\"\"\n<svg width=\"400\" height=\"110\">\n <rect width=\"300\" height=\"100\" style=\"fill:#E0E0E0;\" /> \n</svg>\n\"\"\"\nsvg = SVG(svg_source)\nsvg\n\nfor i in range(3):\n print(10**i)\n display(svg)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
ioam/scipy-2017-holoviews-tutorial
|
notebooks/03-exploration-with-containers.ipynb
|
bsd-3-clause
|
[
"<a href='http://www.holoviews.org'><img src=\"assets/hv+bk.png\" alt=\"HV+BK logos\" width=\"40%;\" align=\"left\"/></a>\n<div style=\"float:right;\"><h2>03. Exploration with Containers</h2></div>\n\nIn the first two sections of this tutorial we discovered how to declare static elements and compose them one by one into composite objects, allowing us to quickly visualize data as we explore it. However, many datasets contain numerous additional dimensions of data, such as the same measurement repeated across a large number of different settings or parameter values. To address these common situations, HoloViews provides ontainers that allow you to explore extra dimensions of your data using widgets, as animations, or by \"faceting\" it (splitting it into \"small multiples\") in various ways.\nTo begin with we will discover how we can quickly explore the parameters of a function by having it return an element and then evaluating the function over the parameter space.",
"import numpy as np\nimport holoviews as hv\nhv.extension('bokeh')\n%opts Curve Area [width=600]",
"Declaring elements in a function\nIf we write a function that accepts one or more parameters and constructs an element, we can build plots that do things like:\n\nLoading data from disk as needed\nQuerying data from an API\nCalculating data from a mathematical function\nGenerating data from a simulation\n\nAs a basic example, let's declare a function that generates a frequency-modulated signal and returns a Curve element:",
"def fm_modulation(f_carrier=110, f_mod=110, mod_index=1, length=0.1, sampleRate=3000):\n x = np.arange(0, length, 1.0/sampleRate)\n y = np.sin(2*np.pi*f_carrier*x + mod_index*np.sin(2*np.pi*f_mod*x))\n return hv.Curve((x, y), kdims=['Time'], vdims=['Amplitude'])",
"The function defines a number of parameters that will change the signal, but using the default parameters the function outputs a Curve like this:",
"fm_modulation()",
"HoloMaps\nThe HoloMap is the first container type we will start working with, because it is often the starting point of a parameter exploration. HoloMaps allow exploring a parameter space sampled at specific, discrete values, and can easily be created using a dictionary comprehension. When declaring a HoloMap, just ensure the length and ordering of the key tuple matches the key dimensions:",
"carrier_frequencies = [10, 20, 110, 220, 330]\nmodulation_frequencies = [110, 220, 330]\n\nhmap = hv.HoloMap({(fc, fm): fm_modulation(fc, fm) for fc in carrier_frequencies\n for fm in modulation_frequencies}, kdims=['fc', 'fm'])\nhmap",
"Note how the keys in our HoloMap map on to two automatically generated sliders. HoloViews supports two types of widgets by default: numeric sliders, or a dropdown selection menu for all non-numeric types. These sliders appear because a HoloMap can display only a single Element at one time, and the user must thus select which of the available elements to show at any one time.",
"# Exercise: Try changing the function below to return an ``Area`` or ``Scatter`` element,\n# in the same way `fm_modulation` returned a ``Curve`` element.\ndef fm_modulation2(f_carrier=220, f_mod=110, mod_index=1, length=0.1, sampleRate=3000):\n x = np.arange(0,length, 1.0/sampleRate)\n y = np.sin(2*np.pi*f_carrier*x + mod_index*np.sin(2*np.pi*f_mod*x))\n\n# Then declare a HoloMap like above and assign it to a ``exercise_hmap`` variable and display that\n",
"Apart from their simplicity and generality, one of the key features of HoloMaps is that they can be exported to a static HTML file, GIF, or video, because every combination of the sliders (parameter values) has been pre-computed already. This very convenient feature of pre-computation becomes a liability for very large or densely sampled parameter spaces, however, leading to the DynamicMap type discussed next.\nSummary\n\nHoloMaps allow declaring a parameter space\nThe default widgets provide a slider for numeric types and a dropdown menu for non-numeric types.\nHoloMap works well for small or sparsely sampled parameter spaces, exporting to static files\n\nDynamicMap\nA [DynamicMap]((holoviews.org/reference/containers/bokeh/DynamicMap.html) is very similar to a HoloMap except that it evaluates the function lazily. This property makes DynamicMap require a live, running Python server, not just an HTML-serving web site or email, and it may be slow if each frame is slower to compute than it is to display. However, because of these properties, DynamicMap allows exploring arbitrarily large parameter spaces, dynamically generating each element as needed to satisfy a request from the user. The key dimensions kdims must match the arguments of the function:",
"%%opts Curve (color='red')\ndmap = hv.DynamicMap(fm_modulation, kdims=['f_carrier', 'f_mod', 'mod_index'])\ndmap = dmap.redim.range(f_carrier=((10, 110)), f_mod=(10, 110), mod_index=(0.1, 2))\ndmap\n\n# Exercise: Declare a DynamicMap using the function from the previous exercise and name it ``exercise_dmap``\n\n\n# Exercise (Optional): Use the ``.redim.step`` method and a floating point range to modify the slider step\n",
"Faceting parameter spaces\nCasting\nHoloMaps and DynamicMaps let you explore a multidimensional parameter space by looking at one point in that space at a time, which is often but not always sufficient. If you want to see more data at once, you can facet the HoloMap to put some data points side by side or overlaid to facilitate comparison. One easy way to do that is to cast your HoloMap into a GridSpace, NdLayout, or NdOverlay container:",
"%%opts Curve [width=150]\nhv.GridSpace(hmap).opts()\n\n# Exercise: Try casting your ``exercise_hmap`` HoloMap from the first exercise to an ``NdLayout`` or \n# ``NdOverlay``, guessing from the name what the resulting organization will be before testing it.\n",
"Faceting with methods\nUsing the .overlay, .grid and .layout methods we can facet multi-dimensional data by a specific dimension:",
"hmap.overlay('fm')",
"Using these methods with a DynamicMap requires special attention, because a dynamic map can return an infinite number of different values along its dimensions, unlike a HoloMap. Obviously, HoloViews could not comply with such a request, but these methods are perfectly legal with DynamicMap if you also define which specific dimension values you need, using the .redim.values method:",
"%%opts Curve [width=150]\ndmap.redim.values(f_mod=[10, 20, 30], f_carrier=[10, 20, 30]).overlay('f_mod').grid('f_carrier').opts()\n\n# Exercise: Facet the ``exercise_dmap`` DynamicMap using ``.overlay`` and ``.grid``\n# Hint: Use the .redim.values method to set discrete values for ``f_mod`` and ``f_carrier`` dimensions\n",
"Optional\nSlicing and indexing\nHoloMaps and other containers also allow you to easily index or select by key, allowing you to:\n\nselect a specific key: obj[10, 110]\nselect a slice: obj[10, 200:]\nselect multiple values: obj[[10, 110], 110]",
"%%opts Curve [width=300]\nhmap[10, 110] + hmap[10, 200:].overlay() + hmap[[10, 110], 110].overlay()",
"You can do the same using the select method:",
"(hmap.select(fc=10, fm=110) +\n hmap.select(fc=10, fm=(200, None)).overlay() +\n hmap.select(fc=[10, 110], fm=110).overlay())\n\n# Exercise: Try selecting two carrier frequencies and two modulation frequencies on the ``exercise_hmap``\n",
"Onwards\n\nLearn more about using HoloMaps and other containers in the Dimensioned Containers user guide\nLearn more about working with DynamicMap in the Live Data user guide.\n\nThe following section will talk about building containers from data stored in tabular (spreadsheet-like) formats, which is a very common situation given special support."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
fweik/espresso
|
doc/tutorials/ferrofluid/ferrofluid_part1.ipynb
|
gpl-3.0
|
[
"Ferrofluid - Part 1\nTable of Contents\n\nIntroduction\nThe Model\nStructure of this tutorial\nCompiling ESPResSo for this Tutorial\nA Monolayer-Ferrofluid System in ESPResSo\nSetup\nSampling\nSampling with animation\nSampling without animation\nCluster distribution\n\n\n\nIntroduction\nFerrofluids are colloidal suspensions of ferromagnetic single-domain particles in a liquid carrier. As the single particles contain only one magnetic domain, they can be seen as small permanent magnets. To prevent agglomeration of the particles, due to van-der-Waals or magnetic attraction, they are usually sterically or electrostatically stabilized (see <a href='#fig_1'>figure 1</a>). The former is achieved by adsorption of long chain molecules onto the particle surface, the latter by adsorption of charged coating particles. The size of the ferromagnetic particles are in the region of 10 nm. With the surfactant layer added they can reach a size of a few hundred nanometers. Have in mind that if we refer to the particle diameter $\\sigma$ we mean the diameter of the magnetic core plus two times the thickness of the surfactant layer.\nSome of the liquid properties, like the viscosity, the phase behavior or the optical birefringence can be altered via an external magnetic field or simply the fluid can be guided by such an\nfield. Thus ferrofluids possess a wide range of biomedical applications like magnetic drug\ntargeting or magnetic thermoablation and technical applications like fine positioning systems or adaptive bearings and\ndampers.\nIn <a href='#fig_2'>figure 2</a> the picture of a ferrofluid exposed to the magnetic field of a permanent magnet is shown. 
The famous energy minimizing thorn-like surface is clearly visible.\n<a id='fig_1'></a><figure>\n<img src=\"figures/Electro-Steric_Stabilization.jpg\" style=\"float: center; width: 49%\">\n<center>\n<figcaption>Figure 1: Schematic representation of electrostatically stabilization (picture top) and steric stabilization (picture bottom) <a href='#[3]'>[3]</a></figcaption>\n</center>\n</figure>\n<a id='fig_2'></a><figure>\n<img src='figures/Ferrofluid_Magnet_under_glass_edit.jpg' alt='ferrofluid on glass plate under which a strong magnet is placed' style='width: 600px;'/>\n<center>\n<figcaption>Figure 2: Real Ferrofluid exposed to an external magnetic field (neodymium magnet) <a href='#[4]'>[4]</a></figcaption>\n</center>\n</figure>\nThe Model\nFor simplicity in this tutorial we simulate spherical particles in a monodisperse ferrofluid system which means all particles have the same diameter $\\sigma$ and dipole moment $\\mu$. The point dipole moment is placed at the center of the particles and is constant both in magnitude and direction (in the coordinate system of the particle). 
This can be justified as the Néel relaxation times are usually negligible for the usual sizes of ferrofluid particles.\nThus the magnetic interaction potential between two single particles is the dipole-dipole interaction potential which reads\n\\begin{equation}\n u_{\\text{DD}}(\\vec{r}{ij}, \\vec{\\mu}_i, \\vec{\\mu}_j) = \\gamma \\left(\\frac{\\vec{\\mu}_i \\cdot \\vec{\\mu}_j}{r{ij}^3} - 3\\frac{(\\vec{\\mu}i \\cdot \\vec{r}{ij}) \\cdot (\\vec{\\mu}j \\cdot \\vec{r}{ij})}{r_{ij}^5}\\right)\n\\end{equation}\nwith $\\gamma = \\frac{\\mu_0}{4 \\pi}$ and $\\mu_0$ the vacuum permeability.\nFor the steric interaction in this tutorial we use the purely repulsive Weeks-Chandler-Andersen (WCA) potential which is a Lennard-Jones potential with cut-off radius $r_{\\text{cut}}$ at the minimum of the potential $r_{\\text{cut}} = r_{\\text{min}} = 2^{\\frac{1}{6}}\\cdot \\sigma$ and shifted by $\\varepsilon_{ij}$ such that the potential is continuous at the cut-off radius. Thus the potential has the shape\n\\begin{equation}\n u_{\\text{sr}}^{\\text{WCA}}(r_{ij}) = \\left{\n \\begin{array}{ll}\n 4\\varepsilon_{ij}\\left[ \\left( \\frac{\\sigma}{r_{ij}} \\right)^{12} - \\left( \\frac{\\sigma}{r_{ij}} \\right)^6 \\right] + \\varepsilon_{ij} & r_{ij} < r_{\\text{cut}} \\\n 0 & r_{ij} \\geq r_{\\text{cut}} \\\n \\end{array}\n \\right.\n\\end{equation}\nwhere $r_{ij}$ are the distances between two particles.\nThe purely repulsive character of the potential can be justified by the fact that the particles in real ferrofluids are sterically or electrostatically stabilized against agglomeration.\nThe whole interaction potential reads\n\\begin{equation}\n u(\\vec{r}{ij}, \\vec{\\mu}_i, \\vec{\\mu}_j) = u{\\text{sr}}(\\vec{r}{ij}) + u{\\text{DD}}(\\vec{r}_{ij}, \\vec{\\mu}_i, \\vec{\\mu}_j)\n\\end{equation}\nThe liquid carrier of the system is simulated through a Langevin thermostat.\nFor ferrofluid systems there are three important parameters. 
The first is the volume fraction in three dimensions or the area fraction in two dimensions or quasi two dimensions. The second is the dipolar interaction parameter $\\lambda$\n\\begin{equation}\n \\lambda = \\frac{\\tilde{u}{\\text{DD}}}{u_T} = \\gamma \\frac{\\mu^2}{k{\\text{B}}T\\sigma^3}\n\\end{equation}\nwhere $u_\\mathrm{T} = k_{\\text{B}}T$ is the thermal energy and $\\tilde{u}_{DD}$ is the absolute value of the dipole-dipole interaction energy at close contact (cc) and head-to-tail configuration (htt) (see <a href='#fig_4'>figure 4</a>) per particle, i.e. in formulas it reads\n\\begin{equation}\n \\tilde{u}{\\text{DD}} = \\frac{ \\left| u{\\text{DD}}^{\\text{htt, cc}} \\right| }{2}\n\\end{equation}\nThe third parameter takes a possible external magnetic field into account and is called Langevin parameter $\\alpha$. It is the ratio between the energy of a dipole moment in the external magnetic field $B$ and the thermal energy\n\\begin{equation}\n \\alpha = \\frac{\\mu_0 \\mu}{k_{\\text{B}} T}B\n\\end{equation}\n<a id='fig_4'></a><figure>\n<img src='figures/headtotailconf.png' alt='schematic representation of head to tail configuration' style='width: 200px;'/>\n<center>\n<figcaption>Figure 4: Schematic representation of the head-to-tail configuration of two magnetic particles at close contact.</figcaption>\n</center>\n</figure>\nStructure of this tutorial\nThe aim of this tutorial is to introduce the basic features of ESPResSo for ferrofluids or dipolar fluids in general. In part I and part II we will do this for a monolayer-ferrofluid, in part III for a three dimensional system. In part I we will examine the clusters which are present in all interesting ferrofluid systems. In part II we will examine the influence of the dipole-dipole-interaction on the magnetization curve of a ferrofluid. 
In part III we calculate estimators for the initial susceptibility using fluctuation formulas and sample the magnetization curve.\nWe assume the reader is familiar with the basic concepts of Python and MD simulations.\nRemark: The equilibration and sampling times used in this tutorial would be not sufficient for scientific purposes, but they are long enough to get at least a qualitative insight of the behaviour of ferrofluids. They have been shortened so we achieve reasonable computation times for the purpose of a tutorial.\nCompiling ESPResSo for this Tutorial\nFor this tutorial the following features of ESPResSo are needed\n```c++\ndefine EXTERNAL_FORCES\ndefine ROTATION\ndefine DIPOLES\ndefine LENNARD_JONES\n```\nPlease uncomment them in the <tt>myconfig.hpp</tt> and compile ESPResSo using this <tt>myconfig.hpp</tt>.\nA Monolayer-Ferrofluid System in ESPResSo\nFor interesting ferrofluid systems, where the fraction of ferromagnetic particles in the liquid carrier and their dipole moment are not vanishingly small, the ferromagnetic particles form clusters of different shapes and sizes. If the fraction and/or dipole moments are big enough the clusters can interconnect with each other and form a whole space occupying network.\nIn this part we want to investigate the number of clusters as well as their shape and size in our simulated monolayer ferrofluid system. It should be noted that a monolayer is a quasi three dimensional system (q2D), i.e. two dimensional for the positions and three dimensional for the orientation of the dipole moments.\nSetup\nWe start with checking for the presence of ESPResSo features and importing all necessary packages.",
"import espressomd\nespressomd.assert_features('DIPOLES', 'LENNARD_JONES')\n\nfrom espressomd.magnetostatics import DipolarP3M\nfrom espressomd.magnetostatic_extensions import DLC\n\nfrom espressomd.cluster_analysis import ClusterStructure\nfrom espressomd.pair_criteria import DistanceCriterion\n\n\nimport numpy as np",
"Now we set up all simulation parameters.",
"# Lennard-Jones parameters\nLJ_SIGMA = 1\nLJ_EPSILON = 1\nLJ_CUT = 2**(1. / 6.) * LJ_SIGMA\n\n# Particles\nN_PART = 1200\n\n# Area fraction of the mono-layer\nPHI = 0.1\n\n# Dipolar interaction parameter lambda = mu_0 m^2 /(4 pi sigma^3 KT)\nDIP_LAMBDA = 4\n\n# Temperature\nKT = 1.0\n\n# Friction coefficient\nGAMMA = 1.0\n\n# Time step\nTIME_STEP = 0.01",
"Note that we declared a <tt>lj_cut</tt>. This will be used as the cut-off radius of the Lennard-Jones potential to obtain a purely repulsive WCA potential.\nNow we set up the system. The length of the simulation box is calculated using the desired area fraction and the area all particles occupy. Then we create the ESPResSo system and pass the simulation step. For the Verlet list skin parameter we use the built-in tuning algorithm of ESPResSo.\nExercise:\nHow large does BOX_SIZE have to be for a system of N_PART particles with a volume (area) fraction PHI?\nDefine BOX_SIZE.\n$$\nL_{\\text{box}} = \\sqrt{\\frac{N A_{\\text{sphere}}}{\\varphi}}\n$$\npython\nBOX_SIZE = (N_PART * np.pi * (LJ_SIGMA / 2.)**2 / PHI)**0.5",
"# System setup\n# BOX_SIZE = ...\n\n\nprint(\"Box size\", BOX_SIZE)\n# Note that the dipolar P3M and dipolar layer correction need a cubic\n# simulation box for technical reasons.\nsystem = espressomd.System(box_l=(BOX_SIZE, BOX_SIZE, BOX_SIZE))\nsystem.time_step = TIME_STEP",
"Now we set up the interaction between the particles as a non-bonded interaction and use the Lennard-Jones potential as the interaction potential. Here we use the above mentioned cut-off radius to get a purely repulsive interaction.",
"# Lennard-Jones interaction\nsystem.non_bonded_inter[0, 0].lennard_jones.set_params(epsilon=LJ_EPSILON, sigma=LJ_SIGMA, cutoff=LJ_CUT, shift=\"auto\")",
"Now we generate random positions and orientations of the particles and their dipole moments. \nHint:\nIt should be noted that we seed the random number generator of numpy. Thus the initial configuration of our system is the same every time this script will be executed. You can change it to another one to simulate with a different initial configuration.\nExercise:\nHow does one set up randomly oriented dipole moments?\nHint: Think of the way that different methods could introduce a bias in the distribution of the orientations.\nCreate a variable dip as a N_PART x 3 numpy array, which contains the randomly distributed dipole moments.\n```python\nRandom dipole moments\nnp.random.seed(seed=1)\ndip_phi = 2. * np.pi * np.random.random((N_PART, 1))\ndip_cos_theta = 2 * np.random.random((N_PART, 1)) - 1\ndip_sin_theta = np.sin(np.arccos(dip_cos_theta))\ndip = np.hstack((\n dip_sin_theta * np.sin(dip_phi),\n dip_sin_theta * np.cos(dip_phi),\n dip_cos_theta))\n```",
"# Random dipole moments\n# ...\n# dip = ...\n\n# Random positions in the monolayer\npos = BOX_SIZE * np.hstack((np.random.random((N_PART, 2)), np.zeros((N_PART, 1))))",
"Now we add the particles with their positions and orientations to our system. Thereby we activate all degrees of freedom for the orientation of the dipole moments. As we want a two dimensional system we only allow the particles to translate in $x$- and $y$-direction and not in $z$-direction by using the <tt>fix</tt> argument.",
"# Add particles\nsystem.part.add(pos=pos, rotation=N_PART * [(1, 1, 1)], dip=dip, fix=N_PART * [(0, 0, 1)])",
"Be aware that we do not set the magnitude of the magnetic dipole moments to the particles. As in our case all particles have the same dipole moment it is possible to rewrite the dipole-dipole interaction potential to\n\\begin{equation}\n u_{\\text{DD}}(\\vec{r}{ij}, \\vec{\\mu}_i, \\vec{\\mu}_j) = \\gamma \\mu^2 \\left(\\frac{\\vec{\\hat{\\mu}}_i \\cdot \\vec{\\hat{\\mu}}_j}{r{ij}^3} - 3\\frac{(\\vec{\\hat{\\mu}}i \\cdot \\vec{r}{ij}) \\cdot (\\vec{\\hat{\\mu}}j \\cdot \\vec{r}{ij})}{r_{ij}^5}\\right)\n\\end{equation}\nwhere $\\vec{\\hat{\\mu}}_i$ is the unit vector of the dipole moment $i$ and $\\mu$ is the magnitude of the dipole moments.\nThus we can only prescribe the initial orientation of the dipole moment to the particles and take the magnitude of the moments into account when calculating the dipole-dipole interaction with Dipolar P3M, by modifying the original Dipolar P3M prefactor $\\gamma$ such that\n\\begin{equation}\n \\tilde{\\gamma} = \\gamma \\mu^2 = \\frac{\\mu_0}{4\\pi}\\mu^2 = \\lambda \\sigma^3 k_{\\text{B}}T\n\\end{equation}\nOf course it would also be possible to prescribe the whole dipole moment vectors to every particle and leave the prefactor of Dipolar P3M unchanged ($\\gamma$). In fact we have to do this if we want to simulate polydisperse systems.\nNow we choose the steepest descent integrator to remove possible overlaps of the particles.",
"# Set integrator to steepest descent method\nsystem.integrator.set_steepest_descent(\n f_max=0, gamma=0.1, max_displacement=0.05)",
"Exercise:\nPerform a steepest descent energy minimization.\nTrack the relative energy change $E_{\\text{rel}}$ per minimization loop (where the integrator is run for 10 steps) and terminate once $E_{\\text{rel}} \\le 0.05$, i.e. when there is less than a 5% difference in the relative energy change in between iterations.\n```python\nimport sys\nenergy = system.analysis.energy()['total']\nrelative_energy_change = 1.0\nwhile relative_energy_change > 0.05:\n system.integrator.run(10)\n energy_new = system.analysis.energy()['total']\n # Prevent division by zero errors:\n if energy < sys.float_info.epsilon:\n break\n relative_energy_change = (energy - energy_new) / energy\n print(f\"Minimization, relative change in energy: {relative_energy_change}\")\n energy = energy_new\n```\nFor the simulation of our system we choose the velocity Verlet integrator.\nAfter that we set up the thermostat which is, in our case, a Langevin thermostat to simulate in an NVT ensemble.\nHint:\nIt should be noted that we seed the Langevin thermostat, thus the time evolution of the system is partly predefined. Partly because of the numeric accuracy and the automatic tuning algorithms of Dipolar P3M and DLC where the resulting parameters are slightly different every time. You can change the seed to get a guaranteed different time evolution.",
"# Switch to velocity Verlet integrator\nsystem.integrator.set_vv()\nsystem.thermostat.set_langevin(kT=KT, gamma=GAMMA, seed=1)",
"To calculate the dipole-dipole interaction we use the Dipolar P3M method (see Ref. <a href='#[1]'>[1]</a>) which is based on the Ewald summation. By default the boundary conditions of the system are set to conducting which means the dielectric constant is set to infinity for the surrounding medium. As we want to simulate a two dimensional system we additionally use the dipolar layer correction (DLC) (see Ref. <a href='#[2]'>[2]</a>). As we add <tt>DipolarP3M</tt> to our system as an actor, a tuning function is started automatically which tries to find the optimal parameters for Dipolar P3M and prints them to the screen. The last line of the output is the value of the tuned skin.",
"# Setup dipolar P3M and dipolar layer correction\ndp3m = DipolarP3M(accuracy=5E-4, prefactor=DIP_LAMBDA * LJ_SIGMA**3 * KT)\ndlc = DLC(maxPWerror=1E-4, gap_size=BOX_SIZE - LJ_SIGMA)\nsystem.actors.add(dp3m)\nsystem.actors.add(dlc)\n\n# tune verlet list skin\nsystem.cell_system.tune_skin(min_skin=0.4, max_skin=2., tol=0.2, int_steps=100)\n\n# print skin value\nprint('tuned skin = {}'.format(system.cell_system.skin))",
"Now we equilibrate the dipole-dipole interaction for some time",
"# Equilibrate\nprint(\"Equilibration...\")\nEQUIL_ROUNDS = 20\nEQUIL_STEPS = 1000\nfor i in range(EQUIL_ROUNDS):\n system.integrator.run(EQUIL_STEPS)\n print(\n f\"progress: {(i + 1) * 100. / EQUIL_ROUNDS}%, dipolar energy: {system.analysis.energy()['dipolar']}\",\n end=\"\\r\")\nprint(\"\\nEquilibration done\")",
"Sampling\nThe system will be sampled over 100 loops.",
"LOOPS = 100",
"As the system is two dimensional, we can simply do a scatter plot to get a visual representation of a system state. To get a better insight of how a ferrofluid system develops during time we will create a video of the development of our system during the sampling. If you only want to sample the system simply go to Sampling without animation\nSampling with animation\nTo get an animation of the system development we have to create a function which will save the video and embed it in an html string.",
"import matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom tempfile import NamedTemporaryFile\nimport base64\n\nVIDEO_TAG = \"\"\"<video controls>\n <source src=\"data:video/x-m4v;base64,{0}\" type=\"video/mp4\">\n Your browser does not support the video tag.\n</video>\"\"\"\n\n\ndef anim_to_html(anim):\n if not hasattr(anim, '_encoded_video'):\n with NamedTemporaryFile(suffix='.mp4') as f:\n anim.save(f.name, fps=20, extra_args=['-vcodec', 'libx264'])\n with open(f.name, \"rb\") as g:\n video = g.read()\n anim._encoded_video = base64.b64encode(video).decode('ascii')\n plt.close(anim._fig)\n return VIDEO_TAG.format(anim._encoded_video)\n\n\nanimation.Animation._repr_html_ = anim_to_html\n\n\ndef init():\n # Set x and y range\n ax.set_ylim(0, BOX_SIZE)\n ax.set_xlim(0, BOX_SIZE)\n x_data, y_data = [], []\n part.set_data(x_data, y_data)\n return part,",
"Exercise:\nIn the following an animation loop is defined, however it is incomplete.\nExtend the code such that in every loop the system is integrated for 100 steps.\nAfterwards x_data and y_data have to be populated by the folded $x$- and $y$- positions of the particles.\n(You may copy and paste the incomplete code template to the empty cell below.)\n```python\ndef run(i):\n # < excercise >\n# Save current system state as a plot\nx_data, y_data = # < excercise >\nax.figure.canvas.draw()\npart.set_data(x_data, y_data)\nprint(\"progress: {:3.0f}%\".format((i + 1) * 100. / LOOPS), end=\"\\r\")\nreturn part,\n\n```\n```python\ndef run(i):\n system.integrator.run(100)\n# Save current system state as a plot\nx_data, y_data = system.part[:].pos_folded[:, 0], system.part[:].pos_folded[:, 1]\nax.figure.canvas.draw()\npart.set_data(x_data, y_data)\nprint(\"progress: {:3.0f}%\".format((i + 1) * 100. / LOOPS), end=\"\\r\")\nreturn part,\n\n```\nNow we use the <tt>animation</tt> class of <tt>matplotlib</tt> to save snapshots of the system as frames of a video which is then displayed after the sampling is finished. Between two frames are 100 integration steps.\nIn the video chain-like and ring-like clusters should be visible, as well as some isolated monomers.",
"fig, ax = plt.subplots(figsize=(10, 10))\npart, = ax.plot([], [], 'o')\n\nanimation.FuncAnimation(fig, run, frames=LOOPS, blit=True, interval=0, repeat=False, init_func=init)",
"Cluster analysis\nTo quantify the number of clusters and their respective sizes, we now want to perform a cluster analysis.\nFor that we can use ESPResSo's cluster analysis class.\nExercise:\nSetup a cluster analysis object (ClusterStructure class) and assign its instance to the variable cluster_structure.\nAs criterion for the cluster analysis use a distance criterion where particles are assumed to be part of a cluster if the nearest neighbors are closer than $1.3\\sigma_{\\text{LJ}}$.\n```python\nSetup cluster analysis\ncluster_structure = ClusterStructure(pair_criterion=DistanceCriterion(cut_off=1.3 * LJ_SIGMA))\n```\nNow we sample our system for some time and do a cluster analysis in order to get an estimator of the cluster observables.\nFor the cluster analysis we create two empty lists. The first for the number of clusters and the second for their respective sizes.",
"n_clusters = []\ncluster_sizes = []",
"Sampling without animation\nThe following code just samples the system and does a cluster analysis every <tt>loops</tt> (100 by default) simulation steps.\nExercise:\nWrite an integration loop which runs a cluster analysis on the system, saving the number of clusters n_clusters and the size distribution cluster_sizes.\nTake the following as a starting point:\n```python\nfor i in range(LOOPS):\n # Run cluster analysis\n cluster_structure.run_for_all_pairs()\n# Gather statistics:\nn_clusters.append(# < excercise >)\nfor c in cluster_structure.clusters:\n cluster_sizes.append(# < excercise >)\nsystem.integrator.run(100)\nprint(\"progress: {:3.0f}%\".format((float(i)+1)/LOOPS * 100), end=\"\\r\")\n\n```\n```python\nfor i in range(LOOPS):\n # Run cluster analysis\n cluster_structure.run_for_all_pairs()\n# Gather statistics:\nn_clusters.append(len(cluster_structure.clusters))\nfor c in cluster_structure.clusters:\n cluster_sizes.append(c[1].size())\nsystem.integrator.run(100)\nprint(\"progress: {:3.0f}%\".format((float(i) + 1) / LOOPS * 100), end=\"\\r\")\n\n```\nYou may want to get a visualization of the current state of the system. For that we plot the particle positions folded to the simulation box using <tt>matplotlib</tt>.",
"import matplotlib.pyplot as plt\nplt.figure(figsize=(10, 10))\nplt.xlim(0, BOX_SIZE)\nplt.ylim(0, BOX_SIZE)\nplt.xlabel('x-position', fontsize=20)\nplt.ylabel('y-position', fontsize=20)\nplt.plot(system.part[:].pos_folded[:, 0], system.part[:].pos_folded[:, 1], 'o')\nplt.show()",
"In the plot chain-like and ring-like clusters should be visible. Some of them are connected via Y- or X-links to each other. Also some monomers should be present.\nCluster distribution\nAfter having sampled our system we now can calculate estimators for the expectation value of the cluster sizes and their distribution.\nExercise:\nUse numpy to calculate a histogram of the cluster sizes and assign it to the variable size_dist.\nTake only clusters up to a size of 19 particles into account.\nHint: In order not to count clusters with size 20 or more, one may include an additional bin containing these.\nThe reason for that is that numpy defines the histogram bins as half-open intervals with the open border at the upper bin edge.\nConsequently clusters of larger sizes are attributed to the last bin.\nBy not using the last bin in the plot below, these clusters can effectively be neglected.\npython\nsize_dist = np.histogram(cluster_sizes, range=(2, 21), bins=19)\nNow we can plot this histogram and should see an exponential decrease in the number of particles in a cluster along the size of a cluster, i.e. the number of monomers in it",
"plt.figure(figsize=(10, 10))\nplt.grid()\nplt.xticks(range(0, 20))\nplt.plot(size_dist[1][:-2], size_dist[0][:-1] / float(LOOPS))\nplt.xlabel('size of clusters', fontsize=20)\nplt.ylabel('distribution', fontsize=20)\nplt.show()",
"References\n<a id='[1]'></a>[1] Juan J. Cerdà, V. Ballenegger, O. Lenz, and Ch. Holm. P3M algorithm for dipolar interactions. Journal of Chemical Physics, 129:234104, 2008.\n<a id='[2]'></a>[2] A. Bródka. Ewald summation method with electrostatic layer correction for\ninteractions of point dipoles in slab geometry. Chemical Physics Letters 400(1–3): 62–67, 2004. <small>DOI:</small><a href=\"https://doi.org/10.1016/j.cplett.2004.10.086\">10.1016/j.cplett.2004.10.086</a>\nImage sources:\n<a id='[3]'></a>[3] <a href=\"https://commons.wikimedia.org/wiki/User:Ayouril\">Ayouril</a>, <a href=\"https://commons.wikimedia.org/wiki/File:Electro-Steric_Stabilization.jpg\">Electro-Steric Stabilization</a>, <a href=\"https://creativecommons.org/licenses/by-sa/3.0/legalcode\" rel=\"license\">CC BY-SA 3.0</a> \n<a id='[4]'></a>[4] <a href=\"https://en.wikipedia.org/wiki/User:Gmaxwell\">Gregory F. Maxwell</a> <<a href=\"mailto:gmaxwell@gmail.com\">gmaxwell@gmail.com</a>> <a href=\"https://en.wikipedia.org/wiki/Pretty_Good_Privacy\">PGP</a>:<a href=\"http://pgp.nic.ad.jp/pks/lookup?op=vindex&search=0xB0413BFA\">0xB0413BFA</a>, <a href=\"https://commons.wikimedia.org/wiki/File:Ferrofluid_Magnet_under_glass_edit.jpg#file\">Ferrofluid Magnet under glass edit</a>, <a href=\"https://creativecommons.org/licenses/by-sa/3.0/legalcode\" rel=\"license\">CC BY-SA 3.0</a>"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
Naereen/notebooks
|
agreg/TP_Programmation_2017-18/TP3__Python.ipynb
|
mit
|
[
"<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#TP-3---Programmation-pour-la-préparation-à-l'agrégation-maths-option-info\" data-toc-modified-id=\"TP-3---Programmation-pour-la-préparation-à-l'agrégation-maths-option-info-1\"><span class=\"toc-item-num\">1 </span>TP 3 - Programmation pour la préparation à l'agrégation maths option info</a></span></li><li><span><a href=\"#Arbres-binaires-de-recherche\" data-toc-modified-id=\"Arbres-binaires-de-recherche-2\"><span class=\"toc-item-num\">2 </span>Arbres binaires de recherche</a></span><ul class=\"toc-item\"><li><span><a href=\"#Exercice-1-:-ABR\" data-toc-modified-id=\"Exercice-1-:-ABR-2.1\"><span class=\"toc-item-num\">2.1 </span>Exercice 1 : ABR</a></span><ul class=\"toc-item\"><li><span><a href=\"#Définition-du-type-Abr-et-exemples-de-valeurs\" data-toc-modified-id=\"Définition-du-type-Abr-et-exemples-de-valeurs-2.1.1\"><span class=\"toc-item-num\">2.1.1 </span>Définition du type Abr et exemples de valeurs</a></span></li><li><span><a href=\"#Compter-les-clés-d'un-Abr\" data-toc-modified-id=\"Compter-les-clés-d'un-Abr-2.1.2\"><span class=\"toc-item-num\">2.1.2 </span>Compter les clés d'un Abr</a></span></li></ul></li><li><span><a href=\"#Exercice-2-:-trouve\" data-toc-modified-id=\"Exercice-2-:-trouve-2.2\"><span class=\"toc-item-num\">2.2 </span>Exercice 2 : <code>trouve</code></a></span></li><li><span><a href=\"#Exercice-3-:-insertion\" data-toc-modified-id=\"Exercice-3-:-insertion-2.3\"><span class=\"toc-item-num\">2.3 </span>Exercice 3 : <code>insertion</code></a></span></li><li><span><a href=\"#Exercice-4-:-suppression\" data-toc-modified-id=\"Exercice-4-:-suppression-2.4\"><span class=\"toc-item-num\">2.4 </span>Exercice 4 : <code>suppression</code></a></span></li><li><span><a href=\"#Exercice-5-:-fusion\" data-toc-modified-id=\"Exercice-5-:-fusion-2.5\"><span class=\"toc-item-num\">2.5 </span>Exercice 5 : 
<code>fusion</code></a></span></li><li><span><a href=\"#Exercice-6\" data-toc-modified-id=\"Exercice-6-2.6\"><span class=\"toc-item-num\">2.6 </span>Exercice 6</a></span><ul class=\"toc-item\"><li><span><a href=\"#Avantages-et-les-inconvénients-des-ABR\" data-toc-modified-id=\"Avantages-et-les-inconvénients-des-ABR-2.6.1\"><span class=\"toc-item-num\">2.6.1 </span>Avantages et les inconvénients des ABR</a></span></li><li><span><a href=\"#Autres-structures-de-données-(clef,-valeur)\" data-toc-modified-id=\"Autres-structures-de-données-(clef,-valeur)-2.6.2\"><span class=\"toc-item-num\">2.6.2 </span>Autres structures de données (clef, valeur)</a></span></li></ul></li></ul></li><li><span><a href=\"#Tas-binaire-min-(ou-max)\" data-toc-modified-id=\"Tas-binaire-min-(ou-max)-3\"><span class=\"toc-item-num\">3 </span>Tas binaire min (ou max)</a></span><ul class=\"toc-item\"><li><span><a href=\"#Solution-concise-:-tas-binaire-avec-des-arbres\" data-toc-modified-id=\"Solution-concise-:-tas-binaire-avec-des-arbres-3.1\"><span class=\"toc-item-num\">3.1 </span>Solution concise : tas binaire avec des arbres</a></span></li><li><span><a href=\"#Exercice-7-:-arbre-tournoi\" data-toc-modified-id=\"Exercice-7-:-arbre-tournoi-3.2\"><span class=\"toc-item-num\">3.2 </span>Exercice 7 : arbre tournoi</a></span></li><li><span><a href=\"#Exercice-8-:-parent,-fils_gauche-et-fils_droit\" data-toc-modified-id=\"Exercice-8-:-parent,-fils_gauche-et-fils_droit-3.3\"><span class=\"toc-item-num\">3.3 </span>Exercice 8 : <code>parent</code>, <code>fils_gauche</code> et <code>fils_droit</code></a></span></li><li><span><a href=\"#Exercice-9-:-echange\" data-toc-modified-id=\"Exercice-9-:-echange-3.4\"><span class=\"toc-item-num\">3.4 </span>Exercice 9 : <code>echange</code></a></span></li><li><span><a href=\"#Exercice-10-:-insertion\" data-toc-modified-id=\"Exercice-10-:-insertion-3.5\"><span class=\"toc-item-num\">3.5 </span>Exercice 10 : <code>insertion</code></a></span></li><li><span><a 
href=\"#Exercice-11-:-creation\" data-toc-modified-id=\"Exercice-11-:-creation-3.6\"><span class=\"toc-item-num\">3.6 </span>Exercice 11 : <code>creation</code></a></span></li><li><span><a href=\"#Exercice-12-:-diminue_clef\" data-toc-modified-id=\"Exercice-12-:-diminue_clef-3.7\"><span class=\"toc-item-num\">3.7 </span>Exercice 12 : <code>diminue_clef</code></a></span></li><li><span><a href=\"#Exercice-13-:-extraire_min\" data-toc-modified-id=\"Exercice-13-:-extraire_min-3.8\"><span class=\"toc-item-num\">3.8 </span>Exercice 13 : <code>extraire_min</code></a></span></li><li><span><a href=\"#Exercice-14-:-tri-par-tas\" data-toc-modified-id=\"Exercice-14-:-tri-par-tas-3.9\"><span class=\"toc-item-num\">3.9 </span>Exercice 14 : tri par tas</a></span></li></ul></li><li><span><a href=\"#Union-Find\" data-toc-modified-id=\"Union-Find-4\"><span class=\"toc-item-num\">4 </span>Union-Find</a></span><ul class=\"toc-item\"><li><span><a href=\"#Exercice-15-:-Union-Find-avec-tableaux\" data-toc-modified-id=\"Exercice-15-:-Union-Find-avec-tableaux-4.1\"><span class=\"toc-item-num\">4.1 </span>Exercice 15 : Union-Find avec tableaux</a></span></li><li><span><a href=\"#Exercice-16-:-Union-Find-avec-forêts\" data-toc-modified-id=\"Exercice-16-:-Union-Find-avec-forêts-4.2\"><span class=\"toc-item-num\">4.2 </span>Exercice 16 : Union-Find avec forêts</a></span></li><li><span><a href=\"#Exercice-17-:-Bonus-&-discussions\" data-toc-modified-id=\"Exercice-17-:-Bonus-&-discussions-4.3\"><span class=\"toc-item-num\">4.3 </span>Exercice 17 : Bonus & discussions</a></span></li><li><span><a href=\"#Bonus-:-algorithme-de-Kruskal\" data-toc-modified-id=\"Bonus-:-algorithme-de-Kruskal-4.4\"><span class=\"toc-item-num\">4.4 </span>Bonus : algorithme de Kruskal</a></span><ul class=\"toc-item\"><li><span><a href=\"#Représentations-de-graphe-pondérés\" data-toc-modified-id=\"Représentations-de-graphe-pondérés-4.4.1\"><span class=\"toc-item-num\">4.4.1 </span>Représentations de graphe 
pondérés</a></span></li><li><span><a href=\"#Algorithme-de-Kruskal\" data-toc-modified-id=\"Algorithme-de-Kruskal-4.4.2\"><span class=\"toc-item-num\">4.4.2 </span>Algorithme de Kruskal</a></span></li></ul></li></ul></li><li><span><a href=\"#Conclusion\" data-toc-modified-id=\"Conclusion-5\"><span class=\"toc-item-num\">5 </span>Conclusion</a></span></li></ul></div>\n\nTP 3 - Programmation pour la préparation à l'agrégation maths option info\n\nEn Python.",
"import sys\nprint(sys.version)",
"Arbres binaires de recherche\nExercice 1 : ABR\nDéfinition du type Abr et exemples de valeurs\nComme au TP1 et TP2, plutôt que d'utiliser des classes, je préfère utiliser des dictionnaires avec une seule clé et une seule valeur pour représenter des structures arborescentes.\nLa valeur est ici None s'il n'y a rien à stocker (pour une feuille Leaf par exemple), ou un tuple k, v, l, r pour un Node.\nCela simule la syntaxe de OCaml pour la création de valeurs, et le pattern matching (filtrage) est simulé par des if/elif/else testant la clé.",
"Leaf = {\"Leaf\": None}\n\ndef Node(k: int, v: str, l: dict, r: dict) -> dict:\n \"\"\" Crée un arbre binaire avec un noeud de clé k, de valeur v, de fils gauche l et de fils droit r.\"\"\"\n return {\"Node\": (k, v, l, r)}",
"On peut aussi utiliser une approche objet, et définir une classe.\nL'avantage sera un accès plus rapide aux différentes valeurs stockées : arbre.l donne le fils gauche.",
"class NodeClass():\n def __init__(self, k: int, v: str, l, r):\n self.k = k\n self.v = v\n self.l = l\n self.r = r",
"Note sur le typage en Python\nEn Python, avec le module typing, on peut définir des alias de types (non récursifs), comme en OCaml.\nOn peut ensuite ajouter indications de typage, à des définitions de variables ou de fonctions.",
"from typing import Dict, List, Any, Union, Tuple\n\nAbr = Dict[str, Union[str, Tuple[int, str, Any, Any]]]",
"Exemples d'arbres binaires",
"exemple_arbre_binaire1: Abr = Node(1, \"a\", Leaf, Leaf)\nprint(exemple_arbre_binaire1)\n\nexemple_arbre_binaire2: Abr = Node(2, \"b\", exemple_arbre_binaire1, Leaf)\nprint(exemple_arbre_binaire2)\n\nexemple_arbre_binaire3: Abr = Node(3, \"c\", exemple_arbre_binaire1, exemple_arbre_binaire2)\nprint(exemple_arbre_binaire3)",
"Pour afficher joliment des structures arborescentes on peut utiliser pprint.pprint :",
"from pprint import pprint\n\npprint(exemple_arbre_binaire2)\n\npprint(exemple_arbre_binaire3)",
"En OCaml on pouvait définir un type récursif comme cela (observez comment abr intervient dans la définition de anode qui intervient dans la définition de abr...), mais en Python ce n'est pas possible.\n```ocaml\ntype 'a abr =\n | Leaf\n | Node of 'a anode\nand 'b anode = {\n key : int;\n value : 'b;\n left : 'b abr; ( pour toute clé [k] dans [left], [k] < [key] )\n right : 'b abr ( pour toute clé [k] dans [right], [key] < [k] )\n}\n;;\n```\nCompter les clés d'un Abr\nCompter les clés est facile :",
"def nb_keys(a: Abr) -> int:\n \"\"\" Calcule le nombre de clés d'un arbre binaire a.\"\"\"\n # on simule \"match a with ...\" avec des if/elif/else\n if \"Leaf\" in a: # simule | Leaf ->\n return 0\n elif \"Node\" in a:\n _, _, l, r = a[\"Node\"]\n return 1 + nb_keys(l) + nb_keys(r)\n else: # ce cas n'arrive jamais\n return -1",
"En OCaml, l'avantage du pattern matching est qu'il nous prévient si le test n'est pas exhaustif.\nEn Python, on ne peut pas avoir ce genre d'aide.\nocaml\nlet rec nb_keys (a : 'a abr) : int =\n match a with\n (* si on oublie le cas de base : | Leaf -> 0 *)\n | Node n -> 1 + nb_keys n.left + nb_keys n.right\n;;\n(* Cette fonction sera bien typée, mais pas correcte :\nl'interprète ou le compilateur prévient par un Warning :\nWarning 8: this pattern-matching is not exhaustive.\nHere is an example of a case that is not matched:\nLeaf\n*)\nIl faut prendre l'habitude de vérifier vos fonctions sur tous les exemples définis précédemment :",
"print(\"L'arbre binaire suivant :\")\npprint(exemple_arbre_binaire1)\nprint(\"contient\", nb_keys(exemple_arbre_binaire1), \"clés.\")\n\nprint(\"L'arbre binaire suivant :\")\npprint(exemple_arbre_binaire2)\nprint(\"contient\", nb_keys(exemple_arbre_binaire2), \"clés.\")\n\nprint(\"L'arbre binaire suivant :\")\npprint(exemple_arbre_binaire3)\nprint(\"contient\", nb_keys(exemple_arbre_binaire3), \"clés.\")",
"Extraire une liste des clés\nBonus : on peut extraire une liste des clés, très facilement :",
"def list_keys(a: Abr) -> List[str]:\n \"\"\" Extraire la liste des clés d'un arbre binaire a (parcours en profondeur).\"\"\"\n # on simule \"match a with ...\" avec des if/elif/else\n if \"Leaf\" in a: # simule | Leaf ->\n return []\n elif \"Node\" in a:\n v, k, l, r = a[\"Node\"]\n return [k] + list_keys(l) + list_keys(r)\n else: # ce cas n'arrive jamais\n return []\n\nprint(\"L'arbre binaire suivant :\")\npprint(exemple_arbre_binaire1)\nprint(\"contient ces clés :\", list_keys(exemple_arbre_binaire1))\n\nprint(\"L'arbre binaire suivant :\")\npprint(exemple_arbre_binaire2)\nprint(\"contient ces clés :\", list_keys(exemple_arbre_binaire2))\n\nprint(\"L'arbre binaire suivant :\")\npprint(exemple_arbre_binaire3)\nprint(\"contient ces clés :\", list_keys(exemple_arbre_binaire3))",
"Exercice 2 : trouve",
"def trouve(a: Abr, x: int) -> Union[int, None]:\n # Optional[int] is equivalent to Union[int]\n if \"Leaf\" in a:\n return None\n elif \"Node\" in a:\n k, v, l, r = a[\"Node\"]\n if k == x: # on a trouvé la valeur v associée à la clé k = x\n return v\n elif x < k:\n # on cherche dans le sous arbre de gauche si la valeur cherchée\n # est plus petite que la clé du noeud\n return trouve(l, x)\n elif x > k:\n # sinon on cherche dans le sous arbre de droite\n return trouve(r, x)\n # ce dernier cas ne doit pas arriver\n else: raise ValueError(\"Incapable de trouver {} dans l'arbre binaire {}\".format(x, a))\n\nprint(\"En cherchant la clé 1 dans l'arbre binaire suivant\")\npprint(exemple_arbre_binaire1)\nprint(\"on trouve la valeur associée =\", trouve(exemple_arbre_binaire1, 1))\n\nprint(\"En cherchant la clé 3 dans l'arbre binaire suivant\")\npprint(exemple_arbre_binaire3)\nprint(\"on trouve la valeur associée =\", trouve(exemple_arbre_binaire3, 3))",
"Exemple de recherche qui ne fonctionne pas (sans renvoyer d'erreur, juste un None) :",
"print(\"En cherchant la clé 42 dans l'arbre binaire suivant\")\npprint(exemple_arbre_binaire3)\nprint(\"on trouve la valeur associée =\", trouve(exemple_arbre_binaire3, 42))",
"Si on veut que la recherche échoue si la clé cherchée n'est pas présente, on peut changer le code et écrire :",
"def trouve_avec_erreur(a: Abr, x: int) -> int: # notez que le type de retour est juste un entier désormais\n if \"Leaf\" in a:\n raise ValueError(\"Incapable de trouver {} dans l'arbre binaire {}\".format(x, a))\n elif \"Node\" in a:\n k, _, l, r = a[\"Node\"]\n if k == x: # on a trouvé la valeur v associée à la clé k = x\n return v\n elif x < k: # on cherche dans le sous arbre de gauche\n return trouve_avec_erreur(l, x)\n elif x > k: # on cherche dans le sous arbre de droite\n return trouve_avec_erreur(r, x)\n # ce dernier cas ne doit pas arriver\n else: raise ValueError(\"Incapable de trouver {} dans l'arbre binaire {}\".format(x, a))\n\nprint(\"En cherchant la clé 42 dans l'arbre binaire suivant\")\npprint(exemple_arbre_binaire3)\nprint(\"on trouve la valeur associée =\", trouve_avec_erreur(exemple_arbre_binaire3, 42))",
"La traceback (affichage de l'erreur) nous montre même les appels récursifs qui ont menés à l'erreur. (ici, deux appels récursifs sur le fils droit, puisque 42 était plus grand que les deux clés successives 3 et 2).\nExercice 3 : insertion",
"def insertion(a: Abr, k: int, v: str) -> Abr:\n if \"Leaf\" in a: # a arbre vide, on crée un noeud ayant deux fils vides\n return Node(k, v, Leaf, Leaf)\n elif \"Node\" in a:\n ka, va, l, r = a[\"Node\"]\n if k == ka: # on ne change pas les fils\n return Node(k, v, l, r) # change va -> v\n elif k < ka: # on change le fils gauche\n return Node(ka, va, insertion(l, k, v), r)\n elif k > ka: # on change le fils droit\n return Node(ka, va, l, insertion(r, k, v))\n # ce dernier cas ne doit pas arriver\n else:\n raise ValueError(\"Incapable d'insérer {}:{} dans l'arbre binaire {}\".format(k, v, a))",
"Quelques tests :",
"pprint(insertion(insertion(Leaf, 2, \"deux\"), 1, \"un\"))\nprint(\"Valeur trouvée pour la clé 1 =\", trouve(insertion(insertion(Leaf, 2, \"deux\"), 1, \"un\"), 1))\n\npprint(insertion(insertion(Leaf, 2, \"deux\"), 1, \"un\"))\nprint(\"Valeur trouvée pour la clé 2 =\", trouve(insertion(insertion(Leaf, 2, \"deux\"), 1, \"un\"), 2))\n\npprint(insertion(insertion(Leaf, 2, \"deux\"), 1, \"un\"))\nprint(\"Valeur trouvée pour la clé 3 =\", trouve(insertion(insertion(Leaf, 2, \"deux\"), 1, \"un\"), 3))",
"Exercice 4 : suppression\nminimum a renvoie le couple (key, value) de l'arbre a avec key minimal dans a.\nLance une exception si a est vide.",
"def minimum(a: Abr) -> Tuple[int, str]:\n if \"Leaf\" in a:\n raise ValueError(\"Arbre vide\")\n elif \"Node\" in a:\n k, v, l, r = a[\"Node\"]\n if \"Leaf\" in l: # sous-arbre gauche vide : le minimum est le noeud actuel\n return (k, v)\n else: # sinon, le minimum de l'arbre actuel est à chercher dans le sous-arbre gauche\n return minimum(l)\n else:\n raise ValueError(\"Incapable de trouver le minimum dans l'arbre binaire {}\".format(a))",
"On pourrait être plus élégant que ces tests \"Leaf\" in a et \"Node\" in a, et utiliser deux fonctions is_leaf(a) et is_node(a). Cela permettrait de cacher un peu plus les détails d'implémentations.",
"def is_leaf(a: Abr) -> bool:\n return \"Leaf\" in a # ou Leaf == a\n\ndef is_node(a: Abr) -> bool:\n return \"Node\" in a\n\nminimum(insertion (insertion(Leaf, 1, \"un\"), 2, \"deux\"))\n\nminimum(insertion (insertion(Leaf, 2, \"deux\"), 1, \"un\"))",
"La suppression se fait dans le cas où la clé x est trouvée :",
"def suppression(a: Abr, x: int) -> Abr:\n if \"Leaf\" in a: # rien à supprimer\n return Leaf\n elif \"Node\" in a:\n k, v, l, r = a[\"Node\"]\n if k == x: # trouvé\n if \"Leaf\" in r: # sous-arbre droit vide : on renvoie le sous-arbre gauche\n return l\n else:\n # on va remonter le minimum de sous-arbre droit au noeud actuel et aller supprimer ce minimum\n k_min, v_min = minimum(r)\n return Node(k_min, v_min, l, suppression(r, k_min))\n # de façon équivalente, on pourrait écrire\n # k_max, v_max = maximum(l)\n # return Node(k_max, v_max, suppression(l, k_max), r)\n elif x < k: # à chercher à gauche\n return Node(k, v, suppression(l, k), r)\n elif x > k: # à chercher à droite\n return Node(k, v, l, suppression(r, k))\n else: # n'arrive jamais\n raise ValueError(\"Incapable de supprimer {} dans l'arbre binaire {}\".format(x, a))",
"Deux exemples :",
"print(trouve (suppression (insertion (insertion(Leaf, 2, \"deux\"), 1, \"un\"), 1), 1))\nprint(trouve (suppression (insertion (insertion(Leaf, 2, \"deux\"), 1, \"un\"), 1), 2))",
"Exercice 5 : fusion\ndecoupe a k sépare l'arbre a en deux arbres (a1, a2) tels que l'union des clés-valeurs de a1 et a2 est égale à l'ensemble des clés-valeurs de a (privé de l'association liée à k si elle était présente dans a).\n\nLes clés de a1 sont < à k.\nLes clés de a2 sont > à k.",
"def decoupe(a: Abr, x: int) -> Tuple[Abr, Abr]:\n \"\"\"\n [decoupe a k] sépare l'arbre [a] en deux arbres [(a1, a2)]\n tels que l'union des clés-valeurs de [a1] et [a2] est égale à\n l'ensemble des clés-valeurs de [a] (privé de l'association\n liée à [k] si elle était présente dans [a]).\n Les clés de [a1] sont < à [k].\n Les clés de [a2] sont > à [k].\n \"\"\"\n if \"Leaf\" in a: # rien à supprimer\n return (Leaf, Leaf)\n elif \"Node\" in a:\n k, v, l, r = a[\"Node\"]\n if k == x: # trouvé\n return (l, r)\n elif x < k: # à chercher à gauche\n left1, left2 = decoupe(l, x)\n return (left1, Node(k, v, left2, r))\n elif x > k: # à chercher à droite\n right1, right2 = decoupe(r, x)\n return (Node(k, v, l, right1), right2)\n else: # n'arrive jamais\n raise ValueError(\"Incapable de découper dans l'arbre binaire {}\".format(x, a))\n else: # n'arrive jamais\n raise ValueError(\"Incapable de découper dans l'arbre binaire {}\".format(x, a))",
"Et maintenant la fusion n'est pas très difficile :",
"def fusion(a1: Abr, a2: Abr) -> Abr:\n \"\"\" Fusionne les deux arbres binaires de recherche a1 et a2.\n Convention : si une clé est présente dans les deux arbres, nous gardons celle de [a1]\n \"\"\"\n if \"Leaf\" in a1: # rien à supprimer\n return a2\n elif \"Node\" in a1:\n k, v, l, r = a1[\"Node\"]\n left2, right2 = decoupe(a2, k)\n return Node(k, v, fusion(l, left2), fusion(r, right2))\n else: # n'arrive jamais\n raise ValueError(\"Incapable de fusionner les arbres binaires {} et {}\".format(a1, a2))\n\na1 = insertion (insertion(Leaf, 2, \"deux\"), 1, \"un\")\na2 = insertion (insertion(Leaf, 2, \"two\"), 3, \"three\")\n\nprint(trouve (fusion(a1, a2), 1)) # \"un\" depuis a1\nprint(trouve (fusion(a1, a2), 2)) # \"deux\" depuis a1 et pas \"two\" from a2\nprint(trouve (fusion(a1, a2), 3)) # \"three\" from a2\nprint(trouve (fusion(a1, a2), 4)) # None : pas trouvé !",
"Exercice 6\nAvantages et les inconvénients des ABR\n\nDiscussions durant la séance...\n\nAutres structures de données (clef, valeur)\n\nLes tables de hashage\nautres idées ? envoyez moi un mail : lilian.besson at ens-rennes.fr ou ouvrez un ticket sur GitHub\n\n\nTas binaire min (ou max)\nUn tas binaire est un arbre binaire dans lequel tous les étages sont remplis sauf éventuellement le dernier qui doit être bien tassé à gauche.\nUn tas binaire min signifie en plus que l'arbre est tournoi min c'est-à-dire que pour tout sous-arbre, la valeur de la racine est plus petite que les valeurs de tous les autres noeuds.\nUn tas binaire max vérifie la même propriété mais la valeur de la racine est plus grande que les valeurs de tous les autres noeuds, pour tout sous-arbre.\nOn les appelle également files de priorité (min, max).\nLa structure de tas min intervient dans de nombreux algorithmes comme ceux de Dijkstra et de Prim mais également dans les systèmes d'exploitation (ordonnancement des tâches et des processus).\nContrairement aux arbres binaires de recherche qui stockent des couples (clef, valeur), ici on stockera des couples (rang, valeur).\nSolution concise : tas binaire avec des arbres\nPour commencer, je préfère donner une première implémentation qui ne sera pas la plus efficace en mémoire, mais la plus simple à comprendre.\nLes tas binaires peuvent être représentés avec des arbres binaires, exactement comme dans l'exercice précédent.\n\nRéférence: Chris Okasaki, \"Purely Functional Data Structures\".\n\nOn utilise la même représentation que précédemment : un tas binaire est soit vide (E) soit un noeud ayant exactement deux fils (qui sont des tas binaires, éventuellement vides).\nUn noeud contient une valeur entière (v) à laquelle on donne un rang entier (rang), un fils gauche (l) et un fils droit (d) :",
"E = {\"E\": None}\nT = lambda rang, v, l, r: {\"T\": (rang, v, l, r)}\n\nTasBinaire = Union[\n Dict[str, None], # pour E = {\"E\": None}\n Dict[str, Tuple[int, str, dict, dict]] # pour T = {\"T\" : (k, v, l, r)}\n # ici si on pouvait écrire des types récursifs, il faudrait écrire\n # Dict[str, Tuple[int, str, TasBinaire, TasBinaire]]\n]\n\ndef rank(t: TasBinaire) -> int:\n \"\"\" Rang d'un tas binaire, r lu depuis le champ T(r, _, _, _).\"\"\"\n if \"E\" in t:\n return 0\n else:\n r, _, _, _ = t[\"T\"]\n return r",
"La première primitive est la création d'un tas avec la clé x, et deux sous-tas a et b.\nLe rang est minimisé.",
"def make(x: int, a: TasBinaire, b: TasBinaire) -> TasBinaire:\n \"\"\" Créer un tas binaire de clé x avec deux sous-tas a et b, minimisant le rang.\"\"\"\n ra = rank(a)\n rb = rank(b)\n if ra >= rb:\n return T(rb + 1, x, a, b)\n else:\n return T(ra + 1, x, b, a)",
"Exemples de tas binaires",
"tas1 = make(10, E, E)\npprint(tas1)\n\ntas2 = make(120, tas1, E)\npprint(tas2)\n\ntas3 = make(150, tas2, E)\npprint(tas3)",
"On peut vérifier si un tas est vide, ou créer le tas vide.",
"empty: TasBinaire = E\n\ndef is_empty(a: TasBinaire) -> bool:\n \"\"\" Teste si le tas binaire a est vide ou non.\"\"\"\n if \"E\" in a:\n return True\n else:\n return False\n\n# plus rapidement\ndef is_empty(a: TasBinaire) -> bool:\n return \"E\" in a\n\nis_empty(E)\n\nis_empty(tas1), is_empty(tas2), is_empty(tas3)",
"La fusion est assez naturelle : on procède par récurrence, en joignant deux tas et en continuant la fusion pour les tas plus petits.\nOn garde la plus petite clé à la racine, pour conserver la propriété tournoi.",
"def merge(h1: TasBinaire, h2: TasBinaire) -> TasBinaire:\n \"\"\" Fusionne les deux tas binaires h1 et h2.\"\"\"\n if \"E\" in h1:\n return h2\n elif \"E\" in h2:\n return h1\n else:\n r1, x, a1, b1 = h1[\"T\"]\n r2, y, a2, b2 = h2[\"T\"]\n if x <= y:\n return make(x, a1, merge(b1, h2))\n else:\n return make(y, a2, merge(h1, b2))",
"On peut désormais créer les tas précédemment définis correctement, pour qu'ils soient bien équilibrés :",
"tas1 = make(10, E, E)\npprint(tas1)\n\ntas2 = merge(tas1, make(120, E, E))\npprint(tas2)\n\ntas3 = merge(make(150, E, E), tas2)\npprint(tas3)\n\nprint(\"Fusion du tas vide et du tas1 suivant\")\npprint(tas1)\npprint(merge(E, tas1))\n\nprint(\"Fusion de tas1 et tas2 suivants\")\npprint(tas1)\npprint(tas2)\npprint(merge(tas1, tas2))\n\nprint(\"Fusion de tas2 et tas3 suivants\")\npprint(tas2)\npprint(tas3)\npprint(merge(tas2, tas3))",
"On voit que les fusions respectent bien la propriété du tas binaire.\nL'insertion correspond à la fusion d'un tas avec une seule clé et du tas courant :",
"def insert(x: int, h: TasBinaire) -> TasBinaire:\n \"\"\" Solution naive pour insérer une nouvelle valeur x dans le tas binaire h.\"\"\"\n return merge(T(1, x, E, E), h)\n # return merge(make(x, E, E), h) # équivalent",
"La lecture de la plus petite clé est triviale :",
"class Empty(Exception):\n pass\n\ndef mini(a: TasBinaire) -> int:\n \"\"\" Calcule le minimum du tas binaire a (première valeur).\"\"\"\n if \"E\" in a:\n raise Empty\n else:\n _, x, _, _ = a[\"T\"]\n return x",
"Et l'extraction n'est pas compliquée : il suffit de fusionner les deux sous-tas, ce qui va produire un tas tournoi avec les clés restantes.",
"def extract_min(a: TasBinaire) -> Tuple[int, TasBinaire]:\n \"\"\" Extraie le minimum et renvoie un tas binaire sans cette valeur.\"\"\"\n if \"E\" in a:\n raise Empty\n else:\n _, x, a, b = a[\"T\"]\n return (x, merge(a, b))",
"Et maintenant pour le tri par tas :\n\nOn crée un tas vide,\nDans lequel on insère les valeurs du tableau à trier, une par une,\nPuis on déconstruit le tas en extrayant le minimum, un par un, et en les stockant dans un tableau,\nLe tableau obtenu est trié dans l'ordre croissant.",
"def triParTas(a: List[int]) -> List[int]:\n \"\"\" Tri par tas\"\"\"\n n = len(a)\n tas = E # tas vide\n for i in range(n):\n tas = insert(a[i], tas)\n a2 = [-1] * n # aussi [-1 for i in range(n)]\n for i in range(n):\n m, t = extract_min(tas)\n a2[i] = m\n tas = t\n return a2",
"Complexité :\n\nL'étape 1. est en $\\mathcal{O}(1)$,\nL'étape 2. est en $\\mathcal{O}(\\log n)$ pour chacune des $n$ valeurs,\nL'étape 3. est aussi en $\\mathcal{O}(\\log n)$ pour chacune des $n$ valeurs,\n\n$\\implies$ L'algorithme de tri par tas est en $\\mathcal{O}(n \\log n)$ en temps et en $\\mathcal{O}(n)$ en mémoire externe.\nUn premier exemple :",
"triParTas([])\n\ntriParTas([10, 3, 4, 1, 2, 7, 8, 5, 9, 6])",
"Quelques essais numériques rapides",
"def isSorted(tableau: List) -> bool:\n return tableau == sorted(tableau)\n\nimport numpy as np\n\ndef randomTableau(n: int) -> List[int]:\n return list(np.random.randint(-n*10, n*10, n))\n\n%time isSorted(triParTas([10, 3, 4, 1, 2, 7, 8, 5, 9, 6]*100))\n\n%time isSorted(triParTas(randomTableau(10*100)))\n\n%time isSorted(triParTas([10, 3, 4, 1, 2, 7, 8, 5, 9, 6]*1000))\n\n%time isSorted(triParTas(randomTableau(10*1000)))\n\n%time isSorted(triParTas([10, 3, 4, 1, 2, 7, 8, 5, 9, 6]*10000))\n\n%time isSorted(triParTas(randomTableau(10*10000)))\n\n%time isSorted(sorted(randomTableau(10*10000)))\n\n%time isSorted(triParTas([10, 3, 4, 1, 2, 7, 8, 5, 9, 6]*100000))\n\n%time isSorted(triParTas(randomTableau(10*100000)))",
"On observe que la fonction implémentée a une complexité sur-linéaire, et sous-quadratique. En affichant plus de valeurs, on pourrait reconnaître un profil pseudo-linéaire (ie, $\\Theta(n \\log(n))$).",
"import numpy as np\nimport timeit\nvaleurs_n = np.logspace(2, 6, num=30, dtype=int)\ntemps = []\nfor n in valeurs_n:\n code = \"triParTas(randomTableau({}))\".format(n)\n essais = 100 if n < 1e4 else 10\n print(\"- Chronométrons le code\", code)\n temps_n = timeit.timeit(\n code,\n number=essais, globals=globals()\n )\n print(\"qui a pris un temps moyen de\", temps_n, \"secondes pour\", essais, \"essais.\")\n temps.append(temps_n)\n\nimport matplotlib.pyplot as plt\n\nplt.figure(figsize=(10, 7))\nplt.plot(valeurs_n, temps, \"ro-\")\nplt.xlabel(\"Taille n du tableau à trier\")\nplt.ylabel(\"Temps de calcul en secondes\")\nplt.title(\"Pour notre fonction implémentant le tri par tas\")\n# plt.legend()\nplt.show()",
"Cette première courbe semble linéaire, ou quasi-linéaire.\nPour vérifier que le temps de calcul a vraiment un profil ressemblant à une courbe $cst * n * \\log(n)$, une méthode rapide et simple est la suivante :\n\non affiche $f(n) / n$ (ici en <span style=\"color: red;\">rouge o-</span>), en espérant que son profil ressemble bien à un $cst * log(n)$,\non calcul cst comme la dernière valeur de $f(n) / (n * log(n)$,\net on affiche $\\log(n)$ (ici en <span style=\"color: blue;\">bleu +:</span>).",
"plt.figure(figsize=(10, 7))\nplt.plot(valeurs_n[15:], np.array(temps[15:]) / valeurs_n[15:], \"ro-\", label=\"Temps de calcul / n\")\ncst = temps[-1] / valeurs_n[-1]\ncst = cst / np.log(valeurs_n[-1])\nplt.plot(valeurs_n[15:], np.log(valeurs_n[15:]) * cst, \"b+:\", label=\"cst * log(n)\")\nplt.xlabel(\"Taille n du tableau à trier\")\nplt.ylabel(\"Temps de calcul en secondes\\ndivisé par la valeur de n\")\nplt.title(\"Pour notre fonction implémentant le tri par tas\")\nplt.legend()\nplt.savefig(\"TP3__Python__temps_calcul_normalise_triParTas.png\")\nplt.show()",
"Là on constate que la courbe $f(n) / n$ ressemble bien à un logarithme.\nBien sûr, tout cela est juste expérimental pour de petites valeurs, cela ne suffit PAS DU TOUT à prouver que $f(n) = \\Theta(n \\log(n))$.\n\n\nExercice 7 : arbre tournoi\nIci je donne une autre implémentation, généralement celle présentée dans les ouvrages de références en algorithmique.\nPlutôt que d'utiliser une structure arborescente explicite (avec des pointeurs vers des fils gauche et droit et un type récursif), on peut utiliser utiliser un tableau de taille $n$ pour représenter en place les $n$ éléments du tas min.\nLe fils gauche de la racine i (eg si on indice à partir de i=1) sera à l'indice $2i$ (eg T[2]) et le fils droit de la racine i (eg i=1) sera à l'indice $2i+1$ (eg T[3]).\nLa référence pour cette implémentation vient du Cormen, des éléments sont aussi dans Beauquier & Bernstel, et sur Internet sur la page Wikipédia des tas binaires.\n\nHypothèse : on ne stocke que des entiers positifs, et le tableau, a contiendra -1 pour un élément non utilisé.\nOn doit retenir le nombre n d'éléments dans l'arbre, qui peut être modifié.\n\nOn peut donc utiliser une petite classe avec deux champs n et a :",
"Arbre = List[int]\n\nclass ArbreTournoi():\n def __init__(self, n: int, a: Arbre):\n self.n = n\n self.a = a",
"Par exemple, l'arbre suivant s'écrit comme suit :",
"arbre_test = ArbreTournoi(7, [1,2,3,4,5,6,7])\narbre_test2 = ArbreTournoi(6, [2,1,3,4,5,6,-1, -1])\n\ndef capacite(an: ArbreTournoi) -> int:\n \"\"\" Capacité d'un arbre tournoi, taille du tableau a (nb max de valeurs).\"\"\"\n return len(an.a)\n\ndef nb_element(an: ArbreTournoi) -> int:\n \"\"\" Nombre d'éléments d'un arbre tournoi.\"\"\"\n n = an.n\n m = len(an.a)\n assert n <= m\n return n\n\nprint(capacite (arbre_test))\nprint(nb_element (arbre_test))\n\nprint(capacite(arbre_test2))\nprint(nb_element (arbre_test2))\n\ndef a_racine(an: ArbreTournoi) -> bool:\n return an.n > 0\n\ndef est_racine(n: int) -> bool:\n return n == 0\n\ndef racine(an: ArbreTournoi) -> Tuple[int, int]:\n if 0 >= an.n:\n raise ValueError(\"Pas de racine\")\n return (0, an.a[0])\n\nracine(arbre_test)\n\ndef a_noeud(an: ArbreTournoi, i: int) -> bool:\n return an.n > i\n\ndef noeud(an: ArbreTournoi, i: int) -> Tuple[int, int]:\n if i >= an.n:\n raise ValueError(\"Pas de noeud i = {} et an.n = {}\".format(i, an.n))\n return (i, an.a[i])\n\ndef valeur(an: ArbreTournoi, i: int) -> int:\n return (noeud(an, i))[1]\n\nnoeud(arbre_test, 0)\n\ndef a_gauche(an: ArbreTournoi, i: int) -> bool:\n return an.n > 2*i + 1\n\ndef gauche(an: ArbreTournoi, i: int) -> Tuple[int, int]:\n if 2*i + 1 >= an.n:\n raise ValueError(\"Pas de fils gauche i = {}, 2*i+1 = {} and an.n = {}\".format(i, (2*i+1), an.n))\n return (2*i + 1, an.a[2*i + 1])\n\ngauche(arbre_test, 0)\n\ndef a_droite(an: ArbreTournoi, i: int) -> bool:\n return an.n > 2*i + 2\n\ndef droite(an: ArbreTournoi, i: int) -> Tuple[int, int]:\n if 2*i + 2 >= an.n:\n raise ValueError(\"Pas de fils droit i = {}, 2i+2={} and an.n = {}\".format(i, (2*i+2), an.n))\n return (2*i + 2, an.a[2*i + 2])",
"Une et deux descentes à droite, par exemple :",
"droite(arbre_test, 0)\n\ni, _ = droite(arbre_test, 0)\ndroite(arbre_test, i)",
"On parcourt les sous-arbres pour trouver le minimum :",
"def min_sous_arbre(an: ArbreTournoi, i: int) -> int:\n ag = a_gauche(an, i)\n ad = a_droite(an, i)\n if not ag and not ad:\n return float(\"+inf\")\n elif ag and not ad:\n g, vg = gauche(an, i)\n return min(vg, min_sous_arbre(an, g))\n elif not ag and ad:\n d, vd = droite(an, i)\n return min(vd, min_sous_arbre(an, d))\n else: # a droite et a gauche\n g, vg = gauche(an, i)\n d, vd = droite(an, i)\n return min(min(vg, vd), min(min_sous_arbre(an, g), min_sous_arbre(an, d)))\n\nprint(\"arbre_test: n = {}, a = {}\".format(arbre_test.n, arbre_test.a))\nmin_sous_arbre(arbre_test, 0)",
"Et enfin la solution pour la fonction testant si un arbre binaire est un tas tournoi, qui doit être récursive et descendre dans les deux sous-arbres gauche et droite tant qu'ils existent :",
"def est_tournoi(an: ArbreTournoi) -> bool:\n def depuis(i: int) -> bool:\n r, vr = noeud(an, i)\n min_v = min_sous_arbre(an, i)\n res = vr < min_v\n if res and a_gauche(an, i):\n g, vg = gauche(an, i)\n res = depuis(g)\n if res and a_droite(an, i):\n d, vd = droite(an, i)\n res = depuis(d)\n return res\n return depuis(0)\n\nest_tournoi(arbre_test)\n\nprint(\"arbre_test2: n = {}, a = {}\".format(arbre_test2.n, arbre_test2.a))\nest_tournoi(arbre_test2)",
"Exercice 8 : parent, fils_gauche et fils_droit",
"def parent(an: ArbreTournoi, i: int) -> Tuple[int, int]:\n return noeud(an, ((i - 1) // 2))\n\nprint(\"arbre_test: n = {}, a = {}\".format(arbre_test.n, arbre_test.a))\n\nprint(noeud(arbre_test, 1))\nprint(parent(arbre_test, 1))\nprint(noeud(arbre_test, 2))\nprint(parent(arbre_test, 2))\n\nprint(noeud(arbre_test, 4))\nprint(parent(arbre_test, 4))\nprint(gauche(arbre_test, 1))\nprint(droite(arbre_test, 1)) # 4\n\nprint(noeud(arbre_test, 5))\nprint(parent(arbre_test, 5))\nprint(gauche(arbre_test, 2)) # 5\nprint(droite(arbre_test, 2))",
"Exercice 9 : echange",
"def echange(a: List[Any], i: int, j: int) -> None:\n a[i], a[j] = a[j], a[i] # very Pythonic\n \"\"\"\n vi, vj = a[i], a[j]\n a[i] = vj\n a[j] = vi\n \"\"\"\n \"\"\"\n tmp = a[i]\n a[i] = a[j]\n a[j] = tmp\n \"\"\"",
"Exercice 10 : insertion\nSi besoin, en insérant un élément dans un tableau déjà plein, on doit doubler sa capacité. Ce n'est pas compliqué : d'abord on double le tableau, puis on fait l'insertion normale.",
"def double_capacite(an: ArbreTournoi) -> ArbreTournoi:\n c = capacite(an)\n a2 = [-1] * (2*c) # [-1 for _ in range(2*c)]\n for i in range(an.n):\n a2[i] = an.a[i]\n return ArbreTournoi(an.n, a2)",
"L'opération élémentaire s'appelle une \"percolation haute\" : pour rétablir si nécessaire la propriété d'ordre du tas binaire : tant que x n'est pas la racine de l'arbre et que x est strictement inférieur (tas min) à son père on échange les positions entre x et son père.",
"def percolation_haute(an: ArbreTournoi, i: int) -> None:\n    # Le garde-fou i > 0 est indispensable : sans lui, parent(an, 0) lirait\n    # noeud(an, -1) (indice négatif Python = dernière case du tableau) et\n    # pourrait échanger la racine avec la dernière case quand le tableau est plein.\n    while i > 0:\n        p, _ = parent(an, i)\n        if valeur(an, p) <= valeur(an, i):\n            break\n        echange(an.a, i, p)\n        i = p",
"Maintenant, l'insertion a proprement dite :",
"def insertion(an: ArbreTournoi, x: int) -> ArbreTournoi:\n n, c = an.n, capacite(an)\n if n == c:\n an2 = double_capacite(an)\n return insertion(an2, x)\n else:\n an2 = ArbreTournoi(n + 1, an.a[:]) # tableau[:] fait une copie\n an2.a[n] = x # ajoute la valeur x à la fin\n percolation_haute(an2, n) # percolation haute depuis la fin du tas\n return an2\n\nprint(\"arbre_test: n = {}, a = {}\".format(arbre_test.n, arbre_test.a))\n\na2 = insertion(arbre_test, 40)\nprint(\"a2: n = {}, a = {}\".format(a2.n, a2.a))\n# on l'a vu doubler !\na3 = insertion(a2, 20)\nprint(\"a3: n = {}, a = {}\".format(a3.n, a3.a))\n\na4 = ArbreTournoi(6, [100, 123, 135, 136, 354, 462])\nprint(\"a4: n = {}, a = {}\".format(a4.n, a4.a))\n\na5 = insertion(a4, 40)\nprint(\"a5: n = {}, a = {}\".format(a5.n, a5.a))\n\na6 = insertion(a5, 20)\nprint(\"a6: n = {}, a = {}\".format(a6.n, a6.a))",
"Exercice 11 : creation\nLa sémantique de cette fonction est de créer un tas min à partir d'un tableau de valeur.",
"def creation(a: List[int]) -> ArbreTournoi:\n n = len(a)\n a_vide = [-1] * n\n an = ArbreTournoi(0, a_vide)\n for i in range(n):\n an = insertion(an, a[i])\n return an\n\narbre_test3 = creation([20, 1, 3, 5, 7])\nprint(\"arbre_test3: n = {}, a = {}\".format(arbre_test3.n, arbre_test3.a))",
"Notez que cet arbre est bien tournoi, mais n'est pas trié.",
"print(\"Est tournoi ?\", est_tournoi(arbre_test3))\n\nprint(\"Est trié ?\", arbre_test3.a == sorted(arbre_test3.a))",
"Exercice 12 : diminue_clef\nOn peut augmenter ou diminuer la priorité (la clé) d'un nœud mais il faut ensuite satisfaire la contrainte d'ordre. Si l'on augmente la clé on fera donc une percolation-haute à partir de notre nœud et si l'on diminue la clé on fera un percolation-basse.\n\nFaites le vous-même.\n\nExercice 13 : extraire_min\nOn fait une percolation basse pour déplacer la racine jusqu'à une feuille, puis on inverse la feuille avec la dernière valeur du tableau (la feuille la plus à droite), et on met une valeur arbitraire (-1) dedans et on diminue la taille du tas ({ n with n = n - 1 }).\nD'abord, on a besoin de récupérer un des deux fils si l'un des deux a une clé plus petite.",
"def indice_min_fils(an: ArbreTournoi, i: int) -> int:\n g = i\n d = i\n if a_gauche(an, i):\n g, _ = gauche(an, i)\n if a_droite(an, i):\n d, _ = droite(an, i)\n if valeur(an, g) < valeur(an, d):\n return g\n else:\n return d",
"La percolation basse n'est pas trop compliquée :",
"def percolation_basse(an: ArbreTournoi, i: int) -> None:\n f = indice_min_fils(an, i)\n while valeur(an, f) < valeur(an, i):\n echange(an.a, i, f)\n i = f\n f = indice_min_fils(an, i)",
"Enfin l'extraction du minimum est facile.",
"def extraire_min(an: ArbreTournoi) -> Tuple[int, ArbreTournoi]:\n an2 = ArbreTournoi(an.n, an.a[:]) # copie !\n if a_gauche(an2, 0):\n m = an2.a[0] # racine\n an2.n = an2.n - 1 # on enlève un élément\n echange(an2.a, 0, an2.n) # on place la racine à la fin, à un endroit désormais inutilisé\n an2.a[an2.n] = -1 # on place -1 la valeur par défaut, donc on efface\n percolation_basse(an2, 0) # on redescend la nouvelle racine tant que possible\n return m, an2\n else:\n _, racine_an = racine(an2)\n # arbre vide une fois qu'on a extraie la racine\n return (racine_an, ArbreTournoi(0, []))",
"Et pour un exemple :",
"a = creation([20, 1, 3, 5, 7])\nprint(\"a: n = {}, a = {}\".format(a.n, a.a))\n\nm, a = extraire_min(a) # m = 1\nprint(\"m = {}, et a: n = {}, a = {}\".format(m, a.n, a.a))\n\nm, a = extraire_min(a) # m = 3\nprint(\"m = {}, et a: n = {}, a = {}\".format(m, a.n, a.a))\n\nm, a = extraire_min(a) # m = 5\nprint(\"m = {}, et a: n = {}, a = {}\".format(m, a.n, a.a))\n\nm, a = extraire_min(a) # m = 7\nprint(\"m = {}, et a: n = {}, a = {}\".format(m, a.n, a.a))\n\nm, a = extraire_min(a) # m = 20\nprint(\"m = {}, et a: n = {}, a = {}\".format(m, a.n, a.a))",
"Remarquez comment le redimensionement du tableau n'arrive qu'à la fin.\nExercice 14 : tri par tas\nLa meilleure façon de vérifier notre implémentation est d'implémenter le tri par tas :\n\non construit un tas depuis la liste de valeur,\non extrait le minimum successivement.",
"def triParTas2(a: List[int]) -> List[int]:\n n = len(a)\n avide = [-1] * n\n an = creation(a) # tas contenant les valeurs de a, bien placées\n for i in range(n):\n m, an2 = extraire_min(an)\n avide[i] = m # minimum du tas an, placé en ième position\n an = an2 # nouveau tas avec une valeur en moins\n return avide\n\narray1 = [10, 3, 4, 1, 2, 7, 8, 5, 9, 6]\narray2 = triParTas2(array1)\nprint(\"array1 =\", array1)\nprint(\"trié en array2 =\", array2, \"par triParsTas2\")\nassert sorted(array1) == array2",
"Union-Find\nExercice 15 : Union-Find avec tableaux\nVersion simple avec des tableaux simples.",
"Representant = Union[None, int]\n# Representant = Optional[int]\n\nUnionFind = List[Representant]",
"En OCaml, on pourrait écrire ces types :\nocaml\ntype representant = Aucun | Element of int;; (* [int option] pourrait suffire *)\ntype unionfind = representant array;;",
"def create_uf(n: int) -> UnionFind:\n    return [None] * n\n\ndef makeset(uf: UnionFind, i: int) -> None:\n    if len(uf) <= i:\n        # extension en place (extend) : une réaffectation uf = uf + [...] ne serait\n        # pas visible par l'appelant ; l'indice i exige une longueur d'au moins i + 1\n        uf.extend([None] * (i + 1 - len(uf)))\n    if uf[i] is None:\n        uf[i] = i\n    else:\n        raise ValueError(\"makeset avec uf = {} et i = {} : impossible d'ajouter i car déja présent\".format(uf, i))",
"L'union est assez rapide aussi :",
"def union(uf: UnionFind, i: int, j: int) -> None:\n    n = len(uf)\n    if uf[i] is None or uf[j] is None:\n        raise ValueError(\"Élement i = {} ou j = {} absent de l'UnionFind uf = {}\".format(i, j, uf))\n    ri, rj = find(uf, i), find(uf, j)\n    for k in range(n):\n        # tous les éléments dont le représentant est celui de j prennent celui de i\n        if uf[k] == rj:\n            uf[k] = ri",
"La recherche est aussi très facile, il suffit de donner la valeur stockée en case i :",
"def find(uf: UnionFind, i: int) -> int:\n if uf[i] is None:\n raise ValueError(\"Élement i = {} absent de l'UnionFind uf = {}\".format(i, uf))\n else:\n return uf[i]",
"Tests :",
"uf_test = create_uf(6)\nprint(\"UnionFind uf vide = {}\".format(uf_test))\n\nfor i in range(0, 5+1):\n makeset(uf_test, i)\nprint(\"UnionFind uf rempli par i=0..5 = {}\".format(uf_test))\n\nprint(\"find(uf_test, 5) =\", find(uf_test, 5))\nunion(uf_test, 0, 1)\nprint(\"Union de 0 et 1 dans uf, désormais uf = {}\".format(uf_test))\n\nunion(uf_test, 2, 3)\nprint(\"Union de 2 et 3 dans uf, désormais uf = {}\".format(uf_test))\n\nunion(uf_test, 1, 5)\nprint(\"Union de 1 et 5 dans uf, désormais uf = {}\".format(uf_test))\n\nprint(\"find(uf_test, 0) =\", find(uf_test, 0))\nprint(\"find(uf_test, 1) =\", find(uf_test, 1))\n\nprint(\"find(uf_test, 0) = find(uf_test, 5) ? \", find(uf_test, 0) == find(uf_test, 5))\nprint(\"find(uf_test, 3) = find(uf_test, 5) ? \", find(uf_test, 3) == find(uf_test, 5))",
"Exercice 16 : Union-Find avec forêts\nVersion avancée avec des forêts.",
"aucun = False\nracine = True\nPosition = Union[bool, int]\n# malheureusement, Python va calculer ce type comme étant int,\n# et n'affichera pas ça comme Position ou Union[bool, int],\n# car bool est un sous type de int\n\nUnionFindForest = List[Position]",
"Créer une union-find vide revient à créer un tableau avec chaque élément n'ayant pas de représentant, donc valant aucun.",
"def create_uf(n: int) -> UnionFindForest:\n return [aucun] * n",
"Rajouter un singleton ${i}$ dans l'union-find revient à mettre la case i du tableau à racine :",
"def makeset(uf: UnionFindForest, i: int) -> None:\n    # comparaison par identité (is) : en Python 0 == False et 1 == True,\n    # un parent d'indice 0 serait donc confondu avec aucun par ==\n    if uf[i] is aucun:\n        uf[i] = racine # i devient son propre représentant\n    else:\n        raise ValueError(\"Élément i = {} déjà présent dans l'UnionFindForest uf = {}\".format(i, uf))",
"La recherche est un peu plus compliquée et on propose une première optimisation, qui va servir à \"aplatir\" la forêt.",
"def find(uf: UnionFindForest, i: int) -> int:\n    # comparaison par identité (is) : un parent d'indice 0 ou 1 serait confondu\n    # avec False/True par == (en Python, bool est un sous-type de int)\n    if uf[i] is aucun:\n        raise ValueError(\"Élément i = {} absent de l'UnionFindForest uf = {}\".format(i, uf))\n    elif uf[i] is racine: # i est son propre représentant\n        return i\n    else:\n        j = uf[i] # parent courant de i\n        r = find(uf, j) # trouve le représentant de j\n        uf[i] = r # compression de chemin : i pointe désormais directement vers r\n        return r",
"Pour l'union, on fait ici le choix arbitraire de préférer la racine de i, on devrait préférer celle de l'arbre le plus petit pour \"écraser\" la forêt. Cf. [Papadimitriou] ou [Cormen] (ou Wikipédia).",
"def union(uf: UnionFindForest, i: int, j: int) -> None:\n    if uf[i] is aucun or uf[j] is aucun:\n        raise ValueError(\"Élément i = {} ou j = {} absent de l'UnionFindForest uf = {}\".format(i, j, uf))\n    else:\n        r_i = find(uf, i)\n        r_j = find(uf, j)\n        if r_i != r_j: # déjà dans la même classe : ne rien faire (sinon on créerait un cycle)\n            uf[r_i] = r_j\n        # c'est un des choix possibles, on peut faire l'inverse\n        # on peut aussi chercher la racine de l'arbre le plus petit\n        # pour \"écraser\" (aplatir) la forêt, mais c'est plus compliqué.",
"On vérifie avec le même test que pour la première implémentation :",
"uf_test = create_uf(6)\nprint(\"UnionFindForest uf vide = {}\".format(uf_test))\n\nfor i in range(0, 5+1):\n makeset(uf_test, i)\nprint(\"UnionFindForest uf rempli par i=0..5 = {}\".format(uf_test))\n\nprint(\"find(uf_test, 5) =\", find(uf_test, 5))\nunion(uf_test, 0, 1)\nprint(\"Union de 0 et 1 dans uf, désormais uf = {}\".format(uf_test))\n\nunion(uf_test, 2, 3)\nprint(\"Union de 2 et 3 dans uf, désormais uf = {}\".format(uf_test))\n\nunion(uf_test, 1, 5)\nprint(\"Union de 1 et 5 dans uf, désormais uf = {}\".format(uf_test))\n\nprint(\"find(uf_test, 0) =\", find(uf_test, 0))\nprint(\"find(uf_test, 1) =\", find(uf_test, 1))\n\nprint(\"find(uf_test, 0) = find(uf_test, 5) ? \", find(uf_test, 0) == find(uf_test, 5))\nprint(\"find(uf_test, 3) = find(uf_test, 5) ? \", find(uf_test, 3) == find(uf_test, 5))",
"Exercice 17 : Bonus & discussions\nEn classe.\nJe recommande aussi la lecture de ce document (en anglais), si tout ça vous intéresse et si vous envisagez d'en faire un développement. Ce document contient notamment une analyse bien propre de la complexité amortie de l'opération Find pour l'algorithme optimisé, qui donne une complexité en $\\mathcal{O}(\\alpha(n))$ (pour $n$ valeurs dans la structure Union-Find, et si $\\alpha$ est la fonction inverse d'Ackermann, cf. Theorem 4 page 9).\nBonus : algorithme de Kruskal\nReprésentations de graphe pondérés",
"from typing import Optional\nSommet = int\nPoids = int\nAreteMatrix = Optional[Poids]\nGrapheMatrix = List[List[AreteMatrix]]\n\nDestination = Tuple[Sommet, Poids]\nGrapheList = List[List[Destination]] # liste d'adjacence\n\ndef taille_GrapheMatrix(g: GrapheMatrix) -> int:\n n = len(g)\n assert all([ len(g[i]) == n for i in range(n) ])\n return n\n\ndef taille_GrapheList(g: GrapheList) -> int:\n n = len(g)\n return n",
"Il est facile d'obtenir la liste des arêtes, représentées comme des triplets (i, j, p) si l'arête $i \to j$ de poids $p$ est présente dans le graphe.\nPar exemple avec les graphes représentés par listes d'adjacence :",
"Arete = Tuple[Sommet, Sommet, Poids]\n\ndef liste_aretes_GrapheList(g: GrapheList) -> List[Arete]:\n n = taille_GrapheList(g)\n resultat = [\n (i, j, p)\n for i in range(n)\n for (j, p) in g[i]\n ]\n return resultat\n\ngraphe_test: GrapheList = [\n [(1, 11), (2, 2), (3, 1)],\n [(2, 7)],\n [],\n [(4, 5)],\n [(1, 1)]\n]\n\nliste_aretes_GrapheList(graphe_test)",
"L'algorithme de Kruskal a besoin de trier les arêtes selon leur poids, par ordre croissant, et cela se fait facilement avec le fonction sorted de la libraire standard, à laquelle on donne un argument (optionnel) key qui est (ici) une fonction extrayant le poids p du triplet (i, j, p).",
"aretes = liste_aretes_GrapheList(graphe_test)\n\nsorted(aretes,\n key = lambda a: a[2]\n )",
"Algorithme de Kruskal\nJe ne redonne pas d'explications ici, allez voir Wikipédia ou un livre d'algorithmique de référence ([Cormen] ou [Beauquier, Berstel, Chrétienne] par exemple).\nVoir aussi cette visualisation, et cette autre implémentation en Python (donnée pour le cours d'ALGO1 en L3SIF en 2019).\n\n\"L'algorithme de Kruskal est un algorithme de recherche d'arbre recouvrant de poids minimum ou arbre couvrant minimum dans un graphe connexe non-orienté et pondéré. Il a été conçu en 1956 par Joseph Kruskal.\"",
"def kruskal(g: GrapheList) -> List[Arete]:\n aretes = liste_aretes_GrapheList(g)\n aretes = sorted(aretes,\n key = lambda a: a[2]\n )\n n = taille_GrapheList(g)\n uf = create_uf(n)\n for i in range(n):\n makeset(uf, i)\n # uf contient chaque sommet dans des singletons\n resultat = [] # liste des arêtes de l'arbre couvrant\n for (i, j, p) in aretes:\n if (find(uf, i) != find(uf, j)):\n resultat.append((i, j, p))\n union(uf, i, j)\n return resultat",
"Cet algorithme donne bien un arbre couvrant, il faudrait vérifier sa minimalité.",
"graphe_test\n\nkruskal(graphe_test)",
"Conclusion\nFin. À la séance prochaine."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
pysg/pyther
|
Modelo de impregnacion/modelo2/Activité 10_Viernes2-checkpoint.ipynb
|
mit
|
[
"import numpy as np\nimport pandas as pd\nimport math\nimport cmath\nfrom scipy.optimize import root\nfrom scipy.integrate import odeint\nfrom __future__ import division\nfrom scipy import *\nfrom scipy.optimize import curve_fit\nfrom pylab import *\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nimport pylab as pp\nfrom scipy import integrate, interpolate\nfrom scipy import optimize",
"Evaluation des modèles pour l'extraction supercritique\nL'extraction supercritique est de plus en plus utilisée afin de retirer des matières organiques de différents liquides ou matrices solides. Cela est dû au fait que les fluides supercritiques ont des avantages non négligeables par rapport aux autres solvants, ils ont des caractèreistiques comprises entre celles des gaz et celles des solides. En changeant la température et la pression ils peuvent capter des composés différents, ils sont donc très efficaces. \nLe méchanisme de l'extraction supercritique est le suivant : \n- Transport du fluide vers la particule, en premier lieu sur sa surface et en deuxième lieu a l'intérieur de la particule par diffusion\n- Dissolution du soluté avec le fluide supercritique \n- Transport du solvant de l'intérieur vers la surface de la particule \n- Transport du solvant et des solutés de la surface de la particule vers la masse du solvant \nA - Le modèle de Reverchon : \nAfin d'utiliser ce modèle, définissons les variables qui vont y être admises, ci-dessous la nomenclature du modèle :\n \nLe modèle : \nIl est basé sur l'intégration des bilans de masses différentielles tout le long de l'extraction, avec les hypothèses suivants : \n- L'écoulement piston existe à l'intérieur du lit, comme le montre le schéma ci-contre : \n\n- La dispersion axiale du lit est négligeable\n- Le débit, la température et la pression sont constants\nCela nous permet d'obtenir les équations suivantes :\n- $uV.\\frac{\\partial c_{c}}{\\partial t}+eV.\\frac{\\partial c_{c}}{\\partial t}+ AK(q-q) = 0$\n- $(1-e).V.uV\\frac{\\partial c_{q}}{\\partial t}= -AK(q-q*)$\n\nLes conditions initiales sont les suivantes : C = 0, q=q0 à t = 0 et c(0,t) à h=0\n\nLa phase d'équilibre est : $c = k.q*$\nSachant que le fluide et la phase sont uniformes à chaque stage, nous pouvons définir le modèle en utilisant les équations différentielles ordinaires (2n). 
Les équations sont les suivantes :\n- $(\\frac{W}{p}).(Cn- Cn-1) + e (\\frac{v}{n}).(\\frac{dcn}{dt})+(1-e).(\\frac{v}{n}).(\\frac{dcn}{dt}) = 0$\n- $(\\frac{dqn}{dt} = - (\\frac{1}{ti})(qn-qn*)$\n- Les conditions initiales sont : cn = 0, qn = q0 à t = 0 \nEjemplo ODE",
"import numpy as np\nfrom scipy import integrate\nfrom matplotlib.pylab import *",
"Ejemplo 2 funciona",
"import numpy as np\nfrom scipy import integrate\nimport matplotlib.pyplot as plt\n\ndef vdp1(t, y):\n return np.array([y[1], (1 - y[0]**2)*y[1] - y[0]])\n\nt0, t1 = 0, 20 # start and end\nt = np.linspace(t0, t1, 100) # the points of evaluation of solution\ny0 = [2, 0] # initial value\ny = np.zeros((len(t), len(y0))) # array for solution\ny[0, :] = y0\n\nr = integrate.ode(vdp1).set_integrator(\"dopri5\") # choice of method\nr.set_initial_value(y0, t0) # initial values\n\nfor i in range(1, t.size):\n y[i, :] = r.integrate(t[i]) # get one more value, add it to the array\n if not r.successful():\n raise RuntimeError(\"Could not integrate\")\n\nplt.plot(t, y)\nplt.show()",
"Fonction\nModelo Reverchon\nMathematical Modeling of Supercritical Extraction of Sage Oil",
"P = 9 #MPa\nT = 323 # K\nQ = 8.83 #g/min\ne = 0.4\nrho = 285 #kg/m3\nmiu = 2.31e-5 # Pa*s\ndp = 0.75e-3 # m\nDl = 0.24e-5 #m2/s\nDe = 8.48e-12 # m2/s\nDi = 6e-13\nu = 0.455e-3 #m/s\nkf = 1.91e-5 #m/s\nde = 0.06 # m\nW = 0.160 # kg\nkp = 0.2\n\nr = 0.31 #m\n\nn = 10\nV = 12\n\n#C = kp * qE\nC = 0.1\nqE = C / kp\n\nCn = 0.05\nCm = 0.02\n\n\nt = np.linspace(0,10, 1)\n\nti = (r ** 2) / (15 * Di)\n\n\ndef reverchon(x,t):\n \n #Ecuaciones diferenciales del modelo Reverchon \n #dCdt = - (n/(e * V)) * (W * (Cn - Cm) / rho + (1 - e) * V * dqdt)\n #dqdt = - (1 / ti) * (q - qE)\n \n q = x[0]\n C = x[1]\n qE = C / kp\n dqdt = - (1 / ti) * (q - qE)\n dCdt = - (n/(e * V)) * (W * (C - Cm) / rho + (1 - e) * V * dqdt)\n \n return [dqdt, dCdt] \n\n\nreverchon([1, 2], 0)\n\nx0 = [0, 0]\nt = np.linspace(0, 3000, 500)\n\nresultado = odeint(reverchon, x0, t)\n\nqR = resultado[:, 0]\nCR = resultado[:, 1]\nplt.plot(t, CR)\nplt.title(\"Modelo Reverchon\")\nplt.xlabel(\"t [=] min\")\nplt.ylabel(\"C [=] $kg/m^3$\")\n\nx0 = [0, 0]\nt = np.linspace(0, 3000, 500)\n\nresultado = odeint(reverchon, x0, t)\n\nqR = resultado[:, 0]\nCR = resultado[:, 1]\nplt.plot(t, qR)\nplt.title(\"Modelo Reverchon\")\nplt.xlabel(\"t [=] min\")\nplt.ylabel(\"C solid–fluid interface [=] $kg/m^3$\")\n\nprint(CR)\n\nr = 0.31 #m\nx0 = [0, 0]\nt = np.linspace(0, 3000, 500)\n\nresultado = odeint(reverchon, x0, t)\n\nqR = resultado[:, 0]\nCR = resultado[:, 1]\nplt.plot(t, CR)\nplt.title(\"Modelo Reverchon\")\nplt.xlabel(\"t [=] min\")\nplt.ylabel(\"C [=] $kg/m^3$\")\n\nr = 0.231 #m\nx0 = [0, 0]\nt = np.linspace(0, 3000, 500)\n\nresultado = odeint(reverchon, x0, t)\n\nqR = resultado[:, 0]\nCR = resultado[:, 1]\nplt.plot(t, CR)\nplt.title(\"Modelo Reverchon\")\nplt.xlabel(\"t [=] min\")\nplt.ylabel(\"C [=] $kg/m^3$\")\n\nfig,axes=plt.subplots(2,2)\naxes[0,0].plot(t,CR)\naxes[1,0].plot(t,qR)\n",
"Trabajo futuro\n\nRealizar modificaciones de los parametros para observar cómo afectan al comportamiento del modelo.\nRealizar un ejemplo de optimización de parámetros utilizando el modelo de Reverchon.\n\nReferencias\n[1] E. Reverchon, Mathematical modelling of supercritical extraction of sage oil, AIChE J. 42 (1996) 1765–1771.\nhttps://onlinelibrary.wiley.com/doi/pdf/10.1002/aic.690420627\n[2] Amit Rai, Kumargaurao D.Punase, Bikash Mohanty, Ravindra Bhargava, Evaluation of models for supercritical fluid extraction, International Journal of Heat and Mass Transfer Volume 72, May 2014, Pages 274-287. https://www.sciencedirect.com/science/article/pii/S0017931014000398\nAjuste de parámetros con ODEs: modelo Reverchon\nExplicaciones : \n- Poner los datos experimentales \n- Definir las ecuaciones diferenciales ordinarias del systema con los diferentes parametros\n- Calcular el valor de la ecuacion diferencial en cada punto, se necesita una otra funcion para integrar la ecuacion \n- Despues tenemos que definir una funcion de minimos cuadrados para cada valor de y : minimos cuadrados es una tecnica de analisis numerico enmarcada dentro de la optimizacion matematica y se intenta encontrar la funcion continua entre los variables independentes y dependentes \n- Para resolverlo se necesita las varoles iniciales para los parametros y los ecuacions ordinarias, para obtener los paramètros de la funcion. Despues se necesita hacer una interpolacion para los valores de las ODEs y para hacerlo se usa splines (spline es una funcion definida per partes por los polynomios), en Python splines es un método que se usa cuando hay problemas de interpolacion.",
"\n\n#Datos experimentales\nx_data = np.linspace(0,9,10)\ny_data = np.array([0.000,0.416,0.489,0.595,0.506,0.493,0.458,0.394,0.335,0.309])\n\ndef f(y, t, k): \n \"\"\" sistema de ecuaciones diferenciales ordinarias \"\"\"\n return (-k[0]*y[0], k[0]*y[0]-k[1]*y[1], k[1]*y[1])\n\ndef my_ls_func(x,teta):\n f2 = lambda y, t: f(y, t, teta)\n # calcular el valor de la ecuación diferencial en cada punto\n r = integrate.odeint(f2, y0, x)\n return r[:,1]\n\ndef f_resid(p):\n # definir la función de minimos cuadrados para cada valor de y\"\"\"\n \n return y_data - my_ls_func(x_data,p)\n\n#resolver el problema de optimización\nguess = [0.2, 0.3] #valores inicales para los parámetros\ny0 = [1,0,0] #valores inciales para el sistema de ODEs\n(c, kvg) = optimize.leastsq(f_resid, guess) #get params\n\nprint(\"parameter values are \",c)\n\n# interpolar los valores de las ODEs usando splines\nxeval = np.linspace(min(x_data), max(x_data),30) \ngls = interpolate.UnivariateSpline(xeval, my_ls_func(xeval,c), k=3, s=0)\n\n\nxeval = np.linspace(min(x_data), max(x_data), 200)\n#Gráficar los resultados\npp.plot(x_data, y_data,'.r',xeval,gls(xeval),'-b')\npp.xlabel('t [=] min',{\"fontsize\":16})\npp.ylabel(\"C\",{\"fontsize\":16})\npp.legend(('Datos','Modelo'),loc=0)\npp.show()\n\nf_resid(guess)\n\n#Datos experimentales\nx_data = np.linspace(0,9,10)\ny_data = np.array([0.000,0.416,0.489,0.595,0.506,0.493,0.458,0.394,0.335,0.309])\nprint(y_data)\n\n# def f(y, t, k): \n# \"\"\" sistema de ecuaciones diferenciales ordinarias \"\"\"\n \n# return (-k[0]*y[0], k[0]*y[0]-k[1]*y[1], k[1]*y[1])\ndef reverchon(x,t,Di):\n \n #Ecuaciones diferenciales del modelo Reverchon \n #dCdt = - (n/(e * V)) * (W * (Cn - Cm) / rho + (1 - e) * V * dqdt)\n #dqdt = - (1 / ti) * (q - qE)\n \n q = x[0]\n C = x[1]\n qE = C / kp\n ti = (r**2) / (15 * Di)\n dqdt = - (1 / ti) * (q - qE)\n dCdt = - (n/(e * V)) * (W * (C - Cm) / rho + (1 - e) * V * dqdt)\n \n return [dqdt, dCdt] \n\ndef my_ls_func(x,teta):\n f2 = lambda y, t: 
reverchon(y, t, teta)\n # calcular el valor de la ecuación diferencial en cada punto\n rr = integrate.odeint(f2, y0, x)\n print(f2)\n return rr[:,1]\n\ndef f_resid(p):\n # definir la función de minimos cuadrados para cada valor de y\"\"\"\n \n return y_data - my_ls_func(p,x_data)\n\n#resolver el problema de optimización\nguess = np.array([0.2]) #valores inicales para los parámetros\ny0 = [0,0] #valores inciales para el sistema de ODEs\n(c, kvg) = optimize.leastsq(f_resid, guess) #get params\n\nprint(\"parameter values are \",c)\n\n# interpolar los valores de las ODEs usando splines\nxeval = np.linspace(min(x_data), max(x_data),30) \ngls = interpolate.UnivariateSpline(xeval, my_ls_func(xeval,c), k=3, s=0)\n\n\nxeval = np.linspace(min(x_data), max(x_data), 200)\n#Gráficar los resultados\npp.plot(x_data, y_data,'.r',xeval,gls(xeval),'-b')\npp.xlabel('t [=] min',{\"fontsize\":16})\npp.ylabel(\"C\",{\"fontsize\":16})\npp.legend(('Datos','Modelo'),loc=0)\npp.show()\n\ndef my_ls_func(x,teta):\n f2 = lambda y, t: reverchon(y, t, teta)\n # calcular el valor de la ecuación diferencial en cada punto\n r = integrate.odeint(f2, y0, x)\n print(f2)\n return r[:,1]\n\nmy_ls_func(y0,guess)\n\nf_resid(guess)",
"Méthode des moindres carrés"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
bretthandrews/marvin
|
docs/sphinx/jupyter/my-first-query.ipynb
|
bsd-3-clause
|
[
"My First Query\nOne of the most powerful features of Marvin 2.0 is ability to query the newly created DRP and DAP databases. You can do this in two ways:\n1. via the Marvin-web Search page or\n2. via Python (in the terminal/notebook/script) with Marvin-tools.\nThe best part is that both interfaces use the same underlying query structure, so your input search will be the same. Here we will run a few queries with Marvin-tools to learn the basics of how to construct a query and also test drive some of the more advanced features that are unique to the Marvin-tools version of querying.",
"# Python 2/3 compatibility\nfrom __future__ import print_function, division, absolute_import\n\nfrom marvin import config\nconfig.mode = 'remote'\nconfig.setRelease('MPL-4')\n\nfrom marvin.tools.query import Query",
"Let's search for galaxies with M$\\star$ > 3 $\\times$ 10$^{11}$ M$\\odot$.\nTo specify our search parameter, M$_\\star$, we must know the database table and name of the parameter. In this case, MaNGA uses the NASA-Sloan Atlas (NSA) for target selection so we will use the Sersic profile determination for stellar mass, which is the sersic_mass parameter of the nsa table, so our search parameter will be nsa.sersic_mass. You can also use nsa.sersic_logmass\nGenerically, the search parameter will take the form table.parameter.",
"myquery1 = 'nsa.sersic_mass > 3e11'\n# or\nmyquery1 = 'nsa.sersic_logmass > 11.47'\n\nq1 = Query(searchfilter=myquery1)\nr1 = q1.run()",
"Running the query produces a Results object (r1):",
"# show results\nr1.results",
"We will learn how to use the features of our Results object a little bit later, but first let's revise our search to see how more complex search queries work.\nMultiple Search Criteria\nLet's add to our search to find only galaxies with a redshift less than 0.1.\nRedshift is the z parameter and is also in the nsa table, so its full search parameter designation is nsa.z.",
"myquery2 = 'nsa.sersic_mass > 3e11 AND nsa.z < 0.1'\n\nq2 = Query(searchfilter=myquery2)\nr2 = q2.run()\n\nr2.results",
"Compound Search Statements\nWe were hoping for a few more than 3 galaxies, so let's try to increase our search by broadening the criteria to also include galaxies with 127 fiber IFUs and a b/a ratio of at least 0.95.\nTo find 127 fiber IFUs, we'll use the name parameter of the ifu table, which means the full search parameter is ifu.name. However, ifu.name returns the IFU design name, such as 12701, so we need to to set the value to 127*.\nThe b/a ratio is in nsa table as the ba90 parameter.\nWe're also going to join this to or previous query with an OR operator and use parentheses to group our individual search statements into a compound search statement.",
"myquery3 = '(nsa.sersic_mass > 3e11 AND nsa.z < 0.1) OR (ifu.name=127* AND nsa.ba90 >= 0.95)'\n\nq3 = Query(searchfilter=myquery3)\nr3 = q3.run()\n\nr3.results",
"Design Your Own Search\nOK, now it's your turn to try designing a search.\nExercise: Write a search filter that will find galaxies with a redshift less than 0.02 that were observed with the 1901 IFU?",
"# Enter your search here",
"You should get 8 results:\n[NamedTuple(mangaid='1-22438', plate=7992, name='1901', z=0.016383046284318),\n NamedTuple(mangaid='1-113520', plate=7815, name='1901', z=0.0167652331292629),\n NamedTuple(mangaid='1-113698', plate=8618, name='1901', z=0.0167444702237844),\n NamedTuple(mangaid='1-134004', plate=8486, name='1901', z=0.0185601413249969),\n NamedTuple(mangaid='1-155903', plate=8439, name='1901', z=0.0163660924881697),\n NamedTuple(mangaid='1-167079', plate=8459, name='1901', z=0.0157109703868628),\n NamedTuple(mangaid='1-209729', plate=8549, name='1901', z=0.0195561610162258),\n NamedTuple(mangaid='1-277339', plate=8254, name='1901', z=0.0192211158573627)]\nFinding the Available Parameters\nNow you might want to go out and try all of the interesting queries that you've been saving up, but you don't know what the parameters are called or what database table they are in. \nYou can find all of the availabale parameters by:\n1. clicking on in the Return Parameters dropdown menu on the left side of the Marvin-web Search page,\n2. reading the Marvin Docs page, or\n3. via Marvin-tools (see next two cells)",
"# You might have to do an svn update to get this to work (otherwise try the next cell)\nq = Query()\nq.get_available_params()\n\n# try this if the previous cell didn't return a list of parameters\nfrom marvin.api.api import Interaction\nfrom pprint import pprint\nurl = config.urlmap['api']['getparams']['url']\nii = Interaction(route=url)\nmykeys = ii.getData()\npprint(mykeys)",
"Go ahead and try to create some new searches on your own from the parameter list. Please feel free to also try out the some of the same search on the Marvin-web Search page.\nReturning Bonus Parameters\nOften you want to run a query and see the value of parameters that you didn't explicitly search on. For instance, you want to find galaxies above a redshift of 0.1 and would like to know their RA and DECs.\nIn Marvin-tools, this is as easy as specifying the returnparams option with either a string (for a single bonus parameter) or a list of strings (for multiple bonus parameters).",
"myquery5 = 'nsa.z > 0.1'\nbonusparams5 = ['cube.ra', 'cube.dec']\n# bonusparams5 = 'cube.ra' # This works too\n\nq5 = Query(searchfilter=myquery5, returnparams=bonusparams5)\nr5 = q5.run()\n\nr5.results",
"Next time, we'll take a closer look at the Results class and its built in MaNGA convenience functions.\nUseful Resources\nCheck out these pages on the Marvin Docs site for more Query tips and tricks.\n\nQuery\nExample Queries\nResults \nBoolean Search Tutorial"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
metpy/MetPy
|
v1.0/_downloads/c1a3b4ec1d09d4debc078297d433a9b2/Point_Interpolation.ipynb
|
bsd-3-clause
|
[
"%matplotlib inline",
"Point Interpolation\nCompares different point interpolation approaches.",
"import cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nfrom matplotlib.colors import BoundaryNorm\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom metpy.cbook import get_test_data\nfrom metpy.interpolate import (interpolate_to_grid, remove_nan_observations,\n remove_repeat_coordinates)\nfrom metpy.plots import add_metpy_logo\n\ndef basic_map(proj, title):\n \"\"\"Make our basic default map for plotting\"\"\"\n fig = plt.figure(figsize=(15, 10))\n add_metpy_logo(fig, 0, 80, size='large')\n view = fig.add_axes([0, 0, 1, 1], projection=proj)\n view.set_title(title)\n view.set_extent([-120, -70, 20, 50])\n view.add_feature(cfeature.STATES.with_scale('50m'))\n view.add_feature(cfeature.OCEAN)\n view.add_feature(cfeature.COASTLINE)\n view.add_feature(cfeature.BORDERS, linestyle=':')\n return fig, view\n\n\ndef station_test_data(variable_names, proj_from=None, proj_to=None):\n with get_test_data('station_data.txt') as f:\n all_data = np.loadtxt(f, skiprows=1, delimiter=',',\n usecols=(1, 2, 3, 4, 5, 6, 7, 17, 18, 19),\n dtype=np.dtype([('stid', '3S'), ('lat', 'f'), ('lon', 'f'),\n ('slp', 'f'), ('air_temperature', 'f'),\n ('cloud_fraction', 'f'), ('dewpoint', 'f'),\n ('weather', '16S'),\n ('wind_dir', 'f'), ('wind_speed', 'f')]))\n\n all_stids = [s.decode('ascii') for s in all_data['stid']]\n\n data = np.concatenate([all_data[all_stids.index(site)].reshape(1, ) for site in all_stids])\n\n value = data[variable_names]\n lon = data['lon']\n lat = data['lat']\n\n if proj_from is not None and proj_to is not None:\n\n try:\n\n proj_points = proj_to.transform_points(proj_from, lon, lat)\n return proj_points[:, 0], proj_points[:, 1], value\n\n except Exception as e:\n\n print(e)\n return None\n\n return lon, lat, value\n\n\nfrom_proj = ccrs.Geodetic()\nto_proj = ccrs.AlbersEqualArea(central_longitude=-97.0000, central_latitude=38.0000)\n\nlevels = list(range(-20, 20, 1))\ncmap = plt.get_cmap('magma')\nnorm = BoundaryNorm(levels, ncolors=cmap.N, 
clip=True)\n\nx, y, temp = station_test_data('air_temperature', from_proj, to_proj)\n\nx, y, temp = remove_nan_observations(x, y, temp)\nx, y, temp = remove_repeat_coordinates(x, y, temp)",
"Scipy.interpolate linear",
"gx, gy, img = interpolate_to_grid(x, y, temp, interp_type='linear', hres=75000)\nimg = np.ma.masked_where(np.isnan(img), img)\nfig, view = basic_map(to_proj, 'Linear')\nmmb = view.pcolormesh(gx, gy, img, cmap=cmap, norm=norm)\nfig.colorbar(mmb, shrink=.4, pad=0, boundaries=levels)",
"Natural neighbor interpolation (MetPy implementation)\nReference <https://github.com/Unidata/MetPy/files/138653/cwp-657.pdf>_",
"gx, gy, img = interpolate_to_grid(x, y, temp, interp_type='natural_neighbor', hres=75000)\nimg = np.ma.masked_where(np.isnan(img), img)\nfig, view = basic_map(to_proj, 'Natural Neighbor')\nmmb = view.pcolormesh(gx, gy, img, cmap=cmap, norm=norm)\nfig.colorbar(mmb, shrink=.4, pad=0, boundaries=levels)",
"Cressman interpolation\nsearch_radius = 100 km\ngrid resolution = 25 km\nmin_neighbors = 1",
"gx, gy, img = interpolate_to_grid(x, y, temp, interp_type='cressman', minimum_neighbors=1,\n hres=75000, search_radius=100000)\nimg = np.ma.masked_where(np.isnan(img), img)\nfig, view = basic_map(to_proj, 'Cressman')\nmmb = view.pcolormesh(gx, gy, img, cmap=cmap, norm=norm)\nfig.colorbar(mmb, shrink=.4, pad=0, boundaries=levels)",
"Barnes Interpolation\nsearch_radius = 100km\nmin_neighbors = 3",
"gx, gy, img1 = interpolate_to_grid(x, y, temp, interp_type='barnes', hres=75000,\n search_radius=100000)\nimg1 = np.ma.masked_where(np.isnan(img1), img1)\nfig, view = basic_map(to_proj, 'Barnes')\nmmb = view.pcolormesh(gx, gy, img1, cmap=cmap, norm=norm)\nfig.colorbar(mmb, shrink=.4, pad=0, boundaries=levels)",
"Radial basis function interpolation\nlinear",
"gx, gy, img = interpolate_to_grid(x, y, temp, interp_type='rbf', hres=75000, rbf_func='linear',\n rbf_smooth=0)\nimg = np.ma.masked_where(np.isnan(img), img)\nfig, view = basic_map(to_proj, 'Radial Basis Function')\nmmb = view.pcolormesh(gx, gy, img, cmap=cmap, norm=norm)\nfig.colorbar(mmb, shrink=.4, pad=0, boundaries=levels)\n\nplt.show()"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
google-research/google-research
|
jax_dft/examples/recover_potential_from_density_and_energy.ipynb
|
apache-2.0
|
[
"<a href=\"https://colab.research.google.com/github/google-research/google-research/blob/master/jax_dft/examples/recover_potential_from_density_and_energy.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>\nCopyright 2020 Google LLC.\nLicensed under the Apache License, Version 2.0 (the \"License\");",
"#@title Default title text\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"Setup",
"!pip install --upgrade jax jaxlib\n\n# Install jax-dft\n!git clone https://github.com/google-research/google-research.git\n!pip install google-research/jax_dft\n\nimport jax\nfrom jax.config import config\nimport jax.numpy as jnp\nfrom jax_dft import scf\nfrom jax_dft import utils\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Set the default dtype as float64\nconfig.update('jax_enable_x64', True)\n\n# Set plot style\nimport matplotlib as mpl\n\nCOLORS = [\n '#0072b2',\n '#de8f05',\n '#009e73',\n '#cc79a7',\n '#a24f00',\n '#9467bd',\n '#56b4e9',\n '#bcbd22',\n '#7f7f7f',\n]\n\n\ndef set_matplotlib_style():\n \"\"\"Sets the matplotlib style for the colab notebook.\"\"\"\n mpl.rcParams['image.cmap'] = 'inferno'\n # Set width and size for lines and markers.\n mpl.rcParams['lines.linewidth'] = 2.5\n mpl.rcParams['lines.markersize'] = 9\n mpl.rcParams['lines.markeredgewidth'] = 0\n # Set fontsize.\n mpl.rcParams['font.size'] = 18\n mpl.rcParams['axes.labelsize'] = 20\n mpl.rcParams['axes.titlesize'] = 20\n mpl.rcParams['axes.formatter.useoffset'] = False\n mpl.rcParams['legend.fontsize'] = 14\n mpl.rcParams['xtick.labelsize'] = 14\n mpl.rcParams['ytick.labelsize'] = 14\n # default plot colors recommended by @zan\n # adapted from the seaborn colorblind colors.\n # https://seaborn.pydata.org/tutorial/color_palettes.html\n # Purposely avoids red both for colorblind reasons\n # and because red often reads as more important/bad or more eye-catching than\n # other colors.\n # These colors have been vetted by an individual with red/green color\n # confusion.\n mpl.rcParams['axes.prop_cycle'] = mpl.cycler(color=COLORS)\n mpl.rcParams['savefig.dpi'] = 120\n mpl.rcParams['savefig.bbox'] = 'tight'\n\nset_matplotlib_style()\n\ndef show_density_potential(\n grids, density, potential, do_show=True, grey=False, axs=None):\n if axs is None:\n _, axs = plt.subplots(nrows=2)\n axs[0].plot(grids, density, c='0.5' if grey else COLORS[0])\n axs[1].plot(grids, potential, c='0.5' 
if grey else COLORS[1])\n axs[0].set_ylabel(r'$n(x)$')\n axs[1].set_ylabel(r'$v(x)$')\n axs[1].set_xlabel(r'$x$')\n if do_show:\n plt.show()",
"Run\nDefine grids",
"grids = np.linspace(-5, 5, 201)\ndx = utils.get_dx(grids)",
"Quantum Harmonic Oscillator\n$v(x)=\\frac{1}{2}k x^2$, where $k=1$.\nThe ground state energy is $0.5$ Hartree.",
"qho_potential = 0.5 * grids ** 2\n\nqho_density, qho_energy, _ = (\n scf.solve_noninteracting_system(\n qho_potential,\n num_electrons=1,\n grids=grids))\n\nprint(f'total energy: {qho_energy}')\nshow_density_potential(grids, qho_density, qho_potential, grey=True)",
"Perturbed Quantum Harmonic Oscillator\nLet's add a perturbation on the potential. We will see that the density is of course not the original density.",
"perturbed_potential = qho_potential + np.exp(-(grids - 0.5) ** 2 / 0.04)\nperturbed_density, perturbed_energy, _ = (\n scf.solve_noninteracting_system(\n perturbed_potential,\n num_electrons=1,\n grids=grids))\n\nprint(f'total energy: {perturbed_energy}')\n_, axs = plt.subplots(nrows=2)\nshow_density_potential(\n grids, qho_density, qho_potential, grey=True, do_show=False, axs=axs)\nshow_density_potential(\n grids, perturbed_density, perturbed_potential, axs=axs)",
"Adjust potential from loss\n$L=\\int (n - n_\\mathrm{QHO})^2 dx + (E - E_\\mathrm{QHO})^2$",
"# Note the use of `jnp` not `np` here.\ndef density_loss(output, target):\n return jnp.sum((output - target) ** 2) * dx\n\ndef energy_loss(output, target):\n return (output - target) ** 2\n\nprint(f'Current density loss {density_loss(perturbed_density, qho_density)}')\nprint(f'Current energy loss {energy_loss(perturbed_energy, qho_energy)}')\nprint(f'Current total loss {density_loss(perturbed_density, qho_density) + energy_loss(perturbed_energy, qho_energy)}')\n\ndef loss_fn(potential):\n density, energy, _ = scf.solve_noninteracting_system(\n potential, num_electrons=1, grids=grids)\n return density_loss(density, qho_density) + energy_loss(energy, qho_energy)\n\nprint(f'Loss with perturbed potential {loss_fn(perturbed_potential)}')\nprint(f'Loss with QHO potential {loss_fn(qho_potential)}')",
"You can get the gradient $\\frac{\\partial L_n}{\\partial v}$ via automatic differentiation from jax.grad",
"grad_fn = jax.jit(jax.grad(loss_fn)) # Compile with jit for fast grad.\n\nplt.plot(grids, grad_fn(perturbed_potential), '--', c=COLORS[2])\nplt.xlabel(r'$x$')\nplt.ylabel(r'$\\frac{\\partial L_n}{\\partial v}$')\nplt.show()",
"Now we have the gradient. Let's update the potential from the graident of loss with respect to the potential.\n$$v\\leftarrow v - \\epsilon\\frac{\\partial L}{\\partial v}$$",
"potential = perturbed_potential\nloss_history = []\npotential_history = []\nrecord_interval = 1000\nfor i in range(5001):\n if i % record_interval == 0:\n loss_value = loss_fn(potential)\n print(f'step {i}, loss {loss_value}')\n loss_history.append(loss_value)\n potential_history.append(potential)\n potential -= 30 * grad_fn(potential)",
"Visualize the learning curve",
"history_size = len(loss_history)\n\nplt.plot(np.arange(history_size) * record_interval, loss_history)\nplt.axhline(y=0, color='0.5', ls='--')\nplt.xlabel('step')\nplt.ylabel(r'$L$')\nplt.show()",
"and how the potential and corresponding density change.",
"_, axs = plt.subplots(\n nrows=2, ncols=history_size, figsize=(2.5 * history_size, 4),\n sharex=True, sharey='row')\nfor i, ax in enumerate(axs[0]):\n ax.plot(grids, qho_density, c='0.5')\n density, _, _ = scf.solve_noninteracting_system(\n potential_history[i], num_electrons=1, grids=grids)\n ax.plot(grids, density, '--', c=COLORS[0])\n ax.set_title(rf'$L=${loss_fn(potential_history[i]):1.1e}')\n\nfor i, ax in enumerate(axs[1]):\n ax.plot(grids, qho_potential, c='0.5')\n ax.plot(grids, potential_history[i], '--', c=COLORS[1])\n\n# Zoom in the potential.\naxs[1][0].set_xlim(-2, 2)\naxs[1][0].set_ylim(0.01, 3)\naxs[0][0].set_ylabel(r'$n(x)$')\naxs[1][0].set_ylabel(r'$v(x)$')\nplt.show()",
"Visualize the final result.",
"optimized_potential = potential_history[-1]\noptimized_density, optimized_total_eigen_energies, _ = (\n scf.solve_noninteracting_system(\n optimized_potential,\n num_electrons=1,\n grids=grids))\n\nprint(f'total energy: {optimized_total_eigen_energies}')\n\n_, axs = plt.subplots(nrows=2)\naxs[0].plot(grids, optimized_density - qho_density, c=COLORS[0])\naxs[0].set_ylabel(r'$\\Delta n(x)$')\naxs[1].plot(grids, optimized_potential - qho_potential, c=COLORS[1])\naxs[1].set_ylabel(r'$\\Delta v(x)$')\naxs[1].set_xlabel(r'$x$')\nplt.show()\n\n_, axs = plt.subplots(nrows=2)\nshow_density_potential(\n grids, qho_density, qho_potential, grey=True, do_show=False, axs=axs)\nshow_density_potential(\n grids, optimized_density, optimized_potential, axs=axs)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
Neuroglycerin/neukrill-net-work
|
notebooks/Alexnet based 40 aug visualisation.ipynb
|
mit
|
[
"import io\nimport json\nimport os.path\nimport theano as th\nimport numpy as np\nimport PIL.Image as img\nimport matplotlib.pyplot as plt\nfrom pylearn2.utils.serial import load as load_model\nfrom pylearn2.gui.get_weights_report import get_weights_report\nimport neukrill_net.image_directory_dataset as idd\nimport neukrill_net.encoding as enc\nfrom IPython.display import display, Image\n%matplotlib inline\n\nMODEL_PICKLE_PATH = '/home/mgraham/alexnet_based_40aug.pkl'\nSETTINGS_PATH = '/home/mgraham/projects/neukrill-net-work/settings.json'\nRUN_SETTINGS_PATH = '/home/mgraham/projects/neukrill-net-work/run_settings/alexnet_based_40aug.json'\nWEIGHT_IMAGE_SCALE = 8\nLAYER_ACTIV_SCALE = 2\nN_CONV_LAYER_ROWS = 8\nN_TEST_IMS = 200\nSEED = 1234\n\nmodel = load_model(os.path.expandvars(MODEL_PICKLE_PATH))\n\ninput_space = model.get_input_space()\ninput_axes = input_space.axes\ninput_height, input_width = input_space.shape\nwith open(RUN_SETTINGS_PATH, 'r') as f:\n run_settings = json.load(f)\nwith open(SETTINGS_PATH, 'r') as f:\n settings = json.load(f)\nif run_settings.has_key('prepreprocessing'):\n prepreprocessing = run_settings['prepreprocessing']\nelse:\n prepreprocessing = {'resize' : [input_height, input_width], 'resize_order': 1,\n 'normalise' : run_settings['preprocessing']['normalise']}\nnormalise_mu = prepreprocessing['normalise']['mu']\nnormalise_sigma = prepreprocessing['normalise']['sigma']\nprng = np.random.RandomState(SEED)\n\ndataset = idd.ListDataset(transformer=lambda x: None, settings_path=SETTINGS_PATH, \n run_settings_path=RUN_SETTINGS_PATH, \n training_set_mode='test', force=True,\n prepreprocessing=prepreprocessing)",
"Model summary",
"print('## Model structure summary\\n')\nprint(model)\nparams = model.get_params() \nn_params = {p.name : p.get_value().size for p in params}\ntotal_params = sum(n_params.values())\nprint('\\n## Number of parameters\\n')\nprint(' ' + '\\n '.join(['{0} : {1} ({2:.1f}%)'.format(k, v, 100.*v/total_params) \n for k, v in sorted(n_params.items(), key=lambda x: x[0])]))\nprint('\\nTotal : {0}'.format(total_params))",
"Train and valid set NLL trace",
"tr = np.array(model.monitor.channels['valid_y_nll'].time_record) / 3600.\nfig = plt.figure(figsize=(12,8))\nax1 = fig.add_subplot(111)\nax1.plot(model.monitor.channels['valid_y_nll'].val_record)\nax1.plot(model.monitor.channels['train_y_nll'].val_record)\nax1.set_xlabel('Epochs')\nax1.legend(['Valid', 'Train'])\nax1.set_ylabel('NLL')\nax1.set_ylim(0., 5.)\nax1.grid(True)\nax2 = ax1.twiny()\nax2.set_xticks(np.arange(0,tr.shape[0],20))\nax2.set_xticklabels(['{0:.2f}'.format(t) for t in tr[::20]])\nax2.set_xlabel('Hours')\nprint(\"Minimum validation set NLL {0}\".format(min(model.monitor.channels['valid_y_nll'].val_record)))",
"Visualising first layer weights",
"pv = get_weights_report(model=model)\nw_img = pv.get_img()\nw_img = w_img.resize((WEIGHT_IMAGE_SCALE*w_img.size[0], WEIGHT_IMAGE_SCALE*w_img.size[1]))\nw_img_data = io.BytesIO()\nw_img.save(w_img_data, format='png')\ndisplay(Image(data=w_img_data.getvalue(), format='png'))",
"Visualising activitations for example test images\nPlot an example image to check loaded correctly",
"plt.imshow(dataset.X[0])",
"Compile theano function for forward propagating through network and getting all layer activations",
"X = model.get_input_space().make_theano_batch()\nY = model.fprop( X, True )\nmodel_activ_func = th.function([X], Y)\n\ntest_idx = prng.choice(len(dataset.X), N_TEST_IMS, False)\ninput_arrs = np.array([dataset.X[i].astype(np.float32).reshape(input_height, input_width, 1) for i in test_idx])\ninput_arrs = (input_arrs - normalise_mu)/normalise_sigma\nif input_axes == ('c', 0, 1, 'b'):\n input_arrs = input_arrs.transpose((3,1,2,0))\ntrue_labels = [int(np.where(y)[0]) for y in dataset.y[test_idx]]\nactivs = model_activ_func(input_arrs)\n\ndef construct_activity_mosaic(layer_activ, pad=1, margin=5, n_rows=None):\n n_channels, w, h = layer_activ.shape\n if n_rows is None:\n n_rows = int(n_channels**0.5)\n n_cols = int(((1.*n_channels)/n_rows)+0.5)\n assert n_rows * n_cols >= n_channels, \"n_rows * n_cols ({0}) < n_channels ({1})\".format(n_rows*n_cols, n_channels)\n width = n_cols * (w + pad) - pad + 2 * margin\n height = n_rows * (h + pad) - pad + 2 * margin\n mosaic = np.ones((height, width))\n x, y = margin, margin\n r, c = 0, 0\n for i in range(n_channels):\n mosaic[y:y+h, x:x+w] = layer_activ[i].T\n x += w + pad\n c += 1\n if c == n_cols:\n c = 0\n r += 1\n y += h + pad\n x = margin\n return mosaic\n\nnorm_conv_activs = [activ[:,:,:,:] for activ in activs[:5]]\nnorm_conv_activs = [activ - activ.min(axis=(2,3))[:,:,None,None] for activ in norm_conv_activs]\nnorm_conv_activs = [activ / activ.max(axis=(2,3))[:,:,None,None] for activ in norm_conv_activs]\nnorm_fc_activ = activs[5] - activs[5].min()\nnorm_fc_activ = norm_fc_activ / norm_fc_activ.max()\nsoftmax_activ = activs[-1]\n\ni = 175\ntrue_y = true_labels[i]\n# input image\ninput_arr = input_arrs[:,:,:,i].reshape(1, input_height, input_width) * normalise_sigma + normalise_mu\ninput_arr = construct_activity_mosaic(1.-input_arr, 0, 1)\ninput_im = img.fromarray(np.uint8((1.-input_arr)*255))\ninput_im = input_im.resize((LAYER_ACTIV_SCALE*input_im.size[0], LAYER_ACTIV_SCALE*input_im.size[1]))\ninput_data = 
io.BytesIO()\ninput_im.save(input_data, format='png')\ndisplay(Image(data=input_data.getvalue(), format='png'))\n# convolutional layers\nfor norm_conv_activ in norm_conv_activs:\n mosaic_arr = construct_activity_mosaic(norm_conv_activ[i], 2, 5, N_CONV_LAYER_ROWS)\n mosaic_im = img.fromarray(np.uint8((mosaic_arr)*255))\n mosaic_im = mosaic_im.resize((LAYER_ACTIV_SCALE*mosaic_im.size[0], LAYER_ACTIV_SCALE*mosaic_im.size[1]))\n mosaic_data = io.BytesIO()\n mosaic_im.save(mosaic_data, format='png')\n display(Image(data=mosaic_data.getvalue(), format='png'))\n# fc layer\nlayer_arr = construct_activity_mosaic(norm_fc_activ[i,:].reshape(-1, 16, 1), 0, 2)\nlayer_im = img.fromarray(np.uint8((1-layer_arr)*255))\nlayer_im = layer_im.resize((2*LAYER_ACTIV_SCALE*layer_im.size[0], 2*LAYER_ACTIV_SCALE*layer_im.size[1]))\nlayer_data = io.BytesIO()\nlayer_im.save(layer_data, format='png')\ndisplay(Image(data=layer_data.getvalue(), format='png'))\n# softmax\nfig = plt.figure(figsize=(1, 10))\nax1 = fig.add_subplot(111)\nax1.barh(np.arange(softmax_activ[i,:].shape[0]), softmax_activ[i,:], 1., fill=True)\nax1.set_xticks([0, 1.])\nax1.set_yticks([])\nax1.annotate(\"True\", size=15,\n xy=(0.0, true_y+0.5), xycoords='data',\n xytext=(-0.9, true_y+0.5), textcoords='data',\n arrowprops=dict(arrowstyle=\"->\",connectionstyle=\"arc3\"),\n )\nplt.show()\n\npred_labels = np.array([np.argmax(softmax_activ[i]) for i in range(len(test_idx))])\n\nnp.arange(len(test_idx))[pred_labels != true_labels]\n\ntrue_y\n\npred_labels[i]\n\nsettings['classes'][37]"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
danijel3/ASRDemos
|
notebooks/MLP_Keras.ipynb
|
apache-2.0
|
[
"Simple MLP demo\nThis notebook demonstrates how to create a simple MLP for recognizing phonemes from speech. To do this, we will use a training dataset prepared in a different notebook titled VoxforgeDataPrep, so take a look at that before you start working on this demo.\nIn this example, we will use the excellent Keras library which depends upon either Theano or TensorFlow, so you will need to install those as well. Just follow the isntructions on the Keras website - it is recommended to use the freshest, Github versions of both Keras and Theano.\nI also have the convinence of using the GPU for the actual computation. This code will work just as well on the CPU, but it's much faster on a good GPU.\nWe start by importing numpy (for loading and working with the data) and the neccessary Keras classes. Feel free to add more here if you wish to experiment with them.",
"import numpy as np\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Activation\nfrom keras.optimizers import SGD, Adadelta\nfrom keras.callbacks import RemoteMonitor",
"First let's load our data. In the VoxforgeDataPrep notebook, we created to arrays - inputs and outputs. The input nas the dimensions (num_samples,num_features) and the output is simply 1D vector of ints of length (num_samples). In this step, we split the training data into actual training (90%) and dev (10%) and merge that with the test data. Finally we save the indices for all the sets (instead of actual arrays).",
"import sys\n\nsys.path.append('../python')\n\nfrom data import Corpus\n\nwith Corpus('../data/mfcc_train_small.hdf5',load_normalized=True,merge_utts=True) as corp:\n train,dev=corp.split(0.9)\n \ntest=Corpus('../data/mfcc_test.hdf5',load_normalized=True,merge_utts=True)\n\ntr_in,tr_out_dec=train.get()\ndev_in,dev_out_dec=dev.get()\ntst_in,tst_out_dec=test.get()",
"Next we define some constants for our program. Input and output dimensions can be inferred from the data, but the hidden layer size has to be defined manually.\nWe also redefine our outputs as a 1-of-N matrix instead of an int vector. The old outputs were simply a list of integers (from 0 to 39) defining the phoneme (as listed in ../data/phones.list) class for each sample given at input. The new matrix has dimensions (num_samples, num_classes) and is mostly 0 with a single 1 put in place corresponding to the class index in the old output vector.",
"input_dim=tr_in.shape[1]\noutput_dim=np.max(tr_out_dec)+1\n\nhidden_num=256\n\nbatch_size=256\nepoch_num=100\n\ndef dec2onehot(dec):\n num=dec.shape[0]\n ret=np.zeros((num,output_dim))\n ret[range(0,num),dec]=1\n return ret\n\ntr_out=dec2onehot(tr_out_dec)\ndev_out=dec2onehot(dev_out_dec)\ntst_out=dec2onehot(tst_out_dec)\n\nprint 'Samples num: {}'.format(tr_in.shape[0]+dev_in.shape[0]+tst_in.shape[0])\nprint ' of which: {} in train, {} in dev and {} in test'.format(tr_in.shape[0],dev_in.shape[0],tst_in.shape[0])\nprint 'Input size: {}'.format(input_dim)\nprint 'Output size (number of classes): {}'.format(output_dim)",
"Model definition\nHere we define our model using the Keras interface. There are two main model types in Keras: sequential and graph. Sequential is much more common and easy to use, so we start with that.\nNext we define the MLP topology. Here we have 3 layers: input, hidden and output. They are interconnected with two sets of Dense weight connections and a layer of activation functions after these weights. When defining the Dense weight layers, we need to provide the size: input and output are neccessary only for the first layer, subsequent layers use the output size of the previous layer as their input size.\nWe also define the type of optimizer and loss function we want to use. There are a few optimizers to choose from in the library and they are all interchangable. The differences between them are not too large in this example (feel free to experiment). The loss function chosen here is the cross-entropy function. Another option would be the simpler MSE (mean square error). Again, there doesn't seem to be much of a difference, but cross-entropy does seem like performing a bit better overall.",
"model = Sequential()\n\nmodel.add(Dense(input_dim=input_dim,output_dim=hidden_num))\nmodel.add(Activation('sigmoid'))\nmodel.add(Dense(output_dim=output_dim))\nmodel.add(Activation('softmax'))\n\n#optimizer = SGD(lr=0.01, momentum=0.9, nesterov=True)\noptimizer= Adadelta()\nloss='categorical_crossentropy'",
"After defining the model and all its parameters, we can compile it. This literally means compiling, because the model is converted into C++ code in the background and compiled with lots of optimizations to work as efficiently as possible. The process can take a while, but is worth the added speed in training.",
"model.compile(loss=loss, optimizer=optimizer)\n\nprint model.summary()",
"We can also try and visualize the model using the builtin Dot painter:",
"from keras.utils import visualize_util\nfrom IPython.display import SVG\n\nSVG(visualize_util.to_graph(model,show_shape=True).create(prog='dot', format='svg'))",
"Finally, we can start training the model. We provide the training function both training and validation data and define a few parameters: batch size and number of training epochs. Changing the batch size can affect both the training speed and final accuracy. This value is also closely related to the number of epochs. Generally, you want to run the training for as many epochs as needed for the model to converge on some value. The value of 100 should be fine for a quick comparison but up to 1k may be necessary to be abolutely sure (especially when testing larger models).",
"val=(dev_in,dev_out)\n\nhist=model.fit(tr_in, tr_out, shuffle=True, batch_size=batch_size, nb_epoch=epoch_num, verbose=0, validation_data=val)",
"The training method returns an object that contains the trained model parameters and the training history:",
"import matplotlib.pyplot as P\n%matplotlib inline\n\nP.plot(hist.history['loss'])",
"You can get better graphs and more data if you overload the training callback method, which will provide you with the model parameters after each epoch during training.\nAfter the model is trained, we can easily test it using the evaluate method. The show_accuracy argument is required to compute the accuracy of the decision variable. The returned result has a 2-element list, where the first value is the loss of the model on the test data and the second is the accuracy:",
"res=model.evaluate(tst_in,tst_out,batch_size=batch_size,show_accuracy=True,verbose=0)\n\nprint 'Loss: {}'.format(res[0])\nprint 'Accuracy: {:%}'.format(res[1])",
"One other way to look at this is to check where the errors occur by looking at what's known as the confusion matrix. The confusion matrix counts the number of predicted outputs with respect on how they should have been predicted. All the values on the diagonal (so where the predicted class is equal to the reference) are correct results. Any values outside of the diagonal are the errors, or confusions of one class with another. For example, you can see that 'g' is confused by 'k' (both same phonation place, but different voiceness), 'r' with 'er' (same thing, but the latter is a diphone), 't' with 'ch' (again same phonantion place, but sligthly different pronounciaction) and so on...",
"out = model.predict_classes(tst_in,batch_size=256,verbose=0)\n\nconfusion=np.zeros((output_dim,output_dim))\nfor s in range(len(out)):\n confusion[out[s],tst_out_dec[s]]+=1\n\n#normalize by class - because some classes occur much more often than others\nfor c in range(output_dim):\n confusion[c,:]/=np.sum(confusion[c,:])\n\nwith open('../data/phones.list') as f:\n ph=f.read().splitlines()\n \nP.figure(figsize=(15,15))\nP.pcolormesh(confusion,cmap=P.cm.gray)\nP.xticks(np.arange(0,output_dim)+0.5)\nP.yticks(np.arange(0,output_dim)+0.5)\nax=P.axes()\nax.set_xticklabels(ph)\nax.set_yticklabels(ph)\nprint ''",
"Further steps\nYou can play around with the different parameters and network topologies. The results aren't going to be much better using this type of model. Using recurrent topologies (e.g. LSTM) can work better, as well as providing more data. Crucially, however, framewise phoneme classification is not the best benchmark to test and isn't the most useful. Further notebooks will go into other technuiques for getting closer to the best speech recognition can provide."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
amitkaps/applied-machine-learning
|
Module-04a-Regression-Basic.ipynb
|
mit
|
[
"Housing Price Prediction\nFrame\nPredict the sale price of the house, given various features of the house\nAcquire",
"import pandas as pd\n\ndf_train = pd.read_csv(\"http://bit.do/house-price\")\n\ndf_train.head()\n\ndf_train.columns",
"Some info about the attributes\nMSSubClass: Identifies the type of dwelling involved in the sale. \n 20 1-STORY 1946 & NEWER ALL STYLES\n 30 1-STORY 1945 & OLDER\n 40 1-STORY W/FINISHED ATTIC ALL AGES\n 45 1-1/2 STORY - UNFINISHED ALL AGES\n 50 1-1/2 STORY FINISHED ALL AGES\n 60 2-STORY 1946 & NEWER\n 70 2-STORY 1945 & OLDER\n 75 2-1/2 STORY ALL AGES\n 80 SPLIT OR MULTI-LEVEL\n 85 SPLIT FOYER\n 90 DUPLEX - ALL STYLES AND AGES\n 120 1-STORY PUD (Planned Unit Development) - 1946 & NEWER\n 150 1-1/2 STORY PUD - ALL AGES\n 160 2-STORY PUD - 1946 & NEWER\n 180 PUD - MULTILEVEL - INCL SPLIT LEV/FOYER\n 190 2 FAMILY CONVERSION - ALL STYLES AND AGES\n\nMSZoning: Identifies the general zoning classification of the sale.\n A Agriculture\n C Commercial\n FV Floating Village Residential\n I Industrial\n RH Residential High Density\n RL Residential Low Density\n RP Residential Low Density Park \n RM Residential Medium Density\n\nLotFrontage: Linear feet of street connected to property\nLotArea: Lot size in square feet\nStreet: Type of road access to property\n Grvl Gravel \n Pave Paved\n\nAlley: Type of alley access to property\n Grvl Gravel\n Pave Paved\n NA No alley access\n\nLotShape: General shape of property\n Reg Regular \n IR1 Slightly irregular\n IR2 Moderately Irregular\n IR3 Irregular\n\nLandContour: Flatness of the property\n Lvl Near Flat/Level \n Bnk Banked - Quick and significant rise from street grade to building\n HLS Hillside - Significant slope from side to side\n Low Depression\n\nUtilities: Type of utilities available\n AllPub All public Utilities (E,G,W,& S) \n NoSewr Electricity, Gas, and Water (Septic Tank)\n NoSeWa Electricity and Gas Only\n ELO Electricity only\n\nLotConfig: Lot configuration\n Inside Inside lot\n Corner Corner lot\n CulDSac Cul-de-sac\n FR2 Frontage on 2 sides of property\n FR3 Frontage on 3 sides of property\n\nLandSlope: Slope of property\n Gtl Gentle slope\n Mod Moderate Slope \n Sev Severe Slope\n\nNeighborhood: Physical locations 
within Ames city limits\n Blmngtn Bloomington Heights\n Blueste Bluestem\n BrDale Briardale\n BrkSide Brookside\n ClearCr Clear Creek\n CollgCr College Creek\n Crawfor Crawford\n Edwards Edwards\n Gilbert Gilbert\n IDOTRR Iowa DOT and Rail Road\n MeadowV Meadow Village\n Mitchel Mitchell\n Names North Ames\n NoRidge Northridge\n NPkVill Northpark Villa\n NridgHt Northridge Heights\n NWAmes Northwest Ames\n OldTown Old Town\n SWISU South & West of Iowa State University\n Sawyer Sawyer\n SawyerW Sawyer West\n Somerst Somerset\n StoneBr Stone Brook\n Timber Timberland\n Veenker Veenker\n\nCondition1: Proximity to various conditions\n Artery Adjacent to arterial street\n Feedr Adjacent to feeder street \n Norm Normal \n RRNn Within 200' of North-South Railroad\n RRAn Adjacent to North-South Railroad\n PosN Near positive off-site feature--park, greenbelt, etc.\n PosA Adjacent to postive off-site feature\n RRNe Within 200' of East-West Railroad\n RRAe Adjacent to East-West Railroad\n\nCondition2: Proximity to various conditions (if more than one is present)\n Artery Adjacent to arterial street\n Feedr Adjacent to feeder street \n Norm Normal \n RRNn Within 200' of North-South Railroad\n RRAn Adjacent to North-South Railroad\n PosN Near positive off-site feature--park, greenbelt, etc.\n PosA Adjacent to postive off-site feature\n RRNe Within 200' of East-West Railroad\n RRAe Adjacent to East-West Railroad\n\nBldgType: Type of dwelling\n 1Fam Single-family Detached \n 2FmCon Two-family Conversion; originally built as one-family dwelling\n Duplx Duplex\n TwnhsE Townhouse End Unit\n TwnhsI Townhouse Inside Unit\n\nHouseStyle: Style of dwelling\n 1Story One story\n 1.5Fin One and one-half story: 2nd level finished\n 1.5Unf One and one-half story: 2nd level unfinished\n 2Story Two story\n 2.5Fin Two and one-half story: 2nd level finished\n 2.5Unf Two and one-half story: 2nd level unfinished\n SFoyer Split Foyer\n SLvl Split Level\n\nOverallQual: Rates the overall material and 
finish of the house\n 10 Very Excellent\n 9 Excellent\n 8 Very Good\n 7 Good\n 6 Above Average\n 5 Average\n 4 Below Average\n 3 Fair\n 2 Poor\n 1 Very Poor\n\nOverallCond: Rates the overall condition of the house\n 10 Very Excellent\n 9 Excellent\n 8 Very Good\n 7 Good\n 6 Above Average \n 5 Average\n 4 Below Average \n 3 Fair\n 2 Poor\n 1 Very Poor\n\nYearBuilt: Original construction date\nYearRemodAdd: Remodel date (same as construction date if no remodeling or additions)\nRoofStyle: Type of roof\n Flat Flat\n Gable Gable\n Gambrel Gabrel (Barn)\n Hip Hip\n Mansard Mansard\n Shed Shed\n\nRoofMatl: Roof material\n ClyTile Clay or Tile\n CompShg Standard (Composite) Shingle\n Membran Membrane\n Metal Metal\n Roll Roll\n Tar&Grv Gravel & Tar\n WdShake Wood Shakes\n WdShngl Wood Shingles\n\nExterior1st: Exterior covering on house\n AsbShng Asbestos Shingles\n AsphShn Asphalt Shingles\n BrkComm Brick Common\n BrkFace Brick Face\n CBlock Cinder Block\n CemntBd Cement Board\n HdBoard Hard Board\n ImStucc Imitation Stucco\n MetalSd Metal Siding\n Other Other\n Plywood Plywood\n PreCast PreCast \n Stone Stone\n Stucco Stucco\n VinylSd Vinyl Siding\n Wd Sdng Wood Siding\n WdShing Wood Shingles\n\nExterior2nd: Exterior covering on house (if more than one material)\n AsbShng Asbestos Shingles\n AsphShn Asphalt Shingles\n BrkComm Brick Common\n BrkFace Brick Face\n CBlock Cinder Block\n CemntBd Cement Board\n HdBoard Hard Board\n ImStucc Imitation Stucco\n MetalSd Metal Siding\n Other Other\n Plywood Plywood\n PreCast PreCast\n Stone Stone\n Stucco Stucco\n VinylSd Vinyl Siding\n Wd Sdng Wood Siding\n WdShing Wood Shingles\n\nMasVnrType: Masonry veneer type\n BrkCmn Brick Common\n BrkFace Brick Face\n CBlock Cinder Block\n None None\n Stone Stone\n\nMasVnrArea: Masonry veneer area in square feet\nExterQual: Evaluates the quality of the material on the exterior \n Ex Excellent\n Gd Good\n TA Average/Typical\n Fa Fair\n Po Poor\n\nExterCond: Evaluates the present condition of 
the material on the exterior\n Ex Excellent\n Gd Good\n TA Average/Typical\n Fa Fair\n Po Poor\n\nFoundation: Type of foundation\n BrkTil Brick & Tile\n CBlock Cinder Block\n PConc Poured Contrete \n Slab Slab\n Stone Stone\n Wood Wood\n\nBsmtQual: Evaluates the height of the basement\n Ex Excellent (100+ inches) \n Gd Good (90-99 inches)\n TA Typical (80-89 inches)\n Fa Fair (70-79 inches)\n Po Poor (<70 inches\n NA No Basement\n\nBsmtCond: Evaluates the general condition of the basement\n Ex Excellent\n Gd Good\n TA Typical - slight dampness allowed\n Fa Fair - dampness or some cracking or settling\n Po Poor - Severe cracking, settling, or wetness\n NA No Basement\n\nBsmtExposure: Refers to walkout or garden level walls\n Gd Good Exposure\n Av Average Exposure (split levels or foyers typically score average or above) \n Mn Mimimum Exposure\n No No Exposure\n NA No Basement\n\nBsmtFinType1: Rating of basement finished area\n GLQ Good Living Quarters\n ALQ Average Living Quarters\n BLQ Below Average Living Quarters \n Rec Average Rec Room\n LwQ Low Quality\n Unf Unfinshed\n NA No Basement\n\nBsmtFinSF1: Type 1 finished square feet\nBsmtFinType2: Rating of basement finished area (if multiple types)\n GLQ Good Living Quarters\n ALQ Average Living Quarters\n BLQ Below Average Living Quarters \n Rec Average Rec Room\n LwQ Low Quality\n Unf Unfinshed\n NA No Basement\n\nBsmtFinSF2: Type 2 finished square feet\nBsmtUnfSF: Unfinished square feet of basement area\nTotalBsmtSF: Total square feet of basement area\nHeating: Type of heating\n Floor Floor Furnace\n GasA Gas forced warm air furnace\n GasW Gas hot water or steam heat\n Grav Gravity furnace \n OthW Hot water or steam heat other than gas\n Wall Wall furnace\n\nHeatingQC: Heating quality and condition\n Ex Excellent\n Gd Good\n TA Average/Typical\n Fa Fair\n Po Poor\n\nCentralAir: Central air conditioning\n N No\n Y Yes\n\nElectrical: Electrical system\n SBrkr Standard Circuit Breakers & Romex\n FuseA Fuse Box over 
60 AMP and all Romex wiring (Average) \n FuseF 60 AMP Fuse Box and mostly Romex wiring (Fair)\n FuseP 60 AMP Fuse Box and mostly knob & tube wiring (poor)\n Mix Mixed\n\n1stFlrSF: First Floor square feet\n2ndFlrSF: Second floor square feet\nLowQualFinSF: Low quality finished square feet (all floors)\nGrLivArea: Above grade (ground) living area square feet\nBsmtFullBath: Basement full bathrooms\nBsmtHalfBath: Basement half bathrooms\nFullBath: Full bathrooms above grade\nHalfBath: Half baths above grade\nBedroom: Bedrooms above grade (does NOT include basement bedrooms)\nKitchen: Kitchens above grade\nKitchenQual: Kitchen quality\n Ex Excellent\n Gd Good\n TA Typical/Average\n Fa Fair\n Po Poor\n\nTotRmsAbvGrd: Total rooms above grade (does not include bathrooms)\nFunctional: Home functionality (Assume typical unless deductions are warranted)\n Typ Typical Functionality\n Min1 Minor Deductions 1\n Min2 Minor Deductions 2\n Mod Moderate Deductions\n Maj1 Major Deductions 1\n Maj2 Major Deductions 2\n Sev Severely Damaged\n Sal Salvage only\n\nFireplaces: Number of fireplaces\nFireplaceQu: Fireplace quality\n Ex Excellent - Exceptional Masonry Fireplace\n Gd Good - Masonry Fireplace in main level\n TA Average - Prefabricated Fireplace in main living area or Masonry Fireplace in basement\n Fa Fair - Prefabricated Fireplace in basement\n Po Poor - Ben Franklin Stove\n NA No Fireplace\n\nGarageType: Garage location\n 2Types More than one type of garage\n Attchd Attached to home\n Basment Basement Garage\n BuiltIn Built-In (Garage part of house - typically has room above garage)\n CarPort Car Port\n Detchd Detached from home\n NA No Garage\n\nGarageYrBlt: Year garage was built\nGarageFinish: Interior finish of the garage\n Fin Finished\n RFn Rough Finished \n Unf Unfinished\n NA No Garage\n\nGarageCars: Size of garage in car capacity\nGarageArea: Size of garage in square feet\nGarageQual: Garage quality\n Ex Excellent\n Gd Good\n TA Typical/Average\n Fa Fair\n Po Poor\n 
NA No Garage\n\nGarageCond: Garage condition\n Ex Excellent\n Gd Good\n TA Typical/Average\n Fa Fair\n Po Poor\n NA No Garage\n\nPavedDrive: Paved driveway\n Y Paved \n P Partial Pavement\n N Dirt/Gravel\n\nWoodDeckSF: Wood deck area in square feet\nOpenPorchSF: Open porch area in square feet\nEnclosedPorch: Enclosed porch area in square feet\n3SsnPorch: Three season porch area in square feet\nScreenPorch: Screen porch area in square feet\nPoolArea: Pool area in square feet\nPoolQC: Pool quality\n Ex Excellent\n Gd Good\n TA Average/Typical\n Fa Fair\n NA No Pool\n\nFence: Fence quality\n GdPrv Good Privacy\n MnPrv Minimum Privacy\n GdWo Good Wood\n MnWw Minimum Wood/Wire\n NA No Fence\n\nMiscFeature: Miscellaneous feature not covered in other categories\n Elev Elevator\n Gar2 2nd Garage (if not described in garage section)\n Othr Other\n Shed Shed (over 100 SF)\n TenC Tennis Court\n NA None\n\nMiscVal: $Value of miscellaneous feature\nMoSold: Month Sold (MM)\nYrSold: Year Sold (YYYY)\nSaleType: Type of sale\n WD Warranty Deed - Conventional\n CWD Warranty Deed - Cash\n VWD Warranty Deed - VA Loan\n New Home just constructed and sold\n COD Court Officer Deed/Estate\n Con Contract 15% Down payment regular terms\n ConLw Contract Low Down payment and low interest\n ConLI Contract Low Interest\n ConLD Contract Low Down\n Oth Other\n\nSaleCondition: Condition of sale\n Normal Normal Sale\n Abnorml Abnormal Sale - trade, foreclosure, short sale\n AdjLand Adjoining Land Purchase\n Alloca Allocation - two linked properties with separate deeds, typically condo with a garage unit \n Family Sale between family members\n Partial Home was not completed when last assessed (associated with New Homes)\n\nExplore",
"import matplotlib.pyplot as plt\n%matplotlib inline\nimport seaborn as sns",
"Sale Price",
"sns.distplot(df_train[\"SalePrice\"]);",
"grlivarea vs Sale Price",
"df_train.plot.scatter(x=\"GrLivArea\", y=\"SalePrice\")",
"TotalBsmtSF vs Sale Price",
"df_train.plot.scatter(x=\"TotalBsmtSF\", y=\"SalePrice\")",
"box plot overallqual/saleprice",
"var = 'OverallQual'\ndata = pd.concat([df_train['SalePrice'], df_train[var]], axis=1)\nf, ax = plt.subplots(figsize=(8, 6))\nfig = sns.boxplot(x=var, y=\"SalePrice\", data=data)\nfig.axis(ymin=0, ymax=800000);",
"box plot year built / saleprice",
"var = 'YearBuilt'\ndata = pd.concat([df_train['SalePrice'], df_train[var]], axis=1)\nf, ax = plt.subplots(figsize=(16, 8))\nfig = sns.boxplot(x=var, y=\"SalePrice\", data=data)\nfig.axis(ymin=0, ymax=800000);\nplt.xticks(rotation=90);",
"correlation matrix",
"corrmat = df_train.corr()\nf, ax = plt.subplots(figsize=(12, 9))\nsns.heatmap(corrmat, vmax=.8, square=True);",
"scatterplot with highly correlated features",
"sns.set()\ncols = ['SalePrice', 'OverallQual', 'GrLivArea', 'GarageCars', 'TotalBsmtSF', 'FullBath', 'YearBuilt']\nsns.pairplot(df_train[cols], height = 2.5);\nplt.show();",
"Missing Data",
"missing_features = df_train.isnull().sum()\n\nmissing_features[missing_features>0]\n\ndf_train.shape"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
eds-uga/csci1360-fa16
|
lectures/L19.ipynb
|
mit
|
[
"Lecture 19: Natural Language Processing I\nCSCI 1360: Foundations for Informatics and Analytics\nOverview and Objectives\nWe've covered about all the core basics of Python and are now solidly into how we wield these tools in the realm of data science. One extremely common, almost unavoidable application is text processing. It's a messy, complex, but very rewarding subarea that has reams of literature devoted to it, whereas we have this single lecture. By the end of this lecture, you should be able to:\n\nDifferentiate structured from unstructured data\nUnderstand the different string parsing tools available through Python\nGrasp some of the basic preprocessing steps required when text is involved\nDefine the \"bag of words\" text representation\n\nPart 1: Text Preprocessing\n\"Preprocessing\" is something of a recursively ambiguous: it's the processing before the processing (what?).\nMore colloquially, it's the processing that you do in order to put your data in a useful format for the actual analysis you intend to perform. As we saw in the previous lecture, this is what data scientists spend the majority of their time doing, so it's important to know and understand the basic steps.\nThe vast majority of interesting data is in unstructured format. You can think of this kind of like data in its natural habitat. Like wild animals, though, data in unstructured form requires significantly more effort to study effectively.\n\nOur goal in preprocessing is, in a sense, to turn unstructured data into structured data, or data that has a logical flow and format. CSV and JSON formats would be considered structured.\nTo start, let's go back to the Alice in Wonderland example from the previous lecture (you can download the text version of the book here).",
"book = None\ntry: # Good coding practices!\n f = open(\"alice.txt\", \"r\")\n book = f.read()\nexcept FileNotFoundError:\n print(\"Could not find alice.txt.\")\nelse:\n f.close()\n print(book[:71]) # Print the first 71 characters.",
"Recalling the mechanics of file I/O from the previous lecture, you'll see we opened up a file descriptor to alice.txt and read the whole file in a single go, storing all the text as a single string book. We then closed the file descriptor and printed out the first line (or first 71 characters), while wrapping the entire operation in a try / except block.\nBut as we saw before, it's also pretty convenient to split up a large text file by lines. You could use the readlines() method instead, but you can take a string and split it up into a list of strings as well.",
"print(type(book))\nlines = book.split(\"\\n\") # Split the string. Where should the splits happen? On newline characters, of course.\nprint(type(lines))",
"voilà! lines is now a list of strings.",
"print(len(lines))",
"...a list of over 3,700 lines of text, no less o_O\nNewline characters\nLet's go over this point in a little more detail.\nA \"newline\" character is an actual character--like \"a\" or \"b\" or \"1\" or \":\"--that represents pressing the \"enter\" key. However, like tabs and spaces, this character falls under the category of a \"whitespace\" character, meaning that in print you can't actually see it; the computer hides it.\nHowever, in programming languages like Python (and Java, and C, and Matlab, and R, and and and...), they need a way to explicitly represent these whitespace characters, specifically when processing text like we're doing right now.\nSo, even though you can't see tabs or newlines in the actual text--go ahead and open up Alice in Wonderland and tell me if you can see the actual characters representing newlines and tabs--you can see these characters in Python.\n\nTabs are represented by a backslash followed by the letter \"t\", the whole thing in quotes: \"\\t\"\nNewlines are represented by a backslash followed by the letter \"n\", the whole thing in quotes: \"\\n\"\n\n\"But wait!\" you say, \"Slash-t and slash-n are two characters each, not one! What kind of shenanigans are you trying to pull?\"\nYes, it's weird. If you build a career in text processing, you'll find the backslash has a long and storied history as a kind of \"meta\"-character, in that it tells whatever programming language that the character after it is a super-special snowflake. So in some sense, the backslash-t and backslash-n constructs are actually one character, because the backslash is the text equivalent of a formal introduction.\nBack to text parsing\nWhen we called split() on the string holding the entire Alice in Wonderland book, we passed in the argument \"\\n\", which is the newline character. 
In doing so, we instructed Python to\n\n\nSplit up the original string (hence, the name of the function) into a list of strings\n\n\nThe end of one list and the beginning of the next list would be delimited by the occurrence of a newline character \"\\n\" in the original string. In a sense, we're treating the book as a \"newline-delimited\" format\n\n\nReturn a list of strings, where each string is one line of the book\n\n\nAn important distinction for text processing neophytes: this splits the book up on a line by line basis, NOT a sentence by sentence basis. There are a lot of implicit semantic assumptions we hold from a lifetime of taking our native language for granted, but which Python has absolutely no understanding of beyond what we tell it to do.\nYou certainly could, in theory, split the book on punctuation, rather than newlines. This is a bit trickier to do without regular expressions (see Part 3), but to give an example of splitting by period:",
"sentences = book.split(\".\")\nprint(sentences[0])",
"You can already see some problems with this approach: not all sentences end with periods. Sure, you could split things again on question marks and exclamation points, but this still wouldn't tease out the case of the title--which has NO punctuation to speak of!--and doesn't account for important literary devices like semicolons and parentheses. These are valid punctuation characters in English! But how would you handle them?\nCleaning up trailing whitespace\nYou may have noticed that, whenever you invoke the print() statement, you automatically get a new line even though I doubt you've ever added a \"\\n\" to the end of the string you're printing.",
"print(\"Even though there's no newline in the string I wrote, Python's print function still adds one.\")\nprint() # Blank line!\nprint(\"There's a blank line above.\")",
"This is fine for 99% of cases, except when the string already happens to have a newline at the end.",
"print(\"Here's a string with an explicit newline --> \\n\")\nprint()\nprint(\"Now there are TWO blank lines above!\")",
"\"But wait!\" you say again, \"You read in the text file and split it on newlines a few slides ago, but when you printed out the first line, there was no extra blank line underneath! Why did that work today but not in previous lectures?\"\nAn excellent question. It has to do with the approach we took. Previously, we used the readline() method, which hands you back one line of text at a time with the trailing newline intact:",
"readlines = None\ntry:\n with open(\"alice.txt\", \"r\") as f:\n readlines = f.readlines()\nexcept:\n print(\"Something went wrong.\")\nprint(readlines[0])\nprint(readlines[2])\nprint(\"There are blank lines because of the trailing newline characters.\")",
"On the other hand, when you call split() on a string, it not only identifies all the instances of the character you specify as the endpoints of each successive list, but it also removes those characters from the ensuing lists.",
"print(readlines[0]) # This used readlines(), so it STILL HAS trailing newlines.\nprint(lines[0]) # This used split(), so the newlines were REMOVED.\nprint(\"No trailing newline when using split()!\")",
"Is this getting confusing? If so, just remember the following: \nIn general, make liberal use of the strip() function for strings you read in from files.\nThis function strips (hence, the name) any whitespace off the front AND end of a string. So in the following example:",
"trailing_whitespace = \" \\t this is the important part \\n \\n \\t \"\nno_whitespace = trailing_whitespace.strip()\nprint(\"Border --> |{}| <-- Border\".format(no_whitespace))",
"All the pesky spaces, tabs, and newlines have been stripped off the string. This is extremely useful and pretty much a must when you're preprocessing text.\nCapitalization\nThis is one of those insidious that seems like such a tiny detail but can radically alter your analysis if left unnoticed: developing a strategy for how you're going to handle uppercase versus lowercase. \nTake the following example from Alice in Wonderland, lines 410 and 411:",
"print(lines[410])\nprint(lines[411])",
"You'll notice the word \"and\" appears twice: once at the beginning of the sentence in line 410, and again in the middle of the sentence in line 411. It's the same word, but given their difference in capitalization, it's entirely likely that your analysis framework would treat those as two separate words. After all, \"and\" != \"And\". Go ahead and try!\nA common strategy is to simply lowercase everything. Yes, you likely lose a little bit of information, as it becomes more difficult to identify proper nouns, but a significant source of confusion--is it a proper noun, or just the start of a sentence? has the meaning of the word changed if it's in lowercase versus ALL CAPS? what if you're comparing multiple styles of writing and the authors use different literary forms of capitalizatoin?--is removed entirely.\nYou can do this with the Python string's lower() method:",
"print(lines[0])\ntitle = lines[0].lower()\nprint(title)",
"Now everything is, in some sense, \"equivalent.\"\nPart 2: The \"Bag of Words\"\nThe \"bag of words\" model is one of the most popular ways of representing a large collection of text, and one of the easiest ways to structure text.\nThe \"bag of words\" on display on the 8th floor of the Computer Science building at Carnegie Mellon University:\n\nWhen using this model, the implicit assumptions behind it are saying\n\n\nRelative word order and grammar DON'T MATTER to the overall meaning of the text.\n\n\nRelative word frequencies ABSOLUTELY MATTER to the overall meaning of the text.\n\n\nFormally, the bag of words is a \"multiset\", but you can think of it like a Python dictionary. In fact, at its simplest, that's all the bag of words is: a count of how many times each word occurs in your text. But like dictionaries, ordering no longer matters.\nTo illustrate, let's go ahead and design a word counter for Alice in Wonderland! First, we'll initialize our dictionary of counts. To make our lives easier, we'll use a defaultdict, a special kind of dictionary you can use when you want automatic default values enforced for keys that don't exist.",
"from collections import defaultdict\nword_counts = defaultdict(int) # All values are integers.",
"It otherwise behaves exactly like a regular Python dictionary, except we won't get a KeyError if we reference a key that doesn't exist; instead, a new key will be automatically created and a default value set. For the int type, this default value is 0.\nNext, we'll iterate through the lines of the book. There are a couple things we need to do here:\n\n\nFor each line, split the line into single words. We'll go back yet again to our good friend split().\n\n\nNow we'll have a list of words, so we'll need to iterate over these words, lowercasing them all and then adding them up.\n\n\nSo the code should look something like this:",
"for line in lines: # Iterate through the lines of the book\n words = line.split() # If you don't give split() any arguments, the *default* split character is any whitespace.\n for word in words:\n w = word.lower() # Convert to lowercase.\n word_counts[w] += 1 # Add 1 to the count for that word in our word dictionary.",
"Let's take a look at what we have! First, we'll count how many unique words there are.",
"print(\"Unique words: {}\".format(len(word_counts.keys())))",
"Next, we'll count the total number of words in the book.",
"print(\"Total words: {}\".format(sum(word_counts.values())))",
"Now we'll find the word that occurred most often:",
"maxcount = -1\nmaxitem = None\nfor k, v in word_counts.items():\n if v > maxcount:\n maxcount = v\n maxitem = k\nprint(\"'{}' occurred most often ({} times).\".format(maxitem, maxcount))",
"Well, there's a shocker. /sarcasm\nPython has another incredibly useful utility class for whenever we're counting things: a Counter! This will let us easily find the n words with the highest counts.",
"from collections import Counter\ncounts = Counter(word_counts)\nprint(counts.most_common(20)) # Find the 20 words with the highest counts!",
"Pretty boring, right? Most of these words are referred to as stop words, or words that used pretty much in every context and therefore don't tell you anything particularly interesting. They're usually filtered out, but because of some interesting corner cases, there's no universal \"stop word list\"; it's generally up to you to decide what words to remove (though pretty much all of the above top 20, with the exception of \"alice\", can be removed).\nSo, in addition to stripping out and splitting on whitespace, and lowercasing all the words, we also check if the word is part of some pre-built stop-word list. If it is, just throw it out; if not, then we'll count it.\nPart 3: String Formatting\nWe've seen previously how to convert strings and numbers (integers and floating-point values) back and forth; just using the str(), int(), and float() functions. Pretty easy.\nHere's a harder question: how do you represent a floating-point number as a string, but to only 2 decimal places?\nAnother hard question: how do you represent an integer as string, but with 3 leading zeros?\nYou've probably noticed the bizarre notation I've used when printing out strings.",
"print(\"Here's the notation --> {}\".format(\"another string\"))",
"By using the curly braces {} inside the string, I've effectively created a placeholder for another string to be inserted. That other string is the argument(s) to the format() function.\nBut there's a lot more to the curly braces than just {}.\nThe simplest is just using the curly braces and nothing else. If you specify multiple pairs of curly braces, you'll need to specify an equal number of arguments to format(), and they'll be inserted into the string in the order you gave them to format().",
"print(\"{}, {}, and {}\".format(\"a\", \"b\", \"c\"))",
"Alternatively, you can specify the indices of the format() arguments inside the curly braces:",
"print(\"{0}, {2}, and {1}\".format(\"a\", \"b\", \"c\"))",
"Notice the 2nd and 3rd arguments were flipped in their final ordering!\nYou can even provide arbitrary named arguments inside the curly braces, which format() will then expect.",
"print(\"{first_arg}, {second_arg}, and {third_arg}\".format(second_arg = \"b\", first_arg = \"a\", third_arg = \"c\"))",
"Leading zeros and decimal precision\nYou can also use this same syntax to specify leading zeros and decimal precision, but the notation gets a little more complicated.\nYou'll need to first enter a colon \":\", followed by the number 0, followed by the number of places that should be counted:",
"print(\"One leading zero: {:02}\".format(1))\nprint(\"Two leading zeros: {:03}\".format(1))\nprint(\"One leading zero: {:04}\".format(100))\nprint(\"Two leading zeros: {:05}\".format(100))",
"Decimal precision is very similar, but instead of a 0, you'll specify a decimal point \".\" followed by the level of precision you want (a number), followed by the letter \"f\" to signify that it's a floating-point:",
"import numpy as np\nprint(\"Unformatted: {}\".format(np.pi))\nprint(\"Two decimal places: {:.2f}\".format(np.pi))",
"Finally, you can also include the comma in large numbers so you can actually read them more easily:",
"big_number = 98483745834\nprint(\"Big number: {}\".format(big_number))\nprint(\"Big number with commas: {:,}\".format(big_number))",
"Additional string functions\nThere is an entire ecosystem of Python string functions that I highly encourage you to investigate, but I'll go over a few of the most common here.\nupper() and lower(): we've seen the latter already, but the former can be just as useful.\ncount() will give you the number of times a substring occurs in the actual string. If you're interested in one word in particular, this can be a very efficient way of finding it:",
"print(\"'Wonderland' occurs {} times.\".format(book.count(\"Wonderland\")))",
"What if you need to find the actual location in a string of that substring? As in, where is \"Wonderland\" first mentioned in the book? find() to the rescue!",
"print(\"'Wonderland' is first found {} characters in.\".format(book.find(\"Wonderland\")))",
"...well, that's embarrassing; that's probably the \"Wonderland\" that's in the book title. How about the second occurrence, then? We can use the index of the first one to tell find() that we want to start looking from there.",
"print(\"'Wonderland' is first found {} characters in.\".format(book.find(\"Wonderland\", 43 + 1)))",
"Now, I've decided I don't want this book to be Alice in Wonderland, but rather Alice in Las Vegas! How can I make this happen? replace()!",
"my_book = book.replace(\"Wonderland\", \"Las Vegas\")\nprint(my_book[:71])",
"I wonder if Alice will be Ocean's 14th?\nTwo more very useful string functions are startswith() and endswith(). These are great if you're testing for leading or trailing characters or words.",
"print(lines[8])\nprint(lines[8].startswith(\"Title\"))\nprint(lines[8].endswith(\"Wonderland\"))",
"Finally, the join() method. This is a little tricky to use, but insanely useful. It's cropped up on a couple previous assignments.\nYou'll want to use this method whenever you have a list of strings that you want to \"glue\" together into a single string. Perhaps you have a list of words and want to put them back together into a sentence!",
"words = lines[8].split(\" \")\nprint(words)",
"We can do this by specifying first the character we want to put in between all the words we're joining--in this case, just a space character--then calling join() on that character, and passing in the list of words we want to glue together as the argument to the function.",
"between_char = \" \"\nsentence = between_char.join(words)\nprint(sentence)",
"Review Questions\nSome questions to discuss and consider:\n1: Provide at least three examples of unstructured text data. What could be done to structure these data?\n2: Recall in the lecture when we computed the number of unique words in Alice in Wonderland. That number we got is actually artificially high. Can you explain why? How can we fix this? (hint: think about what we didn't strip off lines and words)\n3: Can you think of any weaknesses in the bag-of-words model? How could you potentially mitigate these weaknesses? (hint: instead of splitting by word...)\n4: I want to know how many times the word \"Alice\" is the first word of a sentence in Alice in Wonderland. Assuming the book has already been read into Python as a single string, describe the steps (in English or pseudocode) required to obtain this number.\n5: I have a 1D NumPy array of floating-point numbers I want to manually convert to a comma-separated string, where each number is printed to six-decimal precision. Using the join() function, explain in words or pseudocode how this would be done.\nCourse Administrivia\n\n\nA8 is out, and A7 is due. Any questions?\n\n\nNext week we're going deeper into NLP techniques and tools, and then we'll have our 4th review session.\n\n\nAdditional Resources\n\nGrus, Joel. Data Science from Scratch, Chapter 9. 2015. ISBN-13: 978-1491901427\nSaha, Amit. Doing Math with Python, Chapter 3. 2015. ISBN-13: 978-1593276409\nAlice in Wonderland, by Lewis Carroll. Project Gutenburg. http://www.gutenberg.org/cache/epub/11/pg11.txt"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
ameliecordier/iutdoua-info_algo2015
|
2015-12-02 - TD15 - Révisions sur la récursivité.ipynb
|
cc0-1.0
|
[
"Préambule : nous avons commencé par faire un rappel sur la récursivité en ré-écrivant le comportement de factorielle au tableau et en déroulant l'algorithme à la main. \nNous rappelons qu'une fonction récursive est une fonction qui s'appelle elle-même. Chaque appel à la fonction est indépendant des autres. Le moyen le plus simple pour réussir l'écriture d'une fonction récursive est de toujours commencer par exprimer le cas simple (la solutiotin immédiate), puis d'écrire le cas récursif (la solution qui fait appel à la solution plus simple).\nCi-dessous, vous trouverez le squelette d'une fonction récursive typique.",
"if (cas simple):\n (solution immédiate)\nelse:\n (solution récursive,\n impliquant un cas plus simple que le problème original)",
"Exercice Robozzle : nous avons ensuite résolu l'exercice Robozzle n°656, l'objectif était de vous faire comprendre le fonctionnement de la pile d'appels récursifs. http://robozzle.com/js/play.aspx?puzzle=656",
"def fact(n):\n \"\"\"\n :entrée n: int\n :pré-cond: n > 0\n :sortie f: int\n :post-cond: f = n * (n-1) * ... * 1\n \"\"\"\n if n == 1:\n f = 1\n else:\n f = fact(n-1)*n\n print(\"--- fact({}) = {}\".format(n,f))\n return f\n\nprint(fact(6))\n",
"Vous cherchez d'autres idées pour vous entraîner à la récursivité ? Proposez donc l'algorithme de puissance, celui de Fibonacci ou encore le triangle de Pascal en récursif."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
mne-tools/mne-tools.github.io
|
dev/_downloads/0a1bad60270bfbdeeea274fcca0015d2/multidict_reweighted_tfmxne.ipynb
|
bsd-3-clause
|
[
"%matplotlib inline",
"Compute iterative reweighted TF-MxNE with multiscale time-frequency dictionary\nThe iterative reweighted TF-MxNE solver is a distributed inverse method\nbased on the TF-MxNE solver, which promotes focal (sparse) sources\n:footcite:StrohmeierEtAl2015. The benefits of this approach are that:\n\nit is spatio-temporal without assuming stationarity (source properties\n can vary over time),\nactivations are localized in space, time, and frequency in one step,\nthe solver uses non-convex penalties in the TF domain, which results in a\n solution less biased towards zero than when simple TF-MxNE is used,\nusing a multiscale dictionary allows to capture short transient\n activations along with slower brain waves :footcite:BekhtiEtAl2016.",
"# Author: Mathurin Massias <mathurin.massias@gmail.com>\n# Yousra Bekhti <yousra.bekhti@gmail.com>\n# Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>\n# Alexandre Gramfort <alexandre.gramfort@inria.fr>\n#\n# License: BSD-3-Clause\n\nimport os.path as op\n\nimport mne\nfrom mne.datasets import somato\nfrom mne.inverse_sparse import tf_mixed_norm, make_stc_from_dipoles\nfrom mne.viz import plot_sparse_source_estimates\n\nprint(__doc__)",
"Load somatosensory MEG data",
"data_path = somato.data_path()\nsubject = '01'\ntask = 'somato'\nraw_fname = op.join(data_path, 'sub-{}'.format(subject), 'meg',\n 'sub-{}_task-{}_meg.fif'.format(subject, task))\nfwd_fname = op.join(data_path, 'derivatives', 'sub-{}'.format(subject),\n 'sub-{}_task-{}-fwd.fif'.format(subject, task))\n\n# Read evoked\nraw = mne.io.read_raw_fif(raw_fname)\nraw.pick_types(meg=True, eog=True, stim=True)\nevents = mne.find_events(raw, stim_channel='STI 014')\n\nreject = dict(grad=4000e-13, eog=350e-6)\nevent_id, tmin, tmax = dict(unknown=1), -0.5, 0.5\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, reject=reject,\n baseline=(None, 0))\nevoked = epochs.average()\n\nevoked.crop(tmin=0.0, tmax=0.2)\n\n# Compute noise covariance matrix\ncov = mne.compute_covariance(epochs, rank='info', tmax=0.)\ndel epochs, raw\n\n# Handling forward solution\nforward = mne.read_forward_solution(fwd_fname)",
"Run iterative reweighted multidict TF-MxNE solver",
"alpha, l1_ratio = 20, 0.05\nloose, depth = 0.9, 1.\n# Use a multiscale time-frequency dictionary\nwsize, tstep = [4, 16], [2, 4]\n\n\nn_tfmxne_iter = 10\n# Compute TF-MxNE inverse solution with dipole output\ndipoles, residual = tf_mixed_norm(\n evoked, forward, cov, alpha=alpha, l1_ratio=l1_ratio,\n n_tfmxne_iter=n_tfmxne_iter, loose=loose,\n depth=depth, tol=1e-3,\n wsize=wsize, tstep=tstep, return_as_dipoles=True,\n return_residual=True)",
"Generate stc from dipoles",
"stc = make_stc_from_dipoles(dipoles, forward['src'])\nplot_sparse_source_estimates(\n forward['src'], stc, bgcolor=(1, 1, 1), opacity=0.1,\n fig_name=f\"irTF-MxNE (cond {evoked.comment})\")",
"Show the evoked response and the residual for gradiometers",
"ylim = dict(grad=[-300, 300])\nevoked.copy().pick_types(meg='grad').plot(\n titles=dict(grad='Evoked Response: Gradiometers'), ylim=ylim)\nresidual.copy().pick_types(meg='grad').plot(\n titles=dict(grad='Residuals: Gradiometers'), ylim=ylim)",
"References\n.. footbibliography::"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
VectorBlox/PYNQ
|
Pynq-Z1/notebooks/examples/arduino_grove_ledbar.ipynb
|
bsd-3-clause
|
[
"Grove LED Bar Example\nThis example shows how to use the Grove LED Bar on the Pynq-Z1 board. The LED bar has 10 LEDs: 8 green LEDs, 1 orange LED, and 1 red LED. The brightness for each LED can be set independantly.\nFor this notebook, a PYNQ Arduino shield is also required. The LED bar is attached to the G4 connection on the shield. The grove LED bar also works with PMODA and PMODB on the Pynq-Z1 board.",
"# Make sure the base overlay is loaded\nfrom pynq import Overlay\nOverlay(\"base.bit\").download()",
"1. Instantiate and reset LED Bar",
"from pynq.iop import Grove_LEDbar\nfrom pynq.iop import ARDUINO\nfrom pynq.iop import ARDUINO_GROVE_G4\n\n# Instantiate Grove LED Bar on Arduino shield G4\nledbar = Grove_LEDbar(ARDUINO,ARDUINO_GROVE_G4)\nledbar.reset()",
"2. Turn individual LEDs on or off\nWrite a 10-bit binary pattern, with each bit representing the corresponding LED. 1 = on, 0 = off",
"from time import sleep\n\n# Light up different bars in a loop\nfor i in range(2):\n ledbar.write_binary(0b1010100000)\n sleep(0.5)\n ledbar.write_binary(0b0000100100)\n sleep(0.5)\n ledbar.write_binary(0b1010101110)\n sleep(0.5)\n ledbar.write_binary(0b1111111110)\n sleep(0.5)",
"3. Set LEDs individually with different brightness levels\nThe brightness of each LED can be set individually by writing a list of 10x 8-bit values to the LED bar. 0 is off, 0xff is full brightness.",
"# Brightness 0-255\nHIGH = 0xFF\nMED = 0xAA\nLOW = 0x01\nOFF = 0X00\n\nbrightness = [OFF, OFF, OFF, LOW, LOW, MED, MED, HIGH, HIGH, HIGH]\n\nledbar.write_brightness(0b1111111111,brightness)",
"4. Set the \"level\" or the number of LEDs which are set\nA number or level of LEDs can be turned on, started from either end of the LED bar. For example, this feature could be used to indicate the level of something being measured.\nwrite_level(level, bright_level, green_to_red)\n\nlevel is the number of LEDs that are on.\nbright_level [0-10] is the level of brightness\ngreen_to_red = 1 means the LEDs start being lit from the \"green\" end of the LED bar\ngreen_to_red = 0 means the LEDs start being lit from the \"red\" end of the LED bar.\n\nFor example, ledbar.write_level(5,4,1) will light 5 LEDs, to brightness 4 (out of 10) and will start from the Green LED (the LED furthest away from Grove connector on the LED bar module.)",
"for i in range (1,11):\n ledbar.write_level(i,3,0)\n sleep(0.3)\nfor i in range (1,10):\n ledbar.write_level(i,3,1)\n sleep(0.3) ",
"5. Controlling the LED Bar from the board buttons\nThis cell demonstrates controlling the \"level\" of the LEDs from onboard buttons. \n\nButton 0 to increase level\nButton 1 to decrease level\nButton 3 to exit",
"from pynq.board import Button\n\nbtns = [Button(index) for index in range(4)] \ni = 1\nledbar.reset()\n\ndone = False\nwhile not done:\n if (btns[0].read()==1):\n sleep(0.2)\n ledbar.write_level(i,2,1)\n i = min(i+1,9)\n elif (btns[1].read()==1):\n sleep(0.2)\n i = max(i-1,0)\n ledbar.write_level(i,2,1)\n elif (btns[3].read()==1):\n ledbar.reset()\n done = True"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
ForestClaw/forestclaw
|
applications/clawpack/advection/2d/filament/filament.ipynb
|
bsd-2-clause
|
[
"Filament\n\nScalar advection problem with swirling velocity field.\n\nRun code in serial mode (will work, even if code is compiled with MPI)",
"!filament",
"Or, run code in parallel mode (command may need to be customized, depending your on MPI installation.)",
"!mpirun -n 4 filament",
"Create PNG files for web-browser viewing, or animation.",
"%run make_plots.py",
"View PNG files in browser, using URL above, or create an animation of all PNG files, using code below.",
"%pylab inline\n\nimport glob\nfrom matplotlib import image\nfrom clawpack.visclaw.JSAnimation import IPython_display\nfrom matplotlib import animation\n\nfigno = 0\nfname = '_plots/*fig' + str(figno) + '.png'\nfilenames=sorted(glob.glob(fname))\n\nfig = plt.figure()\nim = plt.imshow(image.imread(filenames[0]))\ndef init():\n im.set_data(image.imread(filenames[0]))\n return im,\n\ndef animate(i):\n image_i=image.imread(filenames[i])\n im.set_data(image_i)\n return im,\n\nanimation.FuncAnimation(fig, animate, init_func=init,\n frames=len(filenames), interval=500, blit=True)",
""
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
AllenDowney/ThinkBayes2
|
examples/blaster_soln.ipynb
|
mit
|
[
"Think Bayes\nThis notebook presents code and exercises from Think Bayes, second edition.\nCopyright 2016 Allen B. Downey\nMIT License: https://opensource.org/licenses/MIT",
"# Configure Jupyter so figures appear in the notebook\n%matplotlib inline\n\n# Configure Jupyter to display the assigned value after an assignment\n%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'\n\nimport math\nimport numpy as np\n\nfrom thinkbayes2 import Hist, Pmf, Suite, Beta\nimport thinkplot",
"The Alien Blaster problem\nIn preparation for an alien invasion, the Earth Defense League (EDL) has been working on new missiles to shoot down space invaders. Of course, some missile designs are better than others; let's assume that each design has some probability of hitting an alien ship, x.\nBased on previous tests, the distribution of x in the population of designs is well described by a Beta distribution with parameters 5, 10.\nNow suppose the new ultra-secret Alien Blaster 9000 is being tested. In a press conference, an EDL general reports that the new design has been tested twice, taking two shots during each test. The results of the test are confidential, so the general won't say how many targets were hit, but they report: \"The same number of targets were hit in the two tests, so we have reason to think this new design is consistent.\"\nIs this data good or bad; that is, does it increase or decrease your estimate of x for the Alien Blaster 9000?",
"# Solution\n\n# Here's the prior\n\nprior = Beta(5, 10)\nthinkplot.Pdf(prior.MakePmf())\nthinkplot.decorate(xlabel='Probability of hit',\n ylabel='PMF')\nprior.Mean()\n\n# Solution\n\n# And here's the likelhood function\n\nfrom scipy.stats import binom\n\nclass AlienBlaster(Suite):\n \n def Likelihood(self, data, hypo):\n \"\"\"Computes the likeliood of data under hypo.\n \n data: number of shots they took\n hypo: probability of a hit, p\n \"\"\"\n n = data\n x = hypo\n \n # specific version for n=2 shots\n likes = [x**4, (1-x)**4, (2*x*(1-x))**2]\n\n # general version for any n shots\n likes = [binom.pmf(k, n, x)**2 for k in range(n+1)]\n \n return np.sum(likes)\n\n# Solution\n\n# If we start with a uniform prior, \n# we can see what the likelihood function looks like:\n\npmf = Beta(1, 1).MakePmf()\nblaster = AlienBlaster(pmf)\nblaster.Update(2)\nthinkplot.Pdf(blaster)\nthinkplot.decorate(xlabel='Probability of hit',\n ylabel='PMF')\n\n# Solution\n\n# Now let's run it with the specified prior and \n# see what happens when we multiply the convex prior and \n# the concave posterior.\n\npmf = Beta(5, 10).MakePmf()\nblaster = AlienBlaster(pmf)\nthinkplot.Pdf(blaster, color='gray')\nblaster.Update(2)\nthinkplot.Pdf(blaster)\nthinkplot.decorate(xlabel='Probability of hit',\n ylabel='PMF')\n\n# Solution\n\n# The posterior mean is lower\n\nprior.Mean(), blaster.Mean()\n\n# Solution\n\n# So is the MAP\n\nprior.MAP(), blaster.MAP()\n\n# So if we learn that the new design is \"consistent\",\n# it is more likely to be consistently bad (in this case).",
"Part Two\nSuppose we\nhave we have a stockpile of 3 Alien Blaster 9000s and 7 Alien\nBlaster 10Ks. After extensive testing, we have concluded that\nthe AB9000 hits the target 30% of the time, precisely, and the\nAB10K hits the target 40% of the time.\nIf I grab a random weapon from the stockpile and shoot at 10 targets,\nwhat is the probability of hitting exactly 3? Again, you can write a\nnumber, mathematical expression, or Python code.",
"k = 3\nn = 10\nx1 = 0.3\nx2 = 0.4\n\n0.3 * binom.pmf(k, n, x1) + 0.7 * binom.pmf(k, n, x2)",
"The answer is a value drawn from the mixture of the two distributions.\nContinuing the previous problem, let's estimate the distribution\nof k, the number of successful shots out of 10. \n\n\nWrite a few lines of Python code to simulate choosing a random weapon and firing it.\n\n\nWrite a loop that simulates the scenario and generates random values of k 1000 times. \n\n\nStore the values of k you generate and plot their distribution.",
"def flip(p):\n return np.random.random() < p\n\ndef simulate_shots(n, p):\n return np.random.binomial(n, p)\n\nks = []\nfor i in range(1000):\n if flip(0.3):\n k = simulate_shots(n, x1)\n else:\n k = simulate_shots(n, x2)\n ks.append(k)",
"Here's what the distribution looks like.",
"pmf = Pmf(ks)\nthinkplot.Hist(pmf)\nthinkplot.decorate(xlabel='Number of hits',\n ylabel='PMF')\nlen(ks), np.mean(ks)",
"The mean should be near 3.7. We can run this simulation more efficiently using NumPy. First we generate a sample of xs:",
"xs = np.random.choice(a=[x1, x2], p=[0.3, 0.7], size=1000)\nHist(xs)",
"Then for each x we generate a k:",
"ks = np.random.binomial(n, xs);",
"And the results look similar.",
"pmf = Pmf(ks)\nthinkplot.Hist(pmf)\nthinkplot.decorate(xlabel='Number of hits',\n ylabel='PMF')\nnp.mean(ks)",
"One more way to do the same thing is to make a meta-Pmf, which contains the two binomial Pmf objects:",
"from thinkbayes2 import MakeBinomialPmf\n\npmf1 = MakeBinomialPmf(n, x1)\npmf2 = MakeBinomialPmf(n, x2)\n\nmetapmf = Pmf({pmf1:0.3, pmf2:0.7})\nmetapmf.Print()",
"Here's how we can draw samples from the meta-Pmf:",
"ks = [metapmf.Random().Random() for _ in range(1000)];",
"And here are the results, one more time:",
"pmf = Pmf(ks)\nthinkplot.Hist(pmf)\nthinkplot.decorate(xlabel='Number of hits',\n ylabel='PMF')\nnp.mean(ks)",
"This result, which we have estimated three ways, is a predictive distribution, based on our uncertainty about x.\nWe can compute the mixture analtically using thinkbayes2.MakeMixture:\ndef MakeMixture(metapmf, label='mix'):\n \"\"\"Make a mixture distribution.\n\n Args:\n metapmf: Pmf that maps from Pmfs to probs.\n label: string label for the new Pmf.\n\n Returns: Pmf object.\n \"\"\"\n mix = Pmf(label=label)\n for pmf, p1 in metapmf.Items():\n for k, p2 in pmf.Items():\n mix[k] += p1 * p2\n return mix\n\nThe outer loop iterates through the Pmfs; the inner loop iterates through the items.\nSo p1 is the probability of choosing a particular Pmf; p2 is the probability of choosing a value from the Pmf.\nIn the example, each Pmf is associated with a value of x (probability of hitting a target). The inner loop enumerates the values of k (number of targets hit after 10 shots).",
"from thinkbayes2 import MakeMixture\n\nmix = MakeMixture(metapmf)\nthinkplot.Hist(mix)\nmix.Mean()\n\nmix[3]",
"Exercise: Assuming again that the distribution of x in the population of designs is well-modeled by a beta distribution with parameters α=2 and β=3, what the distribution if k if I choose a random Alien Blaster and fire 10 shots?"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
fabriziocosta/GraphFinder
|
Functions_Fasta_Input_to_Structure_and_Graph_modifing...-submit.ipynb
|
gpl-2.0
|
[
"New tasks:\n\nmake a function/object that read a fasta file from disk and (header, seq) pairs +\nex from:\nAB003409.1/96-167\nGGGCCCAUAGCUCAGUGGUAGAGUGCCUCCUUUGCAAGGAGGAUGCCCUGGGUUCGAAUC comment\nCCAGUGGGUCCA\nAB009835.1/1-71\nCAUUAGAUGACUGAAAGCAAGUACUGGUCUCUUAAACCAUUUAAUAGUAAAUacagugcCUU\nCAUUAGAUGACUGAAAGCAAGUACUGGUCUCUUAAACCAUUUAAUAGUAAAUacagugcCUU\nCAUUAGAUGACUGAAAGCAAGUACUGGUCUCUUAAACCAUUUAAUAGUAAAUacagugcCUU\nCAUUAGAUGACUGAAAGCAAGUACUGGUCUCUUAAACCAUUUAAUAGUAAAUacagugcCUU\n\n\n\n\nAJGHDJHASGDJAS khsk skdjfhskdj slkshd skhksjdf\nCACGUAGCAUGCUAGCAUGCUAGCAUGCUAGCUAGCUGAC 276512764523765423764527365427365427542735427\nCAUCGUAGCUAGCUAGCUAGCUACG\nAUCGUAGUAGCUAGCUAGCUAGCUAGC\n\n\n\nyield:\n(AB003409.1/96-167, GGGCCCAUAGCUCAGUGGUAGAGUGCCUCCUUUGCAAGGAGGAUGCCCUGGGUUCGAAUCCCAGUGGGUCCA)\n(AB009835.1/1-71,CAUUAGAUGACUGAAAGCAAGUACUGGUCUCUUAAACCAUUUAAUAGUAAAUacagugcCUUCAUUAGAUGACUGAAAGCAAGUACUGGUCUCUUAAACCAUUUAAUAGUAAAUacagugcCUUCAUUAGAUGACUGAAAGCAAGUACUGGUCUCUUAAACCAUUUAAUAGUAAAUacagugcCUUCAUUAGAUGACUGAAAGCAAGUACUGGUCUCUUAAACCAUUUAAUAGUAAAUacagugcCUU)\n(AJGHDJHASGDJAS khsk skdjfhskdj slkshd skhksjdf, CACGUAGCAUGCUAGCAUGCUAGCAUGCUAGCUAGCUGACCAUCGUAGCUAGCUAGCUAGCUACGAUCGUAGUAGCUAGCUAGCUAGCUAGC)\n\n\nseparately:\n\nmake a function that receives in input the list of sequences, and yields structure graphs (use RNAfold)",
"%matplotlib inline\nimport os, sys\nimport subprocess as sp\nfrom itertools import cycle\nimport networkx as nx\nimport re\nfrom eden.util import display\n\n# read a fasta file separate the head and the sequence\ndef _readFastaFile(file_path=None):\n head_start = '>'\n head = []\n seq = []\n seq_temps = []\n string_seq = '' \n \n #for file in os.listdir(path): #open file\n read_file = open(file_path,'r') \n \n for line in read_file:\n lines = list(line)\n # the read line is the head of the sequence write it in head list\n if lines[0] == head_start:\n line = line.strip('\\n')\n line = line.strip(head_start)\n head.append(line)\n seq.append(string_seq)\n seq_temps = []\n\n # the read line is a sequence write it in a sequence list\n # remove the unwanted charachters and whitespace, tab\n if lines[0] != head_start:\n line = line.strip()\n line = re.sub(r'\\ .*?\\ ', '', line)\n seq_temps.append(line)\n string_seq= ''.join(seq_temps)\n print ('string_seq', string_seq)\n string_seq = re.sub(r' ', '',string_seq) \n seq.append(string_seq)\n #to remove empty head or seq\n seq = filter(None, seq)\n head_seq_zip = zip(head, seq)\n print ('Sequences with comments', head_seq_zip)\n return head_seq_zip\n\nfile_path = \"/home/alsheikm/GitDir/EeDN_work/fasta/test2\"\ndef _sequeceWrapper(file_path=None):\n #path = \"/home/alsheikm/Work/EDeN_examples/fastaFiles/\"\n zip_head_seqs = _readFastaFile(file_path)\n print file_path\n return zip_head_seqs\n \ndef _fold(seq):\n head, seq, struc = _get_sequence_structure(seq)\n #G = self._make_graph(seq, struc)\n return head, seq, struc ",
"Get the sequence structure",
"#call RNAfold to get the sequence structure\ndef _get_sequence_structure(seqs):\n if mode == 'RNAfold':\n return _rnafold_wrapper(seqs)\n else:\n raise Exception('Not known: %s'% self.mode)\n \ndef _rnafold_wrapper(sequence):\n head = sequence[0]\n seq = sequence[1].split()[0]\n flags='--noPS'\n cmd = 'echo \"%s\" | RNAfold %s' % (seq, flags)\n out = sp.check_output(cmd, shell=True)\n #print out\n text = out.strip().split('\\n')\n print ('text:', text)\n seq = text[0]\n struc = text[1].split()[0]\n return head, seq, struc",
"Build the Graph",
"#Recognize basepairs and add them to the generated graph\ndef _make_graph(head, seq, struc):\n print (\"Graph title\", head)\n open_pran = \"(\"\n close_pran = \")\"\n stack_o = []\n stack_c = []\n G = nx.Graph()\n seq_struc_zip = zip(seq, struc)\n #print seq_struc_zip\n for i, k in enumerate(struc):\n G.add_node(i, label = seq[i])\n # connect with the next node\n if i > 0:\n G.add_edge(i-1, i, label= 'x')\n \n # find basepair and connect them\n if struc[i] == open_pran:\n j = i\n stack_o.append(struc[j])\n open_len = len(stack_o)\n\n if struc[i] == close_pran:\n stack_c.append(struc[i])\n stack_o.pop()\n G.add_edge(i, j, label = 'b')\n j = j-1\n\n return G ",
"Experiment",
"#generating the graph\n#seq,seqs are Not correct they do Not take the zipped output\n\nzip_head_seqs= _sequeceWrapper(file_path)\nprint ('zip_head_seqs here', zip_head_seqs)\nfor i, seq in enumerate(zip_head_seqs):\n heads = seq[0]\n seq1 = seq[1]\n mode = 'RNAfold'\n head, seq, struc =_fold(seq)\n G = _make_graph(head, seq, struc)\n display.draw_graph(G, node_size=180, font_size=9, node_border=True, prog='neato')",
"Note"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
seblabbe/MATH2010-Logiciels-mathematiques
|
NotesDeCours/13-boucle-for.ipynb
|
gpl-3.0
|
[
"$$\n\\def\\CC{\\bf C}\n\\def\\QQ{\\bf Q}\n\\def\\RR{\\bf R}\n\\def\\ZZ{\\bf Z}\n\\def\\NN{\\bf N}\n$$\nBoucle for",
"from __future__ import division, print_function # Python 3\nfrom sympy import init_printing\ninit_printing(use_latex='mathjax',use_unicode=False) # Affichage des résultats",
"Dans ce chapitre et les suivants, nous traitons de la programmation en Python. Les notes ici présentent les grandes lignes et les éléments principaux de ce sujet. Le lecteur désirant en savoir plus sera invité à consulter les chapitres 1 à 7 du livre en français de G. Swinnen Apprendre à programmer avec Python 3 [Swinnen]_, le syllabus du cours de programmation de Thierry Massart [Massart]_ ou encore les chapitre 1 à 11 du livre en anglais de Wentworth et al. How to Think Like a Computer Scientist - Learning with Python [Thinklike]_.\nUne boucle permet de faire des tâches répétitives sur un ordinateur avec un moindre effort.\n<img src=\"images/bart_simpson.jpg\" alt=\"image\" width=\"264\" />",
"for a in range(9):",
"La boucle for\nLa boucle for permet aussi de parcourir les éléments d'une liste, une chaîne de caractères ou en général de tout objet itérable:",
"for a in [1,2,3,4]:\n\nfor a in 'bonjour':",
"En Python, une boucle for est identifiée par une ligne d'en-tête commançant par for se terminant par un deux-points : et avec la syntaxe for TRUC in MACHIN:. La convention est de toujours utiliser 4 espaces pour indenter les lignes du bloc d'instructions qui appartient à la boucle:",
"for i in liste: # ligne d'en-tête\n <ligne 1 du bloc d'instruction>\n <ligne 2 du bloc d'instruction>\n ...\n <ligne n du bloc d'instruction>\n<ligne exécutée après la boucle>",
"Le bloc d'instructions est exécuté autant de fois qu'il y a d'éléments dans la liste. Le bloc d'instruction est exécuté une fois pour chaque valeur de la variable i dans la liste.\nUn exemple de boucle for avec Sympy\nSupposons que l'on désire factoriser le polynôme $x^k-1$ pour toutes les valeurs de $k=1,...,9$. En SymPy, il est possible d'écrire onze fois le même calcul où on change la valeur de l'exposant $k$ à chaque fois:",
"from sympy import factor\nfrom sympy.abc import x\nfactor(x**1-1)\n\nfactor(x**2-1)\n\nfactor(x**3-1)\n\nfactor(x**4-1)\n\nfactor(x**5-1)\n\nfactor(x**6-1)\n\nfactor(x**7-1)\n\nfactor(x**8-1)\n\nfactor(x**9-1)",
"La boucle for permet répéter une action pour toutes les valeurs d'une liste. En utilisant une boucle for, l'exemple ci-haut peut se réécrire plus facilement:",
"for k in range(1,12):",
"Pour différencier les lignes, il est possible d'afficher plus d'informations:",
"from sympy import Eq\nfor k in range(2, 10):",
"Affectation d'une variable\nPour affecter une valeur dans une variable, on se rappelle que cela se fait en Python comme en C ou C++ ou Java avec la syntaxe:",
"a = 5",
"La syntaxe a == 5 est réservée pour le test d'égalité.\nMise à jour d'une variable\nQuand une instruction d'affectation est exécutée, l'expression de droite (à savoir l'expression qui vient après le signe = d'affectation) est évaluée en premier. Cela produit une valeur. Ensuite, l'assignation est faite, de sorte que la variable sur le côté gauche se réfère maintenant à la nouvelle valeur.\nL'une des formes les plus courantes de l'affectation est une mise à jour, lorsque la nouvelle valeur de la variable dépend de son ancienne valeur:",
"n = 5\nn = 3 * n + 1",
"Ligne 2 signifie obtenir la valeur courante de n, la multiplier par trois et ajouter un, et affecter la réponse à n. Donc, après avoir exécuté les deux lignes ci-dessus, n va pointer / se référer à l'entier 16.\nSi vous essayez d'obtenir la valeur d'une variable qui n'a jamais été attribuée, vous obtenez une erreur:",
"W = x + 1",
"Avant de pouvoir mettre à jour une variable, vous devez l'initialiser à une valeur de départ, habituellement avec une valeur simple:",
"sous_total = 0\nsous_total = sous_total + 1",
"La mise à jour d'une variable en lui ajoutant 1 à celle-ci est très commune. On appelle cela un incrément de la variable; soustraire 1 est appelé un décrément.\nLe code sous_total = sous_total + 1 calcule le résultat de la partie droite dans un nouvel espace en mémoire et ensuite cette nouvelle valeur est affectée à la variable sous_total. Une façon plus efficace d'incrémenter une variable est de la modifier sans avoir à garder en mémoire un résultat partiel. En Python (comme en C), on peut incrémenter une variable avec l'opérateur +=. Donc, il suffit d'écrire:",
"sous_total += 1",
"Quelques exemples\nL'exemple suivant illustre comment calculer la somme des éléments d'une liste en utilisant une variable s initialisée à zéro avant la boucle:",
"L = [134, 13614, 73467, 1451, 134, 88]\ns = 0\nfor a in L:\n\ns",
"On écrit la même chose en utilisant le signe += pour incrémenter la variable s :",
"s = 0\nfor a in L:\n\ns",
"On vérifie que le calcul est bon:",
"sum(L)",
"L'exemple suivant double chacune des lettres d'une chaîne de caractères:",
"s = 'gaston'\nt = ''\nfor lettre in s:\n\nt",
"Lorsque la variable de la boucle n'est pas utilisée dans le bloc d'instruction la convention est d'utiliser la barre de soulignement (_) pour l'indiquer. Ici, on calcule les puissances du nombre 3. On remarque que l'expression d'assignation k *= 3 est équivalente à k = k * 3 :",
"k = 1\nfor _ in range(10):"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
gon1213/SDC
|
traffic_sign/tensorflow/CarND-TensorFlow-Lab/lab.ipynb
|
gpl-3.0
|
[
"<h1 align=\"center\">TensorFlow Neural Network Lab</h1>\n\n<img src=\"image/notmnist.png\">\nIn this lab, you'll use all the tools you learned from Introduction to TensorFlow to label images of English letters! The data you are using, <a href=\"http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html\">notMNIST</a>, consists of images of a letter from A to J in differents font.\nThe above images are a few examples of the data you'll be training on. After training the network, you will compare your prediction model against test data. Your goal, by the end of this lab, is to make predictions against that test set with at least an 80% accuracy. Let's jump in!\nTo start this lab, you first need to import all the necessary modules. Run the code below. If it runs successfully, it will print \"All modules imported\".",
"import hashlib\nimport os\nimport pickle\nfrom urllib.request import urlretrieve\n\nimport numpy as np\nfrom PIL import Image\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.utils import resample\nfrom tqdm import tqdm\nfrom zipfile import ZipFile\n\nprint('All modules imported.')",
"The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).",
"def download(url, file):\n \"\"\"\n Download file from <url>\n :param url: URL to file\n :param file: Local file path\n \"\"\"\n if not os.path.isfile(file):\n print('Downloading ' + file + '...')\n urlretrieve(url, file)\n print('Download Finished')\n\n# Download the training and test dataset.\ndownload('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')\ndownload('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')\n\n# Make sure the files aren't corrupted\nassert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\\\n 'notMNIST_train.zip file is corrupted. Remove the file and try again.'\nassert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\\\n 'notMNIST_test.zip file is corrupted. Remove the file and try again.'\n\n# Wait until you see that all files have been downloaded.\nprint('All files downloaded.')\n\ndef uncompress_features_labels(file):\n \"\"\"\n Uncompress features and labels from a zip file\n :param file: The zip file to extract the data from\n \"\"\"\n features = []\n labels = []\n\n with ZipFile(file) as zipf:\n # Progress Bar\n filenames_pbar = tqdm(zipf.namelist(), unit='files')\n \n # Get features and labels from all files\n for filename in filenames_pbar:\n # Check if the file is a directory\n if not filename.endswith('/'):\n with zipf.open(filename) as image_file:\n image = Image.open(image_file)\n image.load()\n # Load image data as 1 dimensional array\n # We're using float32 to save on memory space\n feature = np.array(image, dtype=np.float32).flatten()\n\n # Get the the letter from the filename. 
This is the letter of the image.\n label = os.path.split(filename)[1][0]\n\n features.append(feature)\n labels.append(label)\n return np.array(features), np.array(labels)\n\n# Get the features and labels from the zip files\ntrain_features, train_labels = uncompress_features_labels('notMNIST_train.zip')\ntest_features, test_labels = uncompress_features_labels('notMNIST_test.zip')\n\n# Limit the amount of data to work with a docker container\ndocker_size_limit = 150000\ntrain_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)\n\n# Set flags for feature engineering. This will prevent you from skipping an important step.\nis_features_normal = False\nis_labels_encod = False\n\n# Wait until you see that all features and labels have been uncompressed.\nprint('All features and labels uncompressed.')",
"<img src=\"image/mean_variance.png\" style=\"height: 75%;width: 75%; position: relative; right: 5%\">\nProblem 1\nThe first problem involves normalizing the features for your training and test data.\nImplement Min-Max scaling in the normalize() function to a range of a=0.1 and b=0.9. After scaling, the values of the pixels in the input data should range from 0.1 to 0.9.\nSince the raw notMNIST image data is in grayscale, the current values range from a min of 0 to a max of 255.\nMin-Max Scaling:\n$\nX'=a+{\\frac {\\left(X-X_{\\min }\\right)\\left(b-a\\right)}{X_{\\max }-X_{\\min }}}\n$\nIf you're having trouble solving problem 1, you can view the solution here.",
"# Problem 1 - Implement Min-Max scaling for grayscale image data\ndef normalize_grayscale(image_data):\n \"\"\"\n Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]\n :param image_data: The image data to be normalized\n :return: Normalized image data\n \"\"\"\n # TODO: Implement Min-Max scaling for grayscale image data\n a = 0.1\n b = 0.9\n xmin = 0\n xmax = 255\n return (a + ((image_data - xmin)*(b-a))/(xmax-xmin))\n### DON'T MODIFY ANYTHING BELOW ###\n# Test Cases\nnp.testing.assert_array_almost_equal(\n normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),\n [0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,\n 0.125098039216, 0.128235294118, 0.13137254902, 0.9],\n decimal=3)\nnp.testing.assert_array_almost_equal(\n normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),\n [0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,\n 0.896862745098, 0.9])\n\nif not is_features_normal:\n train_features = normalize_grayscale(train_features)\n test_features = normalize_grayscale(test_features)\n is_features_normal = True\n\nprint('Tests Passed!')\n\nif not is_labels_encod:\n # Turn labels into numbers and apply One-Hot Encoding\n encoder = LabelBinarizer()\n encoder.fit(train_labels)\n train_labels = encoder.transform(train_labels)\n test_labels = encoder.transform(test_labels)\n\n # Change to float32, so it can be multiplied against the features in TensorFlow, which are float32\n train_labels = train_labels.astype(np.float32)\n test_labels = test_labels.astype(np.float32)\n is_labels_encod = True\n\nprint('Labels One-Hot Encoded')\n\nassert is_features_normal, 'You skipped the step to normalize the features'\nassert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'\n\n# Get randomized datasets for training and validation\ntrain_features, valid_features, train_labels, 
valid_labels = train_test_split(\n train_features,\n train_labels,\n test_size=0.05,\n random_state=832289)\n\nprint('Training features and labels randomized and split.')\n\n# Save the data for easy access\npickle_file = 'notMNIST.pickle'\nif not os.path.isfile(pickle_file):\n print('Saving data to pickle file...')\n try:\n with open('notMNIST.pickle', 'wb') as pfile:\n pickle.dump(\n {\n 'train_dataset': train_features,\n 'train_labels': train_labels,\n 'valid_dataset': valid_features,\n 'valid_labels': valid_labels,\n 'test_dataset': test_features,\n 'test_labels': test_labels,\n },\n pfile, pickle.HIGHEST_PROTOCOL)\n except Exception as e:\n print('Unable to save data to', pickle_file, ':', e)\n raise\n\nprint('Data cached in pickle file.')",
"Checkpoint\nAll your progress is now saved to the pickle file. If you need to leave and comeback to this lab, you no longer have to start from the beginning. Just run the code block below and it will load all the data and modules required to proceed.",
"%matplotlib inline\n\n# Load the modules\nimport pickle\nimport math\n\nimport numpy as np\nimport tensorflow as tf\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\n# Reload the data\npickle_file = 'notMNIST.pickle'\nwith open(pickle_file, 'rb') as f:\n pickle_data = pickle.load(f)\n train_features = pickle_data['train_dataset']\n train_labels = pickle_data['train_labels']\n valid_features = pickle_data['valid_dataset']\n valid_labels = pickle_data['valid_labels']\n test_features = pickle_data['test_dataset']\n test_labels = pickle_data['test_labels']\n del pickle_data # Free up memory\n\n\nprint('Data and modules loaded.')",
"<img src=\"image/weight_biases.png\" style=\"height: 60%;width: 60%; position: relative; right: 10%\">\nProblem 2\nFor the neural network to train on your data, you need the following <a href=\"https://www.tensorflow.org/resources/dims_types.html#data-types\">float32</a> tensors:\n - features\n - Placeholder tensor for feature data (train_features/valid_features/test_features)\n - labels\n - Placeholder tensor for label data (train_labels/valid_labels/test_labels)\n - weights\n - Variable Tensor with random numbers from a truncated normal distribution.\n - See <a href=\"https://www.tensorflow.org/api_docs/python/constant_op.html#truncated_normal\">tf.truncated_normal() documentation</a> for help.\n - biases\n - Variable Tensor with all zeros.\n - See <a href=\"https://www.tensorflow.org/api_docs/python/constant_op.html#zeros\"> tf.zeros() documentation</a> for help.\nIf you're having trouble solving problem 2, review \"TensorFlow Linear Function\" section of the class. If that doesn't help, the solution for this problem is available here.",
"features_count = 784\nlabels_count = 10\n\n# TODO: Set the features and labels tensors\nfeatures = tf.placeholder(tf.float32)\nlabels = tf.placeholder(tf.float32)\n\n# TODO: Set the weights and biases tensors\nweights = tf.Variable(tf.truncated_normal((features_count,labels_count)))\nbiases = tf.Variable(tf.zeros(labels_count))\n\n\n\n### DON'T MODIFY ANYTHING BELOW ###\n\n#Test Cases\nfrom tensorflow.python.ops.variables import Variable\n\nassert features._op.name.startswith('Placeholder'), 'features must be a placeholder'\nassert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'\nassert isinstance(weights, Variable), 'weights must be a TensorFlow variable'\nassert isinstance(biases, Variable), 'biases must be a TensorFlow variable'\n\nassert features._shape == None or (\\\n features._shape.dims[0].value is None and\\\n features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'\nassert labels._shape == None or (\\\n labels._shape.dims[0].value is None and\\\n labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'\nassert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'\nassert biases._variable._shape == (10), 'The shape of biases is incorrect'\n\nassert features._dtype == tf.float32, 'features must be type float32'\nassert labels._dtype == tf.float32, 'labels must be type float32'\n\n# Feed dicts for training, validation, and test session\ntrain_feed_dict = {features: train_features, labels: train_labels}\nvalid_feed_dict = {features: valid_features, labels: valid_labels}\ntest_feed_dict = {features: test_features, labels: test_labels}\n\n# Linear Function WX + b\nlogits = tf.matmul(features, weights) + biases\n\nprediction = tf.nn.softmax(logits)\n\n# Cross entropy\ncross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)\n\n# Training loss\nloss = tf.reduce_mean(cross_entropy)\n\n# Create an operation that initializes all variables\ninit = 
tf.initialize_all_variables()\n\n# Test Cases\nwith tf.Session() as session:\n session.run(init)\n session.run(loss, feed_dict=train_feed_dict)\n session.run(loss, feed_dict=valid_feed_dict)\n session.run(loss, feed_dict=test_feed_dict)\n biases_data = session.run(biases)\n\nassert not np.count_nonzero(biases_data), 'biases must be zeros'\n\nprint('Tests Passed!')\n\n# Determine if the predictions are correct\nis_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))\n# Calculate the accuracy of the predictions\naccuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))\n\nprint('Accuracy function created.')",
"<img src=\"image/learn_rate_tune.png\" style=\"height: 60%;width: 60%\">\nProblem 3\nBelow are 3 parameter configurations for training the neural network. In each configuration, one of the parameters has multiple options. For each configuration, choose the option that gives the best acccuracy.\nParameter configurations:\nConfiguration 1\n* Epochs: 1\n* Batch Size:\n * 2000\n * 1000\n * 500\n * 300\n * 50\n* Learning Rate: 0.01\nConfiguration 2\n* Epochs: 1\n* Batch Size: 100\n* Learning Rate:\n * 0.8\n * 0.5\n * 0.1\n * 0.05\n * 0.01\nConfiguration 3\n* Epochs:\n * 1\n * 2\n * 3\n * 4\n * 5\n* Batch Size: 100\n* Learning Rate: 0.2\nThe code will print out a Loss and Accuracy graph, so you can see how well the neural network performed.\nIf you're having trouble solving problem 3, you can view the solution here.",
"# TODO: Find the best parameters for each configuration\nepochs = 1\nbatch_size = 100\nlearning_rate = 0.1\n\n\n\n### DON'T MODIFY ANYTHING BELOW ###\n# Gradient Descent\noptimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss) \n\n# The accuracy measured against the validation set\nvalidation_accuracy = 0.0\n\n# Measurements use for graphing loss and accuracy\nlog_batch_step = 50\nbatches = []\nloss_batch = []\ntrain_acc_batch = []\nvalid_acc_batch = []\n\nwith tf.Session() as session:\n session.run(init)\n batch_count = int(math.ceil(len(train_features)/batch_size))\n\n for epoch_i in range(epochs):\n \n # Progress bar\n batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')\n \n # The training cycle\n for batch_i in batches_pbar:\n # Get a batch of training features and labels\n batch_start = batch_i*batch_size\n batch_features = train_features[batch_start:batch_start + batch_size]\n batch_labels = train_labels[batch_start:batch_start + batch_size]\n\n # Run optimizer and get loss\n _, l = session.run(\n [optimizer, loss],\n feed_dict={features: batch_features, labels: batch_labels})\n\n # Log every 50 batches\n if not batch_i % log_batch_step:\n # Calculate Training and Validation accuracy\n training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)\n validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)\n\n # Log batches\n previous_batch = batches[-1] if batches else 0\n batches.append(log_batch_step + previous_batch)\n loss_batch.append(l)\n train_acc_batch.append(training_accuracy)\n valid_acc_batch.append(validation_accuracy)\n\n # Check accuracy against Validation data\n validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)\n\nloss_plot = plt.subplot(211)\nloss_plot.set_title('Loss')\nloss_plot.plot(batches, loss_batch, 'g')\nloss_plot.set_xlim([batches[0], batches[-1]])\nacc_plot = 
plt.subplot(212)\nacc_plot.set_title('Accuracy')\nacc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')\nacc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')\nacc_plot.set_ylim([0, 1.0])\nacc_plot.set_xlim([batches[0], batches[-1]])\nacc_plot.legend(loc=4)\nplt.tight_layout()\nplt.show()\n\nprint('Validation accuracy at {}'.format(validation_accuracy))",
"Test\nSet the epochs, batch_size, and learning_rate with the best learning parameters you discovered in problem 3. You're going to test your model against your hold out dataset/testing data. This will give you a good indicator of how well the model will do in the real world. You should have a test accuracy of at least 80%.",
"# TODO: Set the epochs, batch_size, and learning_rate with the best parameters from problem 3\nepochs = 10\nbatch_size = 200\nlearning_rate = 0.01\n\n\n\n### DON'T MODIFY ANYTHING BELOW ###\n# The accuracy measured against the test set\ntest_accuracy = 0.0\n\nwith tf.Session() as session:\n \n session.run(init)\n batch_count = int(math.ceil(len(train_features)/batch_size))\n\n for epoch_i in range(epochs):\n \n # Progress bar\n batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')\n \n # The training cycle\n for batch_i in batches_pbar:\n # Get a batch of training features and labels\n batch_start = batch_i*batch_size\n batch_features = train_features[batch_start:batch_start + batch_size]\n batch_labels = train_labels[batch_start:batch_start + batch_size]\n\n # Run optimizer\n _ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})\n\n # Check accuracy against Test data\n test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)\n\n\nassert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)\nprint('Nice Job! Test Accuracy is {}'.format(test_accuracy))",
"Multiple layers\nGood job! You built a one layer TensorFlow network! However, you want to build more than one layer. This is deep learning after all! In the next section, you will start to satisfy your need for more layers."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
diego0020/va_course_2015
|
AstroML/notebooks/04_iris_clustering.ipynb
|
mit
|
[
"Clustering of Iris Data\nClustering is the task of gathering samples into groups of similar\nsamples according to some predefined similarity or dissimilarity\nmeasure (such as the Euclidean distance).\nLet's re-use the results of the 2D PCA of the iris dataset in order to\nexplore clustering. First we need to repeat some of the code from the\nprevious notebook",
"# make sure ipython inline mode is activated\n%pylab inline\n\n# all of this is taken from the notebook '03_iris_dimensionality.ipynb' \nfrom sklearn.datasets import load_iris\nfrom sklearn.decomposition import PCA\nimport pylab as pl\nfrom itertools import cycle\n\niris = load_iris()\nX = iris.data\ny = iris.target\n\npca = PCA(n_components=2, whiten=True).fit(X)\nX_pca = pca.transform(X)\n\ndef plot_2D(data, target, target_names):\n colors = cycle('rgbcmykw')\n target_ids = range(len(target_names))\n pl.figure()\n for i, c, label in zip(target_ids, colors, target_names):\n pl.scatter(data[target == i, 0], data[target == i, 1],\n c=c, label=label)\n pl.legend()",
"Now we will use one of the simplest clustering algorithms, K-means.\nThis is an iterative algorithm which searches for three cluster\ncenters such that the distance from each point to its cluster is\nminimizied.",
"from sklearn.cluster import KMeans\nfrom numpy.random import RandomState\nrng = RandomState(42)\n\nkmeans = KMeans(n_clusters=3, random_state=rng).fit(X_pca)\n\nimport numpy as np\nnp.round(kmeans.cluster_centers_, decimals=2)\n\nkmeans.labels_[:10]\n\nkmeans.labels_[-10:]",
"The K-means algorithm has been used to infer cluster labels for the\npoints. Let's call the plot_2D function again, but color the points\nbased on the cluster labels rather than the iris species.",
"plot_2D(X_pca, kmeans.labels_, [\"c0\", \"c1\", \"c2\"])\n\nplot_2D(X_pca, iris.target, iris.target_names)",
"Exercise\nPerform the K-Means cluster search again, but this time learn the\nclusters using the full data matrix X, rather than the projected\nmatrix X_pca. Does this change the results? Do these labels\nlook closer to the true labels?\nThe K-Means algorithm depends on the random initial placements of the first centroids. In the example you'll always obtain the same placement because the random state is fixed with the command rng = RandomState(42).\nRepeat a few times the K-Means cluster search with a true random state and compare the results. Share your thoughts about the results."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
rohinkumar/galsurveystudy
|
DR12Q/DR12Q_correl_V01_LCDMr2.ipynb
|
mit
|
[
"Correlation function of DR12Q SDSS Catalog\nFirst import all the modules such as healpy and astropy needed for analyzing the structure",
"import healpix_util as hu\nimport astropy as ap\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.table import Table\nimport astropy.io.ascii as ascii\nfrom astropy.io import fits\nfrom astropy.constants import c\nimport matplotlib.pyplot as plt\nimport math as m\nfrom math import pi\n#from scipy.constants import c\nimport scipy.special as sp\nfrom astroML.decorators import pickle_results\nfrom scipy import integrate\nimport warnings\nfrom sklearn.neighbors import BallTree\nimport pickle\nimport multiprocessing as mp\nimport time\nfrom lcdmmetric import *\nfrom progressbar import *\nfrom tqdm import *\nfrom functools import partial\nimport pymangle\nfrom scipy.optimize import curve_fit\n#from astroML.datasets import fetch_sdss_specgals\n#from astroML.correlation import bootstrap_two_point_angular\n%matplotlib inline\n\ndr12q=fits.open(\"./input/DR12Q.fits\")\n\ndr12q\n\ndr12qdat=dr12q[1].data\n\ndr12qdat.columns\n\nz=dr12qdat['Z_PIPE']\n\nra=dr12qdat['RA']\n\ndec=dr12qdat['DEC']\n\nfdata = open(\"./output/DR12Q.dat\",'w')\nfdata.write(\"z\\t ra\\t dec \\n\")\nfor i in range(0,len(ra)-1):\n fdata.write(\"%f\\t\" %z[i])\n fdata.write(\"%f\\t\" %ra[i])\n fdata.write(\"%f\\n\" %dec[i])\nfdata.close()\n\nz\n\nra,dec\n\ndat=np.array([z,ra,dec])\n\ndat\n\ndat=dat.transpose()\n\ndat",
"Read the data file (taken from http://cosmo.nyu.edu/~eak306/SDSS-LRG.html ) converted to ascii with comoving distance etc. in V01 reading from pkl files for faster read",
"# Saving the objects:\nwith open('datDR12Q.pkl', 'w') as f: # Python 3: open(..., 'wb')\n pickle.dump(dat, f)\n\n# Getting back the objects:\nwith open('datDR12Q.pkl') as f: # Python 3: open(..., 'rb')\n dat = pickle.load(f)\ndat\n\nEz = lambda x: 1/m.sqrt(0.3*(1+x)**3+0.7)\n\nnp.vectorize(Ez)\n#Calculate comoving distance of a data point using the Redshift - This definition is based on the cosmology model we take. Here the distance for E-dS universe is considered. Also note that c/H0 ratio is cancelled in the equations and hence not taken.\n\ndef DC_LCDM(z):\n return integrate.quad(Ez, 0, z)[0]\nDC_LCDM=np.vectorize(DC_LCDM)\n\ndr12f = open(\"./output/DR12Qsrarf.dat\",'w')\ndr12f.write(\"z\\t ra\\t dec\\t s\\t rar\\t decr \\n\")\n\nfor i in range(0,len(z)):\n dr12f.write(\"%f\\t \" %z[i])\n dr12f.write(\"%f\\t %f\\t \" %(ra[i],dec[i]))\n dr12f.write(\"%f\\t \" %DC_LCDM(z[i]))\n dr12f.write(\"%f\\t %f\\n \" %(ra[i]*pi/180.0,dec[i]*pi/180.0))\ndr12f.close()\n\ndr12Qdat=ascii.read(\"./output/DR12Qsrarf.dat\")\n\ns=dr12Qdat['s']\nrar=dr12Qdat['ra']\ndecr=dr12Qdat['dec']\n\ndat=np.array([s,rar,decr])\n\ndat\n\ndat=dat.transpose()\n\ndat",
"Read the data file (taken from http://cosmo.nyu.edu/~eak306/SDSS-LRG.html ) converted to ascii with comoving distance etc. in V01 reading from pkl files for faster read",
"# Saving the objects:\nwith open('datDR12Q.pkl', 'w') as f: # Python 3: open(..., 'wb')\n pickle.dump(dat, f)\n\n# Getting back the objects:\nwith open('datDR12Q.pkl') as f: # Python 3: open(..., 'rb')\n dat = pickle.load(f)\ndat\n\nbins=np.arange(0.,0.08,0.005)\n\nprint bins\n\nbinsq=bins**2\n\nbinsq\n\nlen(dat)\n\nLCDMmetricsq(dat[0],dat[1])\n\n%%time\nBTD = BallTree(dat,metric='pyfunc',func=LCDMmetricsq,leaf_size=5) \n\nwith open('BTDDR12QLCDM.pkl', 'w') as f:\n pickle.dump(BTD,f)\n\nwith open('BTDDR12QLCDM.pkl') as f:\n BTD = pickle.load(f)\n \nBTD\n\n%%time\nstart_time=time.time()\ncounts_DD=BTD.two_point_correlation(dat,binsq)\nprint counts_DD\nend_time=time.time()\ntottime=end_time-start_time\nprint \"Total run time:\"\nprint tottime\n\nwith open('BTD12QcDDLCDM.pkl', 'w') as f:\n pickle.dump(counts_DD,f)\n\nwith open('BTD12QcDDLCDM.pkl') as f:\n counts_DD = pickle.load(f)\n \ncounts_DD\n\nDD=np.diff(counts_DD)\n\nDD\n\nplt.plot(bins[1:len(bins)],DD,'ro-')",
"BallTree.two_point_correlation works almost 10 times faster! with leaf_size=5 Going with it to the random catalog",
"dataR=ascii.read(\"./output/rand200kDR12Q.dat\")\n\ndataR\n\nlen(dataR)\n\nlen(dat)\n\nrdr12f = open(\"./output/DR12Qsrarf.dat\",'w')\nrdr12f.write(\"z\\t ra\\t dec\\t s\\t rar\\t decr \\n\")\n\nfor i in range(0,len(dataR)):\n rdr12f.write(\"%f\\t \" %dataR['z'][i])\n rdr12f.write(\"%f\\t %f\\t \" %(dataR['ra'][i],dataR['dec'][i]))\n rdr12f.write(\"%f\\t \" %DC_LCDM(dataR['z'][i]))\n rdr12f.write(\"%f\\t %f\\n \" %((dataR['ra'][i]*pi)/180.0,(dataR['dec'][i]*pi)/180.0))\nrdr12f.close()\n\ndatR=ascii.read(\"./output/DR12Qsrarf.dat\")\n\ndatR\n\nrs=np.array(datR['s'])\nrrar=np.array(datR['rar'])\nrdecr=np.array(datR['decr'])\n\ndatR=np.array([rs,rrar,rdecr])\n\ndatR\n\ndatR.reshape(3,len(dataR))\n\ndatR=datR.transpose()\n\ndatR\n\n# Saving the objects:\nwith open('./output/rDR12Qsrarf.pkl', 'w') as f: # Python 3: open(..., 'wb')\n pickle.dump(datR, f)\n\n# Getting back the objects:\nwith open('./output/rDR12Qsrarf.pkl') as f: # Python 3: open(..., 'rb')\n datR = pickle.load(f)\ndatR\n\n%%time\nBT_R2 = BallTree(datR,metric='pyfunc',func=LCDMmetricsq,leaf_size=5) \n\nwith open('./output/BTRDR12QLCDM.pkl', 'w') as f:\n pickle.dump(BT_R2,f)\n\nwith open('./output/BTRDR12QLCDM.pkl') as f:\n BTR = pickle.load(f)\nBTR\n\n%%time\nstart_time=time.time()\ncounts_RR=BTR.two_point_correlation(datR,bins)\nprint counts_RR\nend_time=time.time()\ntottime=end_time-start_time\nprint \"Total run time:\"\nprint tottime\n\nwith open('./output/BTRDR12QcRRLCDM.pkl', 'w') as f:\n pickle.dump(counts_RR,f)\n\nwith open('./output/BTRDR12QcRRLCDM.pkl') as f:\n counts_RR = pickle.load(f)\n \ncounts_RR\n\ncounts_RR\n\nRR=np.diff(counts_RR)\n\nRR\n\nplt.plot(bins[1:len(bins)],RR,'bo-')\n\nRR_zero = (RR == 0)\nRR[RR_zero] = 1\n\n%%time\nstart_time=time.time()\ncounts_DR=BTR.two_point_correlation(dat,bins)\nprint counts_DR\nend_time=time.time()\ntottime=end_time-start_time\nprint \"Total run time:\"\nprint tottime\n\nwith open('./output/BTRDR12QcDRLCDM.pkl', 'w') as f:\n 
pickle.dump(counts_DR,f)\n\nwith open('./output/BTRDR12QcDRLCDM.pkl') as f:\n counts_DR = pickle.load(f)\n \ncounts_DR\n\nDR=np.diff(counts_DR)\n\ncorrells=(4.0 * DD - 4.0 * DR + RR) / RR\n\nDR\n\ncorrells\n\nplt.plot(bins[1:len(bins)],corrells,'go-')\n\nplt.plot(bins[1:len(bins)],bins[1:len(bins)]*bins[1:len(bins)]*corrells*(c*1e-5)**2,'go-')\n\nplt.plot(bins[2:len(bins)],bins[2:len(bins)]*bins[2:len(bins)]*corrells[1:len(bins)]*(c*1e-5)**2,'go-')\n\nplt.plot(bins[2:len(bins)],corrells[1:len(bins)],'go-')\n\nplt.plot(bins[2:len(bins)],corrells[1:len(bins)],'go-')\nplt.savefig(\"correlDR12Qls.pdf\")\n\nplt.plot(bins[2:len(bins)]*c/1e5,corrells[1:len(bins)],'bo-')\nplt.savefig(\"correl12Q1ls.pdf\")\n\nplt.yscale('log')\nplt.plot(bins[1:len(bins)]*c/1e5,corrells,'bo-')\nplt.savefig(\"correllsfiglogDR12Q.pdf\")\n\nplt.yscale('log')\nplt.plot(bins[2:len(bins)]*c/1e5,corrells[1:len(bins)],'ro-')\nplt.savefig(\"correllslog2xDR12Q.pdf\")\n\nplt.yscale('log')\nplt.xscale('log')\nplt.plot(bins[1:len(bins)]*c/1e5,corrells,'bo-')\nplt.savefig(\"correllsloglogDR12Q.pdf\")"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
GoogleCloudPlatform/healthcare
|
datathon/nusdatathon18/tutorials/image_preprocessing.ipynb
|
apache-2.0
|
[
"Copyright 2018 Google Inc.\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at\n\nhttps://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\nImage Preprocessing\nIn this tutorial, we are going to use the Pillow python lirbrary to show how to apply basic transformations on images. You can safely skip this tutorial if you are already familiar with Pillow.\nFirst of all, let's import all the libraries we need.",
"from google.colab import files\nfrom io import BytesIO\n# Display images.\nfrom IPython.display import display\nfrom PIL import Image, ImageEnhance",
"Next, let's upload a PNG image which we will apply all kinds of transformations on, and resize it to 500x500.",
"# Please assign the real file name of the image to image_name.\nimage_name = ''\n\nuploaded_files = files.upload()\n\nsize = (500, 500) # (width, height)\nimage = Image.open(BytesIO(uploaded_files[image_name])).resize(size)\n\ndisplay(image)",
"Now that we have the image uploaded, let's try rotate the image by 90 degrees cunter-clockwise.",
"image = image.transpose(Image.ROTATE_90)\n\n\ndisplay(image)",
"Now let's flip the image horizontally.",
"image = image.transpose(Image.FLIP_LEFT_RIGHT)\n\ndisplay(image)",
"As a next step, let's adjust the contrast of the image. The base value is 1 and here we are increasing it by 20%.",
"contrast = ImageEnhance.Contrast(image)\nimage = contrast.enhance(1.2)\n\ndisplay(image)",
"And brightness and sharpness.",
"brightness = ImageEnhance.Brightness(image)\nimage = brightness.enhance(1.1)\n\ndisplay(image)\n\nsharpness = ImageEnhance.Sharpness(image)\nimage = sharpness.enhance(1.2)\n\ndisplay(image)",
"There are a whole lot more transformations we can make on images, please take a look at the official documentation if you'd like to know more."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
ES-DOC/esdoc-jupyterhub
|
notebooks/csiro-bom/cmip6/models/sandbox-1/toplevel.ipynb
|
gpl-3.0
|
[
"ES-DOC CMIP6 Model Properties - Toplevel\nMIP Era: CMIP6\nInstitute: CSIRO-BOM\nSource ID: SANDBOX-1\nSub-Topics: Radiative Forcings. \nProperties: 85 (42 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:53:56\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook",
"# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'csiro-bom', 'sandbox-1', 'toplevel')",
"Document Authors\nSet document authors",
"# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)",
"Document Contributors\nSpecify document contributors",
"# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)",
"Document Publication\nSpecify document publication status",
"# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)",
"Document Table of Contents\n1. Key Properties\n2. Key Properties --> Flux Correction\n3. Key Properties --> Genealogy\n4. Key Properties --> Software Properties\n5. Key Properties --> Coupling\n6. Key Properties --> Tuning Applied\n7. Key Properties --> Conservation --> Heat\n8. Key Properties --> Conservation --> Fresh Water\n9. Key Properties --> Conservation --> Salt\n10. Key Properties --> Conservation --> Momentum\n11. Radiative Forcings\n12. Radiative Forcings --> Greenhouse Gases --> CO2\n13. Radiative Forcings --> Greenhouse Gases --> CH4\n14. Radiative Forcings --> Greenhouse Gases --> N2O\n15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3\n16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3\n17. Radiative Forcings --> Greenhouse Gases --> CFC\n18. Radiative Forcings --> Aerosols --> SO4\n19. Radiative Forcings --> Aerosols --> Black Carbon\n20. Radiative Forcings --> Aerosols --> Organic Carbon\n21. Radiative Forcings --> Aerosols --> Nitrate\n22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect\n23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect\n24. Radiative Forcings --> Aerosols --> Dust\n25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic\n26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic\n27. Radiative Forcings --> Aerosols --> Sea Salt\n28. Radiative Forcings --> Other --> Land Use\n29. Radiative Forcings --> Other --> Solar \n1. Key Properties\nKey properties of the model\n1.1. Model Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nTop level overview of coupled model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.2. Model Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nName of coupled model.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"2. Key Properties --> Flux Correction\nFlux correction properties of the model\n2.1. Details\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe if/how flux corrections are applied in the model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.flux_correction.details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"3. Key Properties --> Genealogy\nGenealogy and history of the model\n3.1. Year Released\nIs Required: TRUE Type: STRING Cardinality: 1.1\nYear the model was released",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"3.2. CMIP3 Parent\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCMIP3 parent if any",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"3.3. CMIP5 Parent\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCMIP5 parent if any",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"3.4. Previous Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nPreviously known as",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4. Key Properties --> Software Properties\nSoftware properties of model\n4.1. Repository\nIs Required: FALSE Type: STRING Cardinality: 0.1\nLocation of code for this component.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4.2. Code Version\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCode version identifier.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4.3. Code Languages\nIs Required: FALSE Type: STRING Cardinality: 0.N\nCode language(s).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4.4. Components Structure\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe how model realms are structured into independent software components (coupled via a coupler) and internal software components.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4.5. Coupler\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nOverarching coupling framework for model.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"OASIS\" \n# \"OASIS3-MCT\" \n# \"ESMF\" \n# \"NUOPC\" \n# \"Bespoke\" \n# \"Unknown\" \n# \"None\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"5. Key Properties --> Coupling\n**\n5.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of coupling in the model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.coupling.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"5.2. Atmosphere Double Flux\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs the atmosphere passing a double flux to the ocean and sea ice (as opposed to a single one)?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"5.3. Atmosphere Fluxes Calculation Grid\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nWhere are the air-sea fluxes calculated",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Atmosphere grid\" \n# \"Ocean grid\" \n# \"Specific coupler grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"5.4. Atmosphere Relative Winds\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nAre relative or absolute winds used to compute the flux? I.e. do ocean surface currents enter the wind stress calculation?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"6. Key Properties --> Tuning Applied\nTuning methodology for model\n6.1. Description\nIs Required: TRUE Type: STRING Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics/diagnostics retained. Document the relative weight given to climate performance metrics/diagnostics versus process oriented metrics/diagnostics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.2. Global Mean Metrics Used\nIs Required: FALSE Type: STRING Cardinality: 0.N\nList set of metrics/diagnostics of the global mean state used in tuning model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.3. Regional Metrics Used\nIs Required: FALSE Type: STRING Cardinality: 0.N\nList of regional metrics/diagnostics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.4. Trend Metrics Used\nIs Required: FALSE Type: STRING Cardinality: 0.N\nList observed trend metrics/diagnostics used in tuning model/component (such as 20th century)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.5. Energy Balance\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe how energy balance was obtained in the full system: in the various components independently or at the components coupling stage?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.6. Fresh Water Balance\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe how fresh_water balance was obtained in the full system: in the various components independently or at the components coupling stage?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7. Key Properties --> Conservation --> Heat\nGlobal heat convervation properties of the model\n7.1. Global\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe if/how heat is conserved globally",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.2. Atmos Ocean Interface\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how heat is conserved at the atmosphere/ocean coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.3. Atmos Land Interface\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe if/how heat is conserved at the atmosphere/land coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.4. Atmos Sea-ice Interface\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how heat is conserved at the atmosphere/sea-ice coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.5. Ocean Seaice Interface\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how heat is conserved at the ocean/sea-ice coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.6. Land Ocean Interface\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how heat is conserved at the land/ocean coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8. Key Properties --> Conservation --> Fresh Water\nGlobal fresh water convervation properties of the model\n8.1. Global\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe if/how fresh_water is conserved globally",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.2. Atmos Ocean Interface\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how fresh_water is conserved at the atmosphere/ocean coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.3. Atmos Land Interface\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe if/how fresh water is conserved at the atmosphere/land coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.4. Atmos Sea-ice Interface\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how fresh water is conserved at the atmosphere/sea-ice coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.5. Ocean Seaice Interface\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how fresh water is conserved at the ocean/sea-ice coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.6. Runoff\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe how runoff is distributed and conserved",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.7. Iceberg Calving\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how iceberg calving is modeled and conserved",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.8. Endoreic Basins\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how endoreic basins (no ocean access) are treated",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.9. Snow Accumulation\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe how snow accumulation over land and over sea-ice is treated",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"9. Key Properties --> Conservation --> Salt\nGlobal salt convervation properties of the model\n9.1. Ocean Seaice Interface\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how salt is conserved at the ocean/sea-ice coupling interface",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"10. Key Properties --> Conservation --> Momentum\nGlobal momentum convervation properties of the model\n10.1. Details\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how momentum is conserved in the model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"11. Radiative Forcings\nRadiative forcings of the model for historical and scenario (aka Table 12.1 IPCC AR5)\n11.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of radiative forcings (GHG and aerosols) implementation in model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"12. Radiative Forcings --> Greenhouse Gases --> CO2\nCarbon dioxide forcing\n12.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"12.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"13. Radiative Forcings --> Greenhouse Gases --> CH4\nMethane forcing\n13.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"14. Radiative Forcings --> Greenhouse Gases --> N2O\nNitrous oxide forcing\n14.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"14.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3\nTroposheric ozone forcing\n15.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"15.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3\nStratospheric ozone forcing\n16.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"16.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"17. Radiative Forcings --> Greenhouse Gases --> CFC\nOzone-depleting and non-ozone-depleting fluorinated gases forcing\n17.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"17.2. Equivalence Concentration\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nDetails of any equivalence concentrations used",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"Option 1\" \n# \"Option 2\" \n# \"Option 3\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"17.3. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"18. Radiative Forcings --> Aerosols --> SO4\nSO4 aerosol forcing\n18.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"18.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"19. Radiative Forcings --> Aerosols --> Black Carbon\nBlack carbon aerosol forcing\n19.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"19.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"20. Radiative Forcings --> Aerosols --> Organic Carbon\nOrganic carbon aerosol forcing\n20.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"20.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"21. Radiative Forcings --> Aerosols --> Nitrate\nNitrate forcing\n21.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"21.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect\nCloud albedo effect forcing (RFaci)\n22.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"22.2. Aerosol Effect On Ice Clouds\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nRadiative effects of aerosols on ice clouds are represented?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"22.3. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect\nCloud lifetime effect forcing (ERFaci)\n23.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"23.2. Aerosol Effect On Ice Clouds\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nRadiative effects of aerosols on ice clouds are represented?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"23.3. RFaci From Sulfate Only\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nRadiative forcing from aerosol cloud interactions from sulfate aerosol only?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"23.4. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"24. Radiative Forcings --> Aerosols --> Dust\nDust forcing\n24.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"24.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic\nTropospheric volcanic forcing\n25.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"25.2. Historical Explosive Volcanic Aerosol Implementation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHow explosive volcanic aerosol is implemented in historical simulations",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Type A\" \n# \"Type B\" \n# \"Type C\" \n# \"Type D\" \n# \"Type E\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"25.3. Future Explosive Volcanic Aerosol Implementation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHow explosive volcanic aerosol is implemented in future simulations",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Type A\" \n# \"Type B\" \n# \"Type C\" \n# \"Type D\" \n# \"Type E\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"25.4. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic\nStratospheric volcanic forcing\n26.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"26.2. Historical Explosive Volcanic Aerosol Implementation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHow explosive volcanic aerosol is implemented in historical simulations",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Type A\" \n# \"Type B\" \n# \"Type C\" \n# \"Type D\" \n# \"Type E\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"26.3. Future Explosive Volcanic Aerosol Implementation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHow explosive volcanic aerosol is implemented in future simulations",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Type A\" \n# \"Type B\" \n# \"Type C\" \n# \"Type D\" \n# \"Type E\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"26.4. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"27. Radiative Forcings --> Aerosols --> Sea Salt\nSea salt forcing\n27.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"27.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"28. Radiative Forcings --> Other --> Land Use\nLand use forcing\n28.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"28.2. Crop Change Only\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nLand use change represented via crop change only?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"28.3. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"29. Radiative Forcings --> Other --> Solar\nSolar forcing\n29.1. Provision\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nHow solar forcing is provided",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"irradiance\" \n# \"proton\" \n# \"electron\" \n# \"cosmic ray\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"29.2. Additional Information\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"©2017 ES-DOC"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
yaoxx151/UCSB_Boot_Camp_copy
|
Day05_GraphAlgorithms1/notebooks/02 - Visualization.ipynb
|
cc0-1.0
|
[
"This is one of the 100 recipes of the IPython Cookbook, the definitive guide to high-performance scientific computing and data science in Python.\n\n6.4. Visualizing a NetworkX graph in the IPython notebook with d3.js\n\nLet's import the packages.",
"import json\nimport numpy as np\nimport networkx as nx\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"We load a famous social graph published in 1977, called Zachary's Karate club graph. This graph represents the friendships between members of a Karate Club. The club's president and the instructor were involved in a dispute, resulting in a split-up of this group. Here, we simply display the graph with matplotlib (using networkx.draw()).",
"g = nx.karate_club_graph()\nplt.figure(figsize=(6,4));\nnx.draw(g)",
"Now, we're going to display this graph in the notebook with d3.js. The first step is to bring this graph to Javascript. We choose here to export the graph in JSON. Note that d3.js generally expects each edge to be an object with a source and a target. Also, we specify which side each member has taken (club attribute).",
"from networkx.readwrite import json_graph\ndata = json_graph.node_link_data(g)\nwith open('graph.json', 'w') as f:\n json.dump(data, f, indent=4)",
"The next step is to create an HTML object that will contain the visualization. Here, we create a <div> element in the notebook. We also specify a few CSS styles for nodes and links (also called edges).",
"%%html\n<div id=\"d3-example\"></div>\n<style>\n.node {stroke: #fff; stroke-width: 1.5px;}\n.link {stroke: #999; stroke-opacity: .6;}\n</style>",
"The last step is trickier. We write the Javascript code to load the graph from the JSON file, and display it with d3.js. Knowing the basics of d3.js is required here (see the documentation of d3.js). We also give detailled explanations in the code comments below. (http://d3js.org)",
"%%javascript\n// We load the d3.js library from the Web.\nrequire.config({paths: {d3: \"http://d3js.org/d3.v3.min\"}});\nrequire([\"d3\"], function(d3) {\n // The code in this block is executed when the \n // d3.js library has been loaded.\n \n // First, we specify the size of the canvas containing\n // the visualization (size of the <div> element).\n var width = 300,\n height = 300;\n\n // We create a color scale.\n var color = d3.scale.category10();\n\n // We create a force-directed dynamic graph layout.\n var force = d3.layout.force()\n .charge(-120)\n .linkDistance(30)\n .size([width, height]);\n\n // In the <div> element, we create a <svg> graphic\n // that will contain our interactive visualization.\n var svg = d3.select(\"#d3-example\").select(\"svg\")\n if (svg.empty()) {\n svg = d3.select(\"#d3-example\").append(\"svg\")\n .attr(\"width\", width)\n .attr(\"height\", height);\n }\n \n // We load the JSON file.\n d3.json(\"graph.json\", function(error, graph) {\n // In this block, the file has been loaded\n // and the 'graph' object contains our graph.\n \n // We load the nodes and links in the force-directed\n // graph.\n force.nodes(graph.nodes)\n .links(graph.links)\n .start();\n\n // We create a <line> SVG element for each link\n // in the graph.\n var link = svg.selectAll(\".link\")\n .data(graph.links)\n .enter().append(\"line\")\n .attr(\"class\", \"link\");\n\n // We create a <circle> SVG element for each node\n // in the graph, and we specify a few attributes.\n var node = svg.selectAll(\".node\")\n .data(graph.nodes)\n .enter().append(\"circle\")\n .attr(\"class\", \"node\")\n .attr(\"r\", 5) // radius\n .style(\"fill\", function(d) {\n // The node color depends on the club.\n return color(d.club); \n })\n .call(force.drag);\n\n // The name of each node is the node number.\n node.append(\"title\")\n .text(function(d) { return d.name; });\n\n // We bind the positions of the SVG elements\n // to the positions of the dynamic force-directed graph,\n 
// at each time step.\n force.on(\"tick\", function() {\n link.attr(\"x1\", function(d) { return d.source.x; })\n .attr(\"y1\", function(d) { return d.source.y; })\n .attr(\"x2\", function(d) { return d.target.x; })\n .attr(\"y2\", function(d) { return d.target.y; });\n\n node.attr(\"cx\", function(d) { return d.x; })\n .attr(\"cy\", function(d) { return d.y; });\n });\n });\n});",
"When we execute this cell, the HTML object created in the previous cell is updated. The graph is animated and interactive: we can click on nodes, see their labels, and move them within the canvas.\n\nYou'll find all the explanations, figures, references, and much more in the book (to be released later this summer).\nIPython Cookbook, by Cyrille Rossant, Packt Publishing, 2014 (400 pages). Get a 50% discount by pre-ordering now with the code mK00gPxQM (time-limited offer)!"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
kimkipyo/dss_git_kkp
|
통계, 머신러닝 복습/160517화_4일차_시각화 Visualization/4.웹 플롯을 위한 bokeh 패키지 소개.ipynb
|
mit
|
[
"웹 플롯을 위한 bokeh 패키지 소개\nBokeh ( http://bokeh.pydata.org )는 웹사이트에서 현대적인 웹 기반 인터랙티브 플롯을 제공하는 것을 목표로 만든 파이썬 라이브러리이다. \n여기에서는 \"파이썬을 활용한 금융분석(Python for Finance) 제14장의 예제 코드를 기반으로 bokeh 패키지의 기능을 간단히 소개한다\n자료의 준비\n우선 예제로 사용할 주식 가격 자료를 인터넷에서 다운받는다.",
"# import urllib\n\nurl = 'http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2009'\ndata = pd.read_csv(url, parse_dates=['Date'])",
"Bokeh 라이브러리 임포트\nBokeh는 원래 정적인 웹사이트로 렌더링(2차원의 화상에 광원·위치·색상 등 외부의 정보를 고려하여 사실감을 불어넣어, 3차원 화상을 만드는 과정을 뜻하는 컴퓨터그래픽스 용어) 할 html 파일을 출력하는 것을 목표로 한다. 따라서 출력을 저장할 html 파일 패스를 지정해야 한다. 만약 주피터 노트북에서 작업한다면 다음과 같이 output_notebook 명령을 실행해야 한다.",
"import bokeh.plotting as bp\n\n# 주피터 노트북이 아닌 파일로 출력하는 경우\n# bp.output_file(\"../images/msft_1.html\", title=\"Bokeh Example (Static)\")\n\n# 주피터 노트북에서 실행하여 출력하는 경우\nbp.output_notebook()",
"플롯팅\n이제 플롯을 위한 준비 작업을 완료하였다. 우선 figure 명령으로 Figure 클래스 객체를 생성해야한다. 이를 p라는 변수에 저장하자.\n\nhttp://bokeh.pydata.org/en/latest/docs/reference/plotting.html#bokeh.plotting.figure.figure",
"p = bp.figure(title='Historical Stock Quotes', # 플롯 제목\n x_axis_type = 'datetime', # x 축은 날짜 정보\n tools = '')",
"다음으로 Figure 클래스의 메서드를 호출하여 실제 플롯 객체를 추가한다. 우선 라인 플롯을 그리기 위해 line 메서드을 실행한다.\n\nhttp://bokeh.pydata.org/en/latest/docs/reference/plotting.html#bokeh.plotting.figure.Figure.line",
"p.line(\n data['Date'], # x 좌표\n data['Close'], # y 좌표\n color ='#0066cc', # 선 색상\n legend ='MSFT', # 범례 이름\n)",
"이제 show 명령어를 호출하여 실제 차트를 렌더링 한다.",
"bp.show(p)",
"상호작용 툴 추가하기\n만약 차트에 상호작용을 위한 툴을 추가하고 싶다면 Figure 객체 생성시 tools 인수를 설정한다.",
"p = bp.figure(title='Historical Stock Quotes', # 플롯 제목\n x_axis_type ='datetime', # x 축은 날짜 정보\n tools = 'pan, wheel_zoom, box_zoom, reset, previewsave')\n\np.line(\n data['Date'], # x 좌표\n data['Close'], # y 좌표\n color ='#0066cc', # 선 색상\n legend ='MSFT', # 범례 이름\n)\nbp.show(p)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
darcamo/pyphysim
|
ipython_notebooks/ZadoffchuChannelEstimation.ipynb
|
gpl-2.0
|
[
"%matplotlib inline\n\nimport sys\nsys.path.append('../')\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\nfrom pyphysim.reference_signals.zadoffchu import calcBaseZC, get_shifted_root_seq\nfrom pyphysim.channels.fading import TdlChannel, TdlChannelProfile, COST259_TUx\nfrom pyphysim.channels.fading_generators import JakesSampleGenerator\nfrom pyphysim.util.conversion import linear2dB\n\nnum_prbs = 25; # Number of PRBs to simulate\nNsc = 12 * num_prbs; # Number of subcarriers\nNzc = 139; # Size of the sequence\nu1 = 25; # Root sequence index\nu2 = u1#12; # Root sequence index\nu3 = u1#7; # Root sequence index\n\n# Generate the root sequence\na_u1 = calcBaseZC(Nzc, u1);\na_u2 = calcBaseZC(Nzc, u2);\na_u3 = calcBaseZC(Nzc, u3);\n\nprint(\"Nsc: {0}\".format(Nsc))\nprint(\"a_u.shape: {0}\".format(a_u1.shape))",
"Note that the sequence size Nzc is lower then the number of subcarriers that will have elements of the Zadoff-Chu sequence. That is $Nzc \\leq 300/2 = 150$. Therefore, we will append new elements (creating a cyclic sequence).",
"# Considering a_u currently has 139 elements, we need to append 11 elements to make 150\n# TODO: Make this automatically depending on the Nsc and Nzc values\na_u1 = np.hstack([a_u1, a_u1[0:11]])\na_u2 = np.hstack([a_u2, a_u2[0:11]])\na_u3 = np.hstack([a_u3, a_u3[0:11]])",
"Create shifted sequences for 3 users\nFirst we arbitrarely choose some cyclic shift indexes and then we call zadoffchu.getShiftedZF to get the shifted sequence.",
"m_u1 = 1 # Cyclic shift index\nm_u2 = 4\nm_u3 = 7\nr1 = get_shifted_root_seq(a_u1, m_u1, denominator=8)\nr2 = get_shifted_root_seq(a_u2, m_u2, denominator=8)\nr3 = get_shifted_root_seq(a_u3, m_u3, denominator=8)",
"Generate channels from users to the BS\nNow it's time to transmit the shifted sequences. We need to create the fading channels from two users to some BS.",
"speedTerminal = 3/3.6 # Speed in m/s\nfcDbl = 2.6e9 # Central carrier frequency (in Hz)\ntimeTTIDbl = 1e-3 # Time of a single TTI\nsubcarrierBandDbl = 15e3 # Subcarrier bandwidth (in Hz)\nnumOfSubcarriersPRBInt = 12 # Number of subcarriers in each PRB \n\n# xxxxxxxxxx Dependent parametersxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\nlambdaDbl = 3e8/fcDbl # Carrier wave length\nFd = speedTerminal / lambdaDbl\nTs = 1./(Nsc * subcarrierBandDbl)\n\nL = 16 # The number of rays for the Jakes model.\n# Jakes sample generator for each user.\njakes1 = JakesSampleGenerator(Fd, Ts, L)\njakes2 = JakesSampleGenerator(Fd, Ts, L)\njakes3 = JakesSampleGenerator(Fd, Ts, L)\n\n\n# Create a TDL channel object for each user\ntdlchannel1 = TdlChannel(jakes1, COST259_TUx)\ntdlchannel2 = TdlChannel(jakes2, COST259_TUx)\ntdlchannel3 = TdlChannel(jakes3, COST259_TUx)\n\n# Compute the fading map for each user\ntdlchannel1.generate_impulse_response(1)\ntdlchannel2.generate_impulse_response(1)\ntdlchannel3.generate_impulse_response(1)\nimpulse_response1 = tdlchannel1.get_last_impulse_response()\nimpulse_response2 = tdlchannel2.get_last_impulse_response()\nimpulse_response3 = tdlchannel3.get_last_impulse_response()\n\nfreqResponse1 = impulse_response1.get_freq_response(Nsc)\nfreqResponse2 = impulse_response2.get_freq_response(Nsc)\nfreqResponse3 = impulse_response3.get_freq_response(Nsc)\n\n# OPTIONAL: Save the channels for loading in MATLAB\nimport scipy.io as sio\nsio.savemat('channel_freq_resp.mat', {\n 'freqResponse1':freqResponse1, \n 'freqResponse2':freqResponse2,\n 'freqResponse3':freqResponse3})",
"Finally we have a channel (freq. response) for each user.",
"# Each channel is the frequency response in 300 subcarriers\nH1 = freqResponse1[:,0]\nH2 = freqResponse2[:,0]\nH3 = freqResponse3[:,0]\n\nh1 = np.fft.ifft(H1)\nh2 = np.fft.ifft(H2)\nh3 = np.fft.ifft(H3)\n\nplt.figure(figsize=(16,6))\nplt.subplot(1,2,1)\nplt.plot(np.abs(H1))\nplt.title('Channel in Freq. Domain')\nplt.subplot(1,2,2)\nplt.stem(np.abs(h1[0:40]), use_line_collection=True)\nplt.title('Channel Impulse Response')\nplt.show()\n\nplt.figure(figsize=(16,6))\nplt.subplot(1,2,1)\nplt.plot(np.abs(H2))\nplt.title('Channel in Freq. Domain')\nplt.subplot(1,2,2)\nplt.stem(np.abs(h2[0:40]), use_line_collection=True)\nplt.title('Channel Impulse Response')\nplt.show()\n\nplt.figure(figsize=(16,6))\nplt.subplot(1,2,1)\nplt.plot(np.abs(H3))\nplt.title('Channel in Freq. Domain')\nplt.subplot(1,2,2)\nplt.stem(np.abs(h3[0:40]), use_line_collection=True)\nplt.title('Channel Impulse Response')\nplt.show()",
"Perform the transmission\nFirst we need to prepare the input data from our shifted Zadoff-Chu sequences.\nTo makes things clear, let's start transmiting a single sequence and we won't include the white noise. Since we use a comb to transmit the SRS sequence, we will use Nsc/2 subcarriers from the Nsc subcarriers from a comb like pattern.",
"comb_indexes = np.arange(0, Nsc, 2)\n\n# Note that this is the received signal in the frequency domain\n# Here we are not summing users\nY1 = H1[comb_indexes] * r1\nY2 = H2[comb_indexes] * r2\nY3 = H3[comb_indexes] * r3\n\n# Complete transmit signal summing all users\nY = Y1 + Y2 + Y3;\n\nprint(\"Size of Y: {0}\".format(Y.size))",
"According to the paper, \n\n... the received\nfrequency-domain sequence Y is element-wise multiplied with\nthe complex conjugate of the expected root sequence X before\nthe IDFT. This provides in one shot\nthe concatenated CIRs of all UEs multiplexed on the same root\nsequence.\n\nJust for checking let's get the plot of the received signal if only users 1 transmits.",
"# Just for checking let's get the plot of the received signal if only users 1 transmits.\ny1 = np.fft.ifft(np.conj(r1) * Y1)\nplt.figure(figsize=(16,6))\nplt.subplot(1,2,1)\nplt.stem(np.abs(y1[0:40]), use_line_collection=True)\nplt.title(\"Estimated impulse response\")\nplt.subplot(1,2,2)\nplt.stem(np.abs(h1[0:40]), use_line_collection=True)\nplt.title(\"True impulse response\")\nplt.show()",
"And for user 2.",
"# Just for checking let's get the plot of the received signal if only users 1 transmits.\ny2 = np.fft.ifft(np.conj(r2) * Y2)\nplt.figure(figsize=(16,6))\nplt.subplot(1,2,1)\nplt.stem(np.abs(y2[0:40]), use_line_collection=True)\nplt.title(\"Estimated impulse response\")\nplt.subplot(1,2,2)\nplt.stem(np.abs(h2[0:40]), use_line_collection=True)\nplt.title(\"True impulse response\")\nplt.show()",
"And for user 3.",
"# Just for checking let's get the plot of the received signal if only users 1 transmits.\ny3 = np.fft.ifft(np.conj(r3) * Y3)\nplt.figure(figsize=(16,6))\nplt.subplot(1,2,1)\nplt.stem(np.abs(y3[0:40]), use_line_collection=True)\nplt.title(\"Estimated impulse response\")\nplt.subplot(1,2,2)\nplt.stem(np.abs(h3[0:40]), use_line_collection=True)\nplt.title(\"True impulse response\")\nplt.show()",
"Now let's get the plot of the signal considering that all users transmitted. Notice how the part due to user 1 in the plot is the same channel when only user 1 transmitted. This indicates that Zadoff-chu 0 cross correlation is indeed working.",
"y = np.fft.ifft(np.conj(a_u1) * Y, 150)\nplt.figure(figsize=(12,6))\nplt.stem(np.abs(y), use_line_collection=True)\nplt.show()\n",
"Estimate the channels\nSince we get a concatenation of the impulse response of the different users, we need to know for each users we need to know the first and the last sample index corresponding to the particular user's impulse response.\nSince we have Nsc subcarriers, from which we will use $Nsc/2$, and we have 3 users, we can imagine that each user can have up to $Nsc/(2*3)$ samples, which for $Nsc=300$ corresponds to 50 subcarriers.\nNow let's estimate the channel of the first user.\nFirst let's check again what is the shift used by the first user.",
"m_u1",
"For an index equal to 1 the starting sample of the first user will be 101 and the ending sample will be 101+50-1=150.",
"def plot_channel_responses(h, tilde_h):\n \"\"\"Plot the estimated and true channel responses\n\n Parameters\n ----------\n h : numpy complex array\n The true channel impulse response\n tilde_h : numpy complex array\n The estimated channel impulse response\n \"\"\"\n H = np.fft.fft(h)\n tilde_H = np.fft.fft(tilde_h, Nsc)\n\n plt.figure(figsize=(16,12))\n\n # Plot estimated impulse response\n ax1 = plt.subplot2grid((3,2), (0,0))\n ax1.stem(np.abs(tilde_h[0:20]), use_line_collection=True)\n plt.xlabel(\"Time sample\")\n plt.ylabel(\"Amplitude (abs)\")\n plt.title(\"Estimated Impulse Response\")\n plt.grid()\n\n # Plot TRUE impulse response\n ax2 = plt.subplot2grid((3,2), (0,1))\n ax2.stem(np.abs(h[0:20]),linefmt='g', use_line_collection=True)\n plt.xlabel(\"Time sample\")\n plt.ylabel(\"Amplitude (abs)\")\n plt.xlabel(\"Time sample\")\n plt.title(\"True Impulse Response\")\n plt.grid()\n\n # Plot estimated frequency response (absolute value)\n ax3 = plt.subplot2grid((3,2), (1,0), colspan=2)\n plt.plot(np.abs(tilde_H))\n #plt.xlabel(\"Subcarrier\")\n plt.ylabel(\"Amplitude (abs)\")\n plt.title(\"Frequency Response (abs)\")\n\n # Plot TRUE frequency response (absolute value)\n #plt.subplot(3,2,4)\n ax3.plot(np.abs(H), 'g')\n plt.grid()\n plt.legend([\"Estimated Value\", \"True Value\"], loc='upper left')\n\n # Plot estimated frequency response (angle)\n ax4 = plt.subplot2grid((3,2), (2,0), colspan=2)\n ax4.plot(np.angle(tilde_H))\n plt.xlabel(\"Subcarrier\")\n plt.ylabel(\"Angle (phase)\")\n plt.title(\"Frequency Response (phase)\")\n\n # Plot TRUE frequency response (angle)\n ax4.plot(np.angle(H), 'g')\n plt.grid()\n plt.legend([\"Estimated Value\", \"True Value\"], loc='upper left')\n\n # Show the plots\n plt.show()\n\n\ndef plot_normalized_squared_error(H, tilde_H):\n \"\"\"Plot the normalized squared error (in dB).\n\n Parameters\n ----------\n H : numpy complex array\n The true channel frequency response\n tilde_H : numpy complex array\n The estimated channel 
frequency response\n \"\"\"\n plt.figure(figsize=(12,8))\n error = np.abs(tilde_H - H)**2 / (np.abs(H)**2)\n plt.plot(linear2dB(error))\n plt.title(\"Normalized Squared Error\")\n plt.xlabel(\"Subcarrier\")\n plt.ylabel(\"Normalized Squared Error (in dB)\")\n plt.grid()\n plt.show()\n\ny = np.fft.ifft(np.conj(r1) * Y, 150)\ntilde_h1 = y[0:20]\ntilde_H1 = np.fft.fft(tilde_h1, Nsc)\ntilde_Y1 = tilde_H1[comb_indexes] * r1\n\nplot_channel_responses(h1, tilde_h1)",
"Now we will compute the squared error in each subcarrier.",
"tilde_H1 = np.fft.fft(tilde_h1, Nsc)\nplot_normalized_squared_error(H1, tilde_H1)\n\ny = np.fft.ifft(np.conj(r2) * (Y), 150)\ntilde_h2 = y[0:20]\ntilde_H2 = np.fft.fft(tilde_h2, Nsc)\ntilde_Y2 = tilde_H2[comb_indexes] * r2\n\nplot_channel_responses(h2, tilde_h2)\n\ntilde_H2 = np.fft.fft(tilde_h2, Nsc)\nplot_normalized_squared_error(H2, tilde_H2)\n\ny = np.fft.ifft(np.conj(r3) * (Y), 150)\ntilde_h3 = y[0:11]\ntilde_H3 = np.fft.fft(tilde_h3, Nsc)\ntilde_Y3 = tilde_H3[comb_indexes] * r3\n\nplot_channel_responses(h3, tilde_h3)\n\ntilde_H3 = np.fft.fft(tilde_h3, Nsc)\nplot_normalized_squared_error(H3, tilde_H3)",
"Estimated the channels from corrupted (white noise) signal\nNow we will add some white noise to Y",
"# Add white noise\nnoise_var = 1e-2\nY_noised = Y + np.sqrt(noise_var/2.) * (np.random.randn(Nsc//2) + 1j * np.random.randn(Nsc//2))\n\ny_noised = np.fft.ifft(np.conj(r2) * (Y_noised), 150)\ntilde_h2_noised = y_noised[0:20]\n\nplot_channel_responses(h2, tilde_h2_noised)\n\ntilde_H2_noised = np.fft.fft(tilde_h2_noised, Nsc)\nplot_normalized_squared_error(H2, tilde_H2_noised)"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
bourneli/deep-learning-notes
|
DAT236x Deep Learning Explained/Lab2_LogisticRegression.ipynb
|
mit
|
[
"from IPython.display import Image",
"Lab 2 - Logistic Regression (LR) with MNIST\nThis lab corresponds to Module 2 of the \"Deep Learning Explained\" course. We assume that you have successfully completed Lab 1 (Downloading the MNIST data).\nIn this lab we will build and train a Multiclass Logistic Regression model using the MNIST data. \nIntroduction\nProblem:\nOptical Character Recognition (OCR) is a hot research area and there is a great demand for automation. The MNIST data is comprised of hand-written digits with little background noise making it a nice dataset to create, experiment and learn deep learning models with reasonably small comptuing resources.",
"# Figure 1\nImage(url= \"http://3.bp.blogspot.com/_UpN7DfJA0j4/TJtUBWPk0SI/AAAAAAAAABY/oWPMtmqJn3k/s1600/mnist_originals.png\", width=200, height=200)",
"Goal:\nOur goal is to train a classifier that will identify the digits in the MNIST dataset. \nApproach:\nThere are 4 stages in this lab: \n- Data reading: We will use the CNTK Text reader.\n- Data preprocessing: Covered in part A (suggested extension section). \n- Model creation: Multiclass Logistic Regression model.\n- Train-Test-Predict: This is the same workflow introduced in the lectures\nLogistic Regression\nLogistic Regression (LR) is a fundamental machine learning technique that uses a linear weighted combination of features and generates probability-based predictions of different classes. \nThere are two basic forms of LR: Binary LR (with a single output that can predict two classes) and multiclass LR (with multiple outputs, each of which is used to predict a single class). \n\nIn Binary Logistic Regression (see top of figure above), the input features are each scaled by an associated weight and summed together. The sum is passed through a squashing (aka activation) function and generates an output in [0,1]. This output value is then compared with a threshold (such as 0.5) to produce a binary label (0 or 1), predicting 1 of 2 classes. This technique supports only classification problems with two output classes, hence the name binary LR. In the binary LR example shown above, the sigmoid function is used as the squashing function.\nIn Multiclass Linear Regression (see bottom of figure above), 2 or more output nodes are used, one for each output class to be predicted. Each summation node uses its own set of weights to scale the input features and sum them together. Instead of passing the summed output of the weighted input features through a sigmoid squashing function, the output is often passed through a softmax function (which in addition to squashing, like the sigmoid, the softmax normalizes each nodes' output value using the sum of all unnormalized nodes). 
(Details in the context of MNIST image to follow)\nWe will use multiclass LR for classifying the MNIST digits (0-9) using 10 output nodes (1 for each of our output classes). In our approach, we will move the softmax function out of the model and into our Loss function used in training (details to follow).",
"# Import the relevant components\nfrom __future__ import print_function # Use a function definition from future version (say 3.x from 2.7 interpreter)\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nimport os\n\nimport cntk as C\n\n%matplotlib inline",
"In the block below, we check if we are running this notebook in the CNTK internal test machines by looking for environment variables defined there. We then select the right target device (GPU vs CPU) to test this notebook. In other cases, we use CNTK's default policy to use the best available device (GPU, if available, else CPU).",
"# Select the right target device when this notebook is being tested:\nif 'TEST_DEVICE' in os.environ:\n if os.environ['TEST_DEVICE'] == 'cpu':\n C.device.try_set_default_device(C.device.cpu())\n else:\n C.device.try_set_default_device(C.device.gpu(0))\n\n# Test for CNTK version\nif not C.__version__ == \"2.0\":\n raise Exception(\"this lab is designed to work with 2.0. Current Version: \" + C.__version__) ",
"Initialization",
"# Ensure we always get the same amount of randomness\nnp.random.seed(0)\nC.cntk_py.set_fixed_random_seed(1)\nC.cntk_py.force_deterministic_algorithms()\n\n# Define the data dimensions\ninput_dim = 784\nnum_output_classes = 10",
"Data reading\nThere are different ways one can read data into CNTK. The easiest way is to load the data in memory using NumPy / SciPy / Pandas readers. However, this can be done only for small data sets. Since deep learning requires large amount of data we have chosen in this course to show how to leverage built-in distributed readers that can scale to terrabytes of data with little extra effort. \nWe are using the MNIST data you have downloaded using Lab 1 DataLoader notebook. The dataset has 60,000 training images and 10,000 test images with each image being 28 x 28 pixels. Thus the number of features is equal to 784 (= 28 x 28 pixels), 1 per pixel. The variable num_output_classes is set to 10 corresponding to the number of digits (0-9) in the dataset.\nIn Lab 1, the data was downloaded and written to 2 CTF (CNTK Text Format) files, 1 for training, and 1 for testing. Each line of these text files takes the form:\n|labels 0 0 0 1 0 0 0 0 0 0 |features 0 0 0 0 ... \n (784 integers each representing a pixel)\n\nWe are going to use the image pixels corresponding the integer stream named \"features\". We define a create_reader function to read the training and test data using the CTF deserializer. The labels are 1-hot encoded. Refer to Lab 1 for data format visualizations.",
"# Read a CTF formatted text (as mentioned above) using the CTF deserializer from a file\ndef create_reader(path, is_training, input_dim, num_label_classes):\n \n labelStream = C.io.StreamDef(field='labels', shape=num_label_classes, is_sparse=False)\n featureStream = C.io.StreamDef(field='features', shape=input_dim, is_sparse=False)\n \n deserailizer = C.io.CTFDeserializer(path, C.io.StreamDefs(labels = labelStream, features = featureStream))\n \n return C.io.MinibatchSource(deserailizer,\n randomize = is_training, max_sweeps = C.io.INFINITELY_REPEAT if is_training else 1)\n\n# Ensure the training and test data is generated and available for this lab.\n# We search in two locations in the toolkit for the cached MNIST data set.\ndata_found = False\n\nfor data_dir in [os.path.join(\"..\", \"Examples\", \"Image\", \"DataSets\", \"MNIST\"),\n os.path.join(\"data\", \"MNIST\")]:\n train_file = os.path.join(data_dir, \"Train-28x28_cntk_text.txt\")\n test_file = os.path.join(data_dir, \"Test-28x28_cntk_text.txt\")\n if os.path.isfile(train_file) and os.path.isfile(test_file):\n data_found = True\n break\n \nif not data_found:\n raise ValueError(\"Please generate the data by completing Lab1_MNIST_DataLoader\")\n \nprint(\"Data directory is {0}\".format(data_dir))",
"Model Creation\nA multiclass logistic regression (LR) network is a simple building block that has been effectively powering many ML \napplications in the past decade. The figure below summarizes the model in the context of the MNIST data.\n\nLR is a simple linear model that takes as input, a vector of numbers describing the properties of what we are classifying (also known as a feature vector, $\\bf \\vec{x}$, the pixels in the input MNIST digit image) and emits the evidence ($z$). For each of the 10 digits, there is a vector of weights corresponding to the input pixels as show in the figure. These 10 weight vectors define the weight matrix ($\\bf {W}$) with dimension of 10 x 784. Each feature in the input layer is connected with a summation node by a corresponding weight $w$ (individual weight values from the $\\bf{W}$ matrix). Note there are 10 such nodes, 1 corresponding to each digit to be classified. \nThe first step is to compute the evidence for an observation. \n$$\\vec{z} = \\textbf{W} \\bf \\vec{x}^T + \\vec{b}$$ \nwhere $\\bf{W}$ is the weight matrix of dimension 10 x 784 and $\\vec{b}$ is known as the bias vector with lenght 10, one for each digit. \nThe evidence ($\\vec{z}$) is not squashed (hence no activation). Instead the output is normalized using a softmax function such that all the outputs add up to a value of 1, thus lending a probabilistic iterpretation to the prediction. In CNTK, we use the softmax operation combined with the cross entropy error as our Loss Function for training.",
"print(input_dim)\nprint(num_output_classes)",
"Network input and output: \n- input variable (a key CNTK concept): \n\nAn input variable is a container in which we fill different observations, in this case image pixels, during model learning (a.k.a.training) and model evaluation (a.k.a. testing). Thus, the shape of the input must match the shape of the data that will be provided. For example, when data are images each of height 10 pixels and width 5 pixels, the input feature dimension will be 50 (representing the total number of image pixels).\n\nKnowledge Check: What is the input dimension of your chosen model? This is fundamental to our understanding of variables in a network or model representation in CNTK.",
"input = C.input_variable(input_dim)\nlabel = C.input_variable(num_output_classes)",
"Logistic Regression network setup\nThe CNTK Layers module provides a Dense function that creates a fully connected layer which performs the above operations of weighted input summing and bias addition.",
"def create_model(features):\n with C.layers.default_options(init = C.glorot_uniform()):\n r = C.layers.Dense(num_output_classes, activation = None)(features)\n #r = C.layers.Dense(num_output_classes, activation = None)(C.ops.splice(C.ops.sqrt(features), features, C.ops.square(features)))\n return r",
"z will be used to represent the output of a network.",
"# Scale the input to 0-1 range by dividing each pixel by 255.\ninput_s = input/255\nz = create_model(input_s)\n\nprint(input_s)\nprint(input)",
"Training\nBelow, we define the Loss function, which is used to guide weight changes during training. \nAs explained in the lectures, we use the softmax function to map the accumulated evidences or activations to a probability distribution over the classes (Details of the softmax function and other activation functions).\nWe minimize the cross-entropy between the label and predicted probability by the network.",
"loss = C.cross_entropy_with_softmax(z, label)\n\nloss",
"Evaluation\nBelow, we define the Evaluation (or metric) function that is used to report a measurement of how well our model is performing.\nFor this problem, we choose the classification_error() function as our metric, which returns the average error over the associated samples (treating a match as \"1\", where the model's prediction matches the \"ground truth\" label, and a non-match as \"0\").",
"label_error = C.classification_error(z, label)",
"Configure training\nThe trainer strives to reduce the loss function by different optimization approaches, Stochastic Gradient Descent (sgd) being one of the most popular. Typically, one would start with random initialization of the model parameters. The sgd optimizer would calculate the loss or error between the predicted label against the corresponding ground-truth label and using gradient-decent generate a new set model parameters in a single iteration. \nThe aforementioned model parameter update using a single observation at a time is attractive since it does not require the entire data set (all observation) to be loaded in memory and also requires gradient computation over fewer datapoints, thus allowing for training on large data sets. However, the updates generated using a single observation sample at a time can vary wildly between iterations. An intermediate ground is to load a small set of observations and use an average of the loss or error from that set to update the model parameters. This subset is called a minibatch.\nWith minibatches, we sample observations from the larger training dataset. We repeat the process of model parameters update using different combination of training samples and over a period of time minimize the loss (and the error metric). When the incremental error rates are no longer changing significantly or after a preset number of maximum minibatches to train, we claim that our model is trained.\nOne of the key optimization parameters is called the learning_rate. For now, we can think of it as a scaling factor that modulates how much we change the parameters in any iteration.\nWith this information, we are ready to create our trainer.",
"# Instantiate the trainer object to drive the model training\nlearning_rate = 0.1 # 0.2\nlr_schedule = C.learning_rate_schedule(learning_rate, C.UnitType.minibatch)\nlearner = C.sgd(z.parameters, lr_schedule)\ntrainer = C.Trainer(z, (loss, label_error), [learner])",
"First let us create some helper functions that will be needed to visualize different functions associated with training.",
"# Define a utility function to compute the moving average sum.\n# A more efficient implementation is possible with np.cumsum() function\ndef moving_average(a, w=5):\n if len(a) < w:\n return a[:] # Need to send a copy of the array\n return [val if idx < w else sum(a[(idx-w):idx])/w for idx, val in enumerate(a)]\n\n\n# Defines a utility that prints the training progress\ndef print_training_progress(trainer, mb, frequency, verbose=1):\n training_loss = \"NA\"\n eval_error = \"NA\"\n\n if mb%frequency == 0:\n training_loss = trainer.previous_minibatch_loss_average\n eval_error = trainer.previous_minibatch_evaluation_average\n if verbose: \n print (\"Minibatch: {0}, Loss: {1:.4f}, Error: {2:.2f}%\".format(mb, training_loss, eval_error*100))\n \n return mb, training_loss, eval_error",
"<a id='#Run the trainer'></a>\nRun the trainer\nWe are now ready to train our fully connected neural net. We want to decide what data we need to feed into the training engine.\nIn this example, each iteration of the optimizer will work on minibatch_size sized samples. We would like to train on all 60000 observations. Additionally we will make multiple passes through the data specified by the variable num_sweeps_to_train_with. With these parameters we can proceed with training our simple feed forward network.",
"# Initialize the parameters for the trainer\nminibatch_size = 64\nnum_samples_per_sweep = 60000\nnum_sweeps_to_train_with = 10\nnum_minibatches_to_train = (num_samples_per_sweep * num_sweeps_to_train_with) / minibatch_size\n\nnum_minibatches_to_train\n\n# Create the reader to training data set\nreader_train = create_reader(train_file, True, input_dim, num_output_classes)\n\n# Map the data streams to the input and labels.\ninput_map = {\n label : reader_train.streams.labels,\n input : reader_train.streams.features\n} \n\n# Run the trainer on and perform model training\ntraining_progress_output_freq = 500\n\nplotdata = {\"batchsize\":[], \"loss\":[], \"error\":[]}\n\nimport time\nstart = time.clock()\nfor i in range(0, int(num_minibatches_to_train)):\n \n # Read a mini batch from the training data file\n data = reader_train.next_minibatch(minibatch_size, input_map = input_map)\n \n trainer.train_minibatch(data)\n batchsize, loss, error = print_training_progress(trainer, i, training_progress_output_freq, verbose=1)\n \n if not (loss == \"NA\" or error ==\"NA\"):\n plotdata[\"batchsize\"].append(batchsize)\n plotdata[\"loss\"].append(loss)\n plotdata[\"error\"].append(error)\n \nelapsed = (time.clock() - start)\nprint(\"Time used:\",elapsed)",
"Let us plot the errors over the different training minibatches. Note that as we progress in our training, the loss decreases though we do see some intermediate bumps.",
"# Compute the moving average loss to smooth out the noise in SGD\nplotdata[\"avgloss\"] = moving_average(plotdata[\"loss\"])\nplotdata[\"avgerror\"] = moving_average(plotdata[\"error\"])\n\n# Plot the training loss and the training error\nimport matplotlib.pyplot as plt\n\nplt.figure(1)\nplt.subplot(211)\nplt.plot(plotdata[\"batchsize\"], plotdata[\"avgloss\"], 'b--')\nplt.xlabel('Minibatch number')\nplt.ylabel('Loss')\nplt.title('Minibatch run vs. Training loss')\n\nplt.show()\n\nplt.subplot(212)\nplt.plot(plotdata[\"batchsize\"], plotdata[\"avgerror\"], 'r--')\nplt.xlabel('Minibatch number')\nplt.ylabel('Label Prediction Error')\nplt.title('Minibatch run vs. Label Prediction Error')\nplt.show()",
"Evaluation / Testing\nNow that we have trained the network, let us evaluate the trained network on the test data. This is done using trainer.test_minibatch.",
"# Read the training data\nreader_test = create_reader(test_file, False, input_dim, num_output_classes)\n\ntest_input_map = {\n label : reader_test.streams.labels,\n input : reader_test.streams.features,\n}\n\n# Test data for trained model\ntest_minibatch_size = 512\nnum_samples = 10000\nnum_minibatches_to_test = num_samples // test_minibatch_size\ntest_result = 0.0\n\nfor i in range(num_minibatches_to_test):\n \n # We are loading test data in batches specified by test_minibatch_size\n # Each data point in the minibatch is a MNIST digit image of 784 dimensions \n # with one pixel per dimension that we will encode / decode with the \n # trained model.\n data = reader_test.next_minibatch(test_minibatch_size,\n input_map = test_input_map)\n\n eval_error = trainer.test_minibatch(data)\n test_result = test_result + eval_error\n\n# Average of evaluation errors of all test minibatches\nprint(\"Average test error: {0:.2f}%\".format(test_result*100 / num_minibatches_to_test))",
"We have so far been dealing with aggregate measures of error. Let us now get the probabilities associated with individual data points. For each observation, the eval function returns the probability distribution across all the classes. The classifier is trained to recognize digits, hence has 10 classes. First let us route the network output through a softmax function. This maps the aggregated activations across the network to probabilities across the 10 classes.",
"out = C.softmax(z)",
"Let us test a small minibatch sample from the test data.",
"# Read the data for evaluation\nreader_eval = create_reader(test_file, False, input_dim, num_output_classes)\n\neval_minibatch_size = 25\neval_input_map = {input: reader_eval.streams.features} \n\ndata = reader_test.next_minibatch(eval_minibatch_size, input_map = test_input_map)\n\nimg_label = data[label].asarray()\nimg_data = data[input].asarray()\npredicted_label_prob = [out.eval(img_data[i]) for i in range(len(img_data))]\n\n# Find the index with the maximum value for both predicted as well as the ground truth\npred = [np.argmax(predicted_label_prob[i]) for i in range(len(predicted_label_prob))]\ngtlabel = [np.argmax(img_label[i]) for i in range(len(img_label))]\n\nprint(\"Label :\", gtlabel[:25])\nprint(\"Predicted:\", pred)",
"As you can see above, our model is not yet perfect. \nLet us visualize one of the test images and its associated label. Do they match?",
"# Plot a random image\nsample_number = 5\nplt.imshow(img_data[sample_number].reshape(28,28), cmap=\"gray_r\")\nplt.axis('off')\n\nimg_gt, img_pred = gtlabel[sample_number], pred[sample_number]\nprint(\"Image Label: \", img_pred)",
"Suggested Explorations\nA. Change the minibatch_size parameter (from 64) to 128 and then to 512 during training. What is the observed average test error rate (rounded to 2nd decimal place) with each new model?\nB. Increase the number of sweeps. How does the test error change?\nC. Can you change the network to reduce the training error rate? When do you see overfitting happening? \nD. Lets now add more features to our model. We will add square of the input values as additional features. You will take the input pixels, scale them by 255. Use C.square and C.splice functions to create a new model. Use this model to perform classification. Note: use the original setting for the rest of the notebook\nE. Now add sqrt as another set of features to the model. Use this model to perform classification."
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
goodwordalchemy/thinkstats_notes_and_exercises
|
code/chap04ex.ipynb
|
gpl-3.0
|
[
"Exercise from Think Stats, 2nd Edition (thinkstats2.com)<br>\nAllen Downey\nRead the pregnancy file.",
"%matplotlib inline\n\nimport nsfg\npreg = nsfg.ReadFemPreg()\n\nimport thinkstats2\nimport thinkplot\nimport numpy as np",
"Select live births, then make a CDF of <tt>totalwgt_lb</tt>.",
"live = preg[preg.outcome == 1]\nprint live\nwgt_cdf = thinkstats2.Cdf(live.totalwgt_lb, label='')",
"Display the CDF.",
"thinkplot.Cdf(wgt_cdf)\nthinkplot.Show(xlabel='birthweight',\n ylabel = 'CDF',\n title = 'Cumulative Distribution of Birthweights')",
"Find out how much you weighed at birth, if you can, and compute CDF(x).",
"wgt_cdf.PercentileRank(8.2)\n# wgt_cdf.PercentileRank(live.totalwgt_lb.mean())",
"If you are a first child, look up your birthweight in the CDF of first children; otherwise use the CDF of other children.",
"others = live[live.pregordr > 1]\nothers_wgt_cdf = thinkstats2.Cdf(others.totalwgt_lb)\nothers_wgt_cdf.PercentileRank(8.2)",
"Compute the percentile rank of your birthweight\nCompute the median birth weight by looking up the value associated with p=0.5.",
"wgt_cdf.Value(0.5)",
"Compute the interquartile range (IQR) by computing percentiles corresponding to 25 and 75.",
"iqr = (wgt_cdf.Percentile(25), wgt_cdf.Percentile(75))\niqr",
"Make a random selection from <tt>cdf</tt>.",
"wgt_cdf.Random()",
"Draw a random sample from <tt>cdf</tt>.",
"wgt_cdf.Sample(10)",
"Draw a random sample from <tt>cdf</tt>, then compute the percentile rank for each value, and plot the distribution of the percentile ranks.",
"values = wgt_cdf.Sample(1000)\nvalues_hist = thinkstats2.Hist(values, 'values')\n\nranks = [wgt_cdf.PercentileRank(v) for v in values]\nranks_hist = thinkstats2.Hist(ranks, 'ranks')\n\nthinkplot.PrePlot(3, rows=3)\nthinkplot.SubPlot(1)\nthinkplot.Hist(values_hist, label='values Hist')\n\nthinkplot.SubPlot(2)\nvalues_cdf = thinkstats2.Cdf(values, label='values CDF')\nthinkplot.Cdf(values_cdf)\n\n\nthinkplot.SubPlot(3)\nranks_cdf = thinkstats2.Cdf(ranks, label='ranks CDF')\nthinkplot.Cdf(ranks_cdf)",
"Generate 1000 random values using <tt>random.random()</tt> and plot their PMF.",
"rand_vals = [np.random.random() for i in range(100)]\n\nrv_pmf = thinkstats2.Pmf(rand_vals, label=\"random values\")\nthinkplot.Hist(rv_pmf)",
"Assuming that the PMF doesn't work very well, try plotting the CDF instead.",
"rv_cdf = thinkstats2.Cdf(rand_vals, label=\"random values\")\nthinkplot.Cdf(rv_cdf)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
tkurfurst/deep-learning
|
reinforcement/Q-learning-cart.ipynb
|
mit
|
[
"Deep Q-learning\nIn this notebook, we'll build a neural network that can learn to play games through reinforcement learning. More specifically, we'll use Q-learning to train an agent to play a game called Cart-Pole. In this game, a freely swinging pole is attached to a cart. The cart can move to the left and right, and the goal is to keep the pole upright as long as possible.\n\nWe can simulate this game using OpenAI Gym. First, let's check out how OpenAI Gym works. Then, we'll get into training an agent to play the Cart-Pole game.",
"import gym\nimport tensorflow as tf\nimport numpy as np",
"Note: Make sure you have OpenAI Gym cloned into the same directory with this notebook. I've included gym as a submodule, so you can run git submodule --init --recursive to pull the contents into the gym repo.",
"# Create the Cart-Pole game environment\nenv = gym.make('CartPole-v0')",
"We interact with the simulation through env. To show the simulation running, you can use env.render() to render one frame. Passing in an action as an integer to env.step will generate the next step in the simulation. You can see how many actions are possible from env.action_space and to get a random action you can use env.action_space.sample(). This is general to all Gym games. In the Cart-Pole game, there are two possible actions, moving the cart left or right. So there are two actions we can take, encoded as 0 and 1.\nRun the code below to watch the simulation run.",
"env.reset()\nrewards = []\nfor _ in range(100):\n env.render()\n state, reward, done, info = env.step(env.action_space.sample()) # take a random action\n rewards.append(reward)\n if done:\n rewards = []\n env.reset()\nenv.render(close=True)\nenv.reset()",
"To shut the window showing the simulation, use env.close().\nIf you ran the simulation above, we can look at the rewards:",
"print(rewards[-20:])\n\nprint(sum(rewards))\nprint(len(rewards))",
"The game resets after the pole has fallen past a certain angle. For each frame while the simulation is running, it returns a reward of 1.0. The longer the game runs, the more reward we get. Then, our network's goal is to maximize the reward by keeping the pole vertical. It will do this by moving the cart to the left and the right.\nQ-Network\nWe train our Q-learning agent using the Bellman Equation:\n$$\nQ(s, a) = r + \\gamma \\max{Q(s', a')}\n$$\nwhere $s$ is a state, $a$ is an action, and $s'$ is the next state from state $s$ and action $a$.\nBefore we used this equation to learn values for a Q-table. However, for this game there are a huge number of states available. The state has four values: the position and velocity of the cart, and the position and velocity of the pole. These are all real-valued numbers, so ignoring floating point precisions, you practically have infinite states. Instead of using a table then, we'll replace it with a neural network that will approximate the Q-table lookup function.\n<img src=\"assets/deep-q-learning.png\" width=450px>\nNow, our Q value, $Q(s, a)$ is calculated by passing in a state to the network. The output will be Q-values for each available action, with fully connected hidden layers.\n<img src=\"assets/q-network.png\" width=550px>\nAs I showed before, we can define our targets for training as $\\hat{Q}(s,a) = r + \\gamma \\max{Q(s', a')}$. Then we update the weights by minimizing $(\\hat{Q}(s,a) - Q(s,a))^2$. \nFor this Cart-Pole game, we have four inputs, one for each value in the state, and two outputs, one for each action. To get $\\hat{Q}$, we'll first choose an action, then simulate the game using that action. This will get us the next state, $s'$, and the reward. With that, we can calculate $\\hat{Q}$ then pass it back into the $Q$ network to run the optimizer and update the weights.\nBelow is my implementation of the Q-network. I used two fully connected layers with ReLU activations. 
Two seems to be good enough, three might be better. Feel free to try it out.",
"class QNetwork:\n def __init__(self, learning_rate=0.01, state_size=4, \n action_size=2, hidden_size=10, \n name='QNetwork'):\n # state inputs to the Q-network\n with tf.variable_scope(name):\n self.inputs_ = tf.placeholder(tf.float32, [None, state_size], name='inputs')\n \n # One hot encode the actions to later choose the Q-value for the action\n self.actions_ = tf.placeholder(tf.int32, [None], name='actions')\n one_hot_actions = tf.one_hot(self.actions_, action_size)\n \n # Target Q values for training\n self.targetQs_ = tf.placeholder(tf.float32, [None], name='target')\n \n # ReLU hidden layers\n self.fc1 = tf.contrib.layers.fully_connected(self.inputs_, hidden_size)\n self.fc2 = tf.contrib.layers.fully_connected(self.fc1, hidden_size)\n\n # Linear output layer\n self.output = tf.contrib.layers.fully_connected(self.fc2, action_size, \n activation_fn=None)\n \n ### Train with loss (targetQ - Q)^2\n # output has length 2, for two actions. This next line chooses\n # one value from output (per row) according to the one-hot encoded actions.\n self.Q = tf.reduce_sum(tf.multiply(self.output, one_hot_actions), axis=1)\n \n self.loss = tf.reduce_mean(tf.square(self.targetQs_ - self.Q))\n self.opt = tf.train.AdamOptimizer(learning_rate).minimize(self.loss)",
"Experience replay\nReinforcement learning algorithms can have stability issues due to correlations between states. To reduce correlations when training, we can store the agent's experiences and later draw a random mini-batch of those experiences to train on. \nHere, we'll create a Memory object that will store our experiences, our transitions $<s, a, r, s'>$. This memory will have a maxmium capacity, so we can keep newer experiences in memory while getting rid of older experiences. Then, we'll sample a random mini-batch of transitions $<s, a, r, s'>$ and train on those.\nBelow, I've implemented a Memory object. If you're unfamiliar with deque, this is a double-ended queue. You can think of it like a tube open on both sides. You can put objects in either side of the tube. But if it's full, adding anything more will push an object out the other side. This is a great data structure to use for the memory buffer.",
"from collections import deque\nclass Memory():\n def __init__(self, max_size = 1000):\n self.buffer = deque(maxlen=max_size)\n \n def add(self, experience):\n self.buffer.append(experience)\n \n def sample(self, batch_size):\n idx = np.random.choice(np.arange(len(self.buffer)), \n size=batch_size, \n replace=False)\n return [self.buffer[ii] for ii in idx]",
"Exploration - Exploitation\nTo learn about the environment and rules of the game, the agent needs to explore by taking random actions. We'll do this by choosing a random action with some probability $\\epsilon$ (epsilon). That is, with some probability $\\epsilon$ the agent will make a random action and with probability $1 - \\epsilon$, the agent will choose an action from $Q(s,a)$. This is called an $\\epsilon$-greedy policy.\nAt first, the agent needs to do a lot of exploring. Later when it has learned more, the agent can favor choosing actions based on what it has learned. This is called exploitation. We'll set it up so the agent is more likely to explore early in training, then more likely to exploit later in training.\nQ-Learning training algorithm\nPutting all this together, we can list out the algorithm we'll use to train the network. We'll train the network in episodes. One episode is one simulation of the game. For this game, the goal is to keep the pole upright for 195 frames. So we can start a new episode once meeting that goal. The game ends if the pole tilts over too far, or if the cart moves too far the left or right. When a game ends, we'll start a new episode. 
Now, to train the agent:\n\nInitialize the memory $D$\nInitialize the action-value network $Q$ with random weights\nFor episode = 1, $M$ do\nFor $t$, $T$ do\nWith probability $\\epsilon$ select a random action $a_t$, otherwise select $a_t = \\mathrm{argmax}_a Q(s,a)$\nExecute action $a_t$ in simulator and observe reward $r_{t+1}$ and new state $s_{t+1}$\nStore transition $<s_t, a_t, r_{t+1}, s_{t+1}>$ in memory $D$\nSample random mini-batch from $D$: $<s_j, a_j, r_j, s'_j>$\nSet $\\hat{Q}j = r_j$ if the episode ends at $j+1$, otherwise set $\\hat{Q}_j = r_j + \\gamma \\max{a'}{Q(s'_j, a')}$\nMake a gradient descent step with loss $(\\hat{Q}_j - Q(s_j, a_j))^2$\n\n\nendfor\nendfor\n\nHyperparameters\nOne of the more difficult aspects of reinforcememt learning are the large number of hyperparameters. Not only are we tuning the network, but we're tuning the simulation.",
"train_episodes = 1000 # max number of episodes to learn from\nmax_steps = 200 # max steps in an episode\ngamma = 0.99 # future reward discount\n\n# Exploration parameters\nexplore_start = 1.0 # exploration probability at start\nexplore_stop = 0.01 # minimum exploration probability \ndecay_rate = 0.0001 # exponential decay rate for exploration prob\n\n# Network parameters\nhidden_size = 64 # number of units in each Q-network hidden layer\nlearning_rate = 0.0001 # Q-network learning rate\n\n# Memory parameters\nmemory_size = 10000 # memory capacity\nbatch_size = 20 # experience mini-batch size\npretrain_length = batch_size # number experiences to pretrain the memory\n\ntf.reset_default_graph()\nmainQN = QNetwork(name='main', hidden_size=hidden_size, learning_rate=learning_rate)",
"Populate the experience memory\nHere I'm re-initializing the simulation and pre-populating the memory. The agent is taking random actions and storing the transitions in memory. This will help the agent with exploring the game.",
"# Initialize the simulation\nenv.reset()\n# Take one random step to get the pole and cart moving\nstate, reward, done, _ = env.step(env.action_space.sample())\n\nmemory = Memory(max_size=memory_size)\n\n# Make a bunch of random actions and store the experiences\nfor ii in range(pretrain_length):\n # Uncomment the line below to watch the simulation\n # env.render()\n\n # Make a random action\n action = env.action_space.sample()\n next_state, reward, done, _ = env.step(action)\n\n if done:\n # The simulation fails so no next state\n next_state = np.zeros(state.shape)\n # Add experience to memory\n memory.add((state, action, reward, next_state))\n \n # Start new episode\n env.reset()\n # Take one random step to get the pole and cart moving\n state, reward, done, _ = env.step(env.action_space.sample())\n else:\n # Add experience to memory\n memory.add((state, action, reward, next_state))\n state = next_state",
"Training\nBelow we'll train our agent. If you want to watch it train, uncomment the env.render() line. This is slow because it's rendering the frames slower than the network can train. But, it's cool to watch the agent get better at the game.",
"# Now train with experiences\nsaver = tf.train.Saver()\nrewards_list = []\nwith tf.Session() as sess:\n # Initialize variables\n sess.run(tf.global_variables_initializer())\n \n step = 0\n for ep in range(1, train_episodes):\n total_reward = 0\n t = 0\n while t < max_steps:\n step += 1\n # Uncomment this next line to watch the training\n # env.render() \n \n # Explore or Exploit\n explore_p = explore_stop + (explore_start - explore_stop)*np.exp(-decay_rate*step) \n if explore_p > np.random.rand():\n # Make a random action\n action = env.action_space.sample()\n else:\n # Get action from Q-network\n feed = {mainQN.inputs_: state.reshape((1, *state.shape))}\n Qs = sess.run(mainQN.output, feed_dict=feed)\n action = np.argmax(Qs)\n \n # Take action, get new state and reward\n next_state, reward, done, _ = env.step(action)\n \n total_reward += reward\n \n if done:\n # the episode ends so no next state\n next_state = np.zeros(state.shape)\n t = max_steps\n \n print('Episode: {}'.format(ep),\n 'Total reward: {}'.format(total_reward),\n 'Training loss: {:.4f}'.format(loss),\n 'Explore P: {:.4f}'.format(explore_p))\n rewards_list.append((ep, total_reward))\n \n # Add experience to memory\n memory.add((state, action, reward, next_state))\n \n # Start new episode\n env.reset()\n # Take one random step to get the pole and cart moving\n state, reward, done, _ = env.step(env.action_space.sample())\n\n else:\n # Add experience to memory\n memory.add((state, action, reward, next_state))\n state = next_state\n t += 1\n \n # Sample mini-batch from memory\n batch = memory.sample(batch_size)\n states = np.array([each[0] for each in batch])\n actions = np.array([each[1] for each in batch])\n rewards = np.array([each[2] for each in batch])\n next_states = np.array([each[3] for each in batch])\n \n # Train network\n target_Qs = sess.run(mainQN.output, feed_dict={mainQN.inputs_: next_states})\n \n # Set target_Qs to 0 for states where episode ends\n episode_ends = (next_states == 
np.zeros(states[0].shape)).all(axis=1)\n target_Qs[episode_ends] = (0, 0)\n \n targets = rewards + gamma * np.max(target_Qs, axis=1)\n\n loss, _ = sess.run([mainQN.loss, mainQN.opt],\n feed_dict={mainQN.inputs_: states,\n mainQN.targetQs_: targets,\n mainQN.actions_: actions})\n \n saver.save(sess, \"checkpoints/cartpole.ckpt\")\n",
"Visualizing training\nBelow I'll plot the total rewards for each episode. I'm plotting the rolling average too, in blue.",
"%matplotlib inline\nimport matplotlib.pyplot as plt\n\ndef running_mean(x, N):\n cumsum = np.cumsum(np.insert(x, 0, 0)) \n return (cumsum[N:] - cumsum[:-N]) / N \n\neps, rews = np.array(rewards_list).T\nsmoothed_rews = running_mean(rews, 10)\nplt.plot(eps[-len(smoothed_rews):], smoothed_rews)\nplt.plot(eps, rews, color='grey', alpha=0.3)\nplt.xlabel('Episode')\nplt.ylabel('Total Reward')",
"Testing\nLet's checkout how our trained agent plays the game.",
"test_episodes = 10\ntest_max_steps = 400\nenv.reset()\nwith tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))\n \n for ep in range(1, test_episodes):\n t = 0\n while t < test_max_steps:\n env.render() \n \n # Get action from Q-network\n feed = {mainQN.inputs_: state.reshape((1, *state.shape))}\n Qs = sess.run(mainQN.output, feed_dict=feed)\n action = np.argmax(Qs)\n \n # Take action, get new state and reward\n next_state, reward, done, _ = env.step(action)\n \n if done:\n t = test_max_steps\n env.reset()\n # Take one random step to get the pole and cart moving\n state, reward, done, _ = env.step(env.action_space.sample())\n\n else:\n state = next_state\n t += 1\n\nenv.close()",
"Extending this\nSo, Cart-Pole is a pretty simple game. However, the same model can be used to train an agent to play something much more complicated like Pong or Space Invaders. Instead of a state like we're using here though, you'd want to use convolutional layers to get the state from the screen images.\n\nI'll leave it as a challenge for you to use deep Q-learning to train an agent to play Atari games. Here's the original paper which will get you started: http://www.davidqiu.com:8888/research/nature14236.pdf."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
ES-DOC/esdoc-jupyterhub
|
notebooks/cas/cmip6/models/sandbox-3/atmos.ipynb
|
gpl-3.0
|
[
"ES-DOC CMIP6 Model Properties - Atmos\nMIP Era: CMIP6\nInstitute: CAS\nSource ID: SANDBOX-3\nTopic: Atmos\nSub-Topics: Dynamical Core, Radiation, Turbulence Convection, Microphysics Precipitation, Cloud Scheme, Observation Simulation, Gravity Waves, Solar, Volcanos. \nProperties: 156 (127 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:53:45\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook",
"# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'cas', 'sandbox-3', 'atmos')",
"Document Authors\nSet document authors",
"# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)",
"Document Contributors\nSpecify document contributors",
"# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)",
"Document Publication\nSpecify document publication status",
"# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)",
"Document Table of Contents\n1. Key Properties --> Overview\n2. Key Properties --> Resolution\n3. Key Properties --> Timestepping\n4. Key Properties --> Orography\n5. Grid --> Discretisation\n6. Grid --> Discretisation --> Horizontal\n7. Grid --> Discretisation --> Vertical\n8. Dynamical Core\n9. Dynamical Core --> Top Boundary\n10. Dynamical Core --> Lateral Boundary\n11. Dynamical Core --> Diffusion Horizontal\n12. Dynamical Core --> Advection Tracers\n13. Dynamical Core --> Advection Momentum\n14. Radiation\n15. Radiation --> Shortwave Radiation\n16. Radiation --> Shortwave GHG\n17. Radiation --> Shortwave Cloud Ice\n18. Radiation --> Shortwave Cloud Liquid\n19. Radiation --> Shortwave Cloud Inhomogeneity\n20. Radiation --> Shortwave Aerosols\n21. Radiation --> Shortwave Gases\n22. Radiation --> Longwave Radiation\n23. Radiation --> Longwave GHG\n24. Radiation --> Longwave Cloud Ice\n25. Radiation --> Longwave Cloud Liquid\n26. Radiation --> Longwave Cloud Inhomogeneity\n27. Radiation --> Longwave Aerosols\n28. Radiation --> Longwave Gases\n29. Turbulence Convection\n30. Turbulence Convection --> Boundary Layer Turbulence\n31. Turbulence Convection --> Deep Convection\n32. Turbulence Convection --> Shallow Convection\n33. Microphysics Precipitation\n34. Microphysics Precipitation --> Large Scale Precipitation\n35. Microphysics Precipitation --> Large Scale Cloud Microphysics\n36. Cloud Scheme\n37. Cloud Scheme --> Optical Cloud Properties\n38. Cloud Scheme --> Sub Grid Scale Water Distribution\n39. Cloud Scheme --> Sub Grid Scale Ice Distribution\n40. Observation Simulation\n41. Observation Simulation --> Isscp Attributes\n42. Observation Simulation --> Cosp Attributes\n43. Observation Simulation --> Radar Inputs\n44. Observation Simulation --> Lidar Inputs\n45. Gravity Waves\n46. Gravity Waves --> Orographic Gravity Waves\n47. Gravity Waves --> Non Orographic Gravity Waves\n48. Solar\n49. Solar --> Solar Pathways\n50. Solar --> Solar Constant\n51. 
Solar --> Orbital Parameters\n52. Solar --> Insolation Ozone\n53. Volcanos\n54. Volcanos --> Volcanoes Treatment \n1. Key Properties --> Overview\nTop level key properties\n1.1. Model Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of atmosphere model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.overview.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.2. Model Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nName of atmosphere model code (CAM 4.0, ARPEGE 3.2,...)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.overview.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.3. Model Family\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of atmospheric model.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.overview.model_family') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"AGCM\" \n# \"ARCM\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"1.4. Basic Approximations\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nBasic approximations made in the atmosphere.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"primitive equations\" \n# \"non-hydrostatic\" \n# \"anelastic\" \n# \"Boussinesq\" \n# \"hydrostatic\" \n# \"quasi-hydrostatic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"2. Key Properties --> Resolution\nCharacteristics of the model resolution\n2.1. Horizontal Resolution Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of the model grid, e.g. T42, N48.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"2.2. Canonical Horizontal Resolution\nIs Required: TRUE Type: STRING Cardinality: 1.1\nExpression quoted for gross comparisons of resolution, e.g. 2.5 x 3.75 degrees lat-lon.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"2.3. Range Horizontal Resolution\nIs Required: TRUE Type: STRING Cardinality: 1.1\nRange of horizontal resolution with spatial details, eg. 1 deg (Equator) - 0.5 deg",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"2.4. Number Of Vertical Levels\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nNumber of vertical levels resolved on the computational grid.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"2.5. High Top\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nDoes the atmosphere have a high-top? High-Top atmospheres have a fully resolved stratosphere with a model top above the stratopause.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.resolution.high_top') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"3. Key Properties --> Timestepping\nCharacteristics of the atmosphere model time stepping\n3.1. Timestep Dynamics\nIs Required: TRUE Type: STRING Cardinality: 1.1\nTimestep for the dynamics, e.g. 30 min.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"3.2. Timestep Shortwave Radiative Transfer\nIs Required: FALSE Type: STRING Cardinality: 0.1\nTimestep for the shortwave radiative transfer, e.g. 1.5 hours.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"3.3. Timestep Longwave Radiative Transfer\nIs Required: FALSE Type: STRING Cardinality: 0.1\nTimestep for the longwave radiative transfer, e.g. 3 hours.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4. Key Properties --> Orography\nCharacteristics of the model orography\n4.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTime adaptation of the orography.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.orography.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"present day\" \n# \"modified\" \n# TODO - please enter value(s)\n",
"4.2. Changes\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nIf the orography type is modified describe the time adaptation changes.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.orography.changes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"related to ice sheets\" \n# \"related to tectonics\" \n# \"modified mean\" \n# \"modified variance if taken into account in model (cf gravity waves)\" \n# TODO - please enter value(s)\n",
"5. Grid --> Discretisation\nAtmosphere grid discretisation\n5.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of grid discretisation in the atmosphere",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6. Grid --> Discretisation --> Horizontal\nAtmosphere discretisation in the horizontal\n6.1. Scheme Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHorizontal discretisation type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"spectral\" \n# \"fixed grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"6.2. Scheme Method\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHorizontal discretisation method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"finite elements\" \n# \"finite volumes\" \n# \"finite difference\" \n# \"centered finite difference\" \n# TODO - please enter value(s)\n",
"6.3. Scheme Order\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHorizontal discretisation function order",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"second\" \n# \"third\" \n# \"fourth\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"6.4. Horizontal Pole\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nHorizontal discretisation pole singularity treatment",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"filter\" \n# \"pole rotation\" \n# \"artificial island\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"6.5. Grid Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHorizontal grid type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Gaussian\" \n# \"Latitude-Longitude\" \n# \"Cubed-Sphere\" \n# \"Icosahedral\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"7. Grid --> Discretisation --> Vertical\nAtmosphere discretisation in the vertical\n7.1. Coordinate Type\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nType of vertical coordinate system",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"isobaric\" \n# \"sigma\" \n# \"hybrid sigma-pressure\" \n# \"hybrid pressure\" \n# \"vertically lagrangian\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"8. Dynamical Core\nCharacteristics of the dynamical core\n8.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of atmosphere dynamical core",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.2. Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCommonly used name for the dynamical core of the model.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.3. Timestepping Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTimestepping framework type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.timestepping_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Adams-Bashforth\" \n# \"explicit\" \n# \"implicit\" \n# \"semi-implicit\" \n# \"leap frog\" \n# \"multi-step\" \n# \"Runge Kutta fifth order\" \n# \"Runge Kutta second order\" \n# \"Runge Kutta third order\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"8.4. Prognostic Variables\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nList of the model prognostic variables",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"surface pressure\" \n# \"wind components\" \n# \"divergence/curl\" \n# \"temperature\" \n# \"potential temperature\" \n# \"total water\" \n# \"water vapour\" \n# \"water liquid\" \n# \"water ice\" \n# \"total water moments\" \n# \"clouds\" \n# \"radiation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"9. Dynamical Core --> Top Boundary\nType of boundary layer at the top of the model\n9.1. Top Boundary Condition\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTop boundary condition",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"sponge layer\" \n# \"radiation boundary condition\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"9.2. Top Heat\nIs Required: TRUE Type: STRING Cardinality: 1.1\nTop boundary heat treatment",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"9.3. Top Wind\nIs Required: TRUE Type: STRING Cardinality: 1.1\nTop boundary wind treatment",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"10. Dynamical Core --> Lateral Boundary\nType of lateral boundary condition (if the model is a regional model)\n10.1. Condition\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nType of lateral boundary condition",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"sponge layer\" \n# \"radiation boundary condition\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"11. Dynamical Core --> Diffusion Horizontal\nHorizontal diffusion scheme\n11.1. Scheme Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nHorizontal diffusion scheme name",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"11.2. Scheme Method\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHorizontal diffusion scheme method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"iterated Laplacian\" \n# \"bi-harmonic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"12. Dynamical Core --> Advection Tracers\nTracer advection scheme\n12.1. Scheme Name\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nTracer advection scheme name",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Heun\" \n# \"Roe and VanLeer\" \n# \"Roe and Superbee\" \n# \"Prather\" \n# \"UTOPIA\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"12.2. Scheme Characteristics\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nTracer advection scheme characteristics",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Eulerian\" \n# \"modified Euler\" \n# \"Lagrangian\" \n# \"semi-Lagrangian\" \n# \"cubic semi-Lagrangian\" \n# \"quintic semi-Lagrangian\" \n# \"mass-conserving\" \n# \"finite volume\" \n# \"flux-corrected\" \n# \"linear\" \n# \"quadratic\" \n# \"quartic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"12.3. Conserved Quantities\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nTracer advection scheme conserved quantities",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"dry mass\" \n# \"tracer mass\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"12.4. Conservation Method\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTracer advection scheme conservation method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"conservation fixer\" \n# \"Priestley algorithm\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13. Dynamical Core --> Advection Momentum\nMomentum advection scheme\n13.1. Scheme Name\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nMomentum advection schemes name",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"VanLeer\" \n# \"Janjic\" \n# \"SUPG (Streamline Upwind Petrov-Galerkin)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.2. Scheme Characteristics\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nMomentum advection scheme characteristics",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"2nd order\" \n# \"4th order\" \n# \"cell-centred\" \n# \"staggered grid\" \n# \"semi-staggered grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.3. Scheme Staggering Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nMomentum advection scheme staggering type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Arakawa B-grid\" \n# \"Arakawa C-grid\" \n# \"Arakawa D-grid\" \n# \"Arakawa E-grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.4. Conserved Quantities\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nMomentum advection scheme conserved quantities",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Angular momentum\" \n# \"Horizontal momentum\" \n# \"Enstrophy\" \n# \"Mass\" \n# \"Total energy\" \n# \"Vorticity\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.5. Conservation Method\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nMomentum advection scheme conservation method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"conservation fixer\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"14. Radiation\nCharacteristics of the atmosphere radiation process\n14.1. Aerosols\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nAerosols whose radiative effect is taken into account in the atmosphere model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.aerosols') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"sulphate\" \n# \"nitrate\" \n# \"sea salt\" \n# \"dust\" \n# \"ice\" \n# \"organic\" \n# \"BC (black carbon / soot)\" \n# \"SOA (secondary organic aerosols)\" \n# \"POM (particulate organic matter)\" \n# \"polar stratospheric ice\" \n# \"NAT (nitric acid trihydrate)\" \n# \"NAD (nitric acid dihydrate)\" \n# \"STS (supercooled ternary solution aerosol particle)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"15. Radiation --> Shortwave Radiation\nProperties of the shortwave radiation scheme\n15.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of shortwave radiation in the atmosphere",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"15.2. Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCommonly used name for the shortwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"15.3. Spectral Integration\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nShortwave radiation scheme spectral integration",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"wide-band model\" \n# \"correlated-k\" \n# \"exponential sum fitting\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"15.4. Transport Calculation\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nShortwave radiation transport calculation methods",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"two-stream\" \n# \"layer interaction\" \n# \"bulk\" \n# \"adaptive\" \n# \"multi-stream\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"15.5. Spectral Intervals\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nShortwave radiation scheme number of spectral intervals",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"16. Radiation --> Shortwave GHG\nRepresentation of greenhouse gases in the shortwave radiation scheme\n16.1. Greenhouse Gas Complexity\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nComplexity of greenhouse gases whose shortwave radiative effects are taken into account in the atmosphere model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"CO2\" \n# \"CH4\" \n# \"N2O\" \n# \"CFC-11 eq\" \n# \"CFC-12 eq\" \n# \"HFC-134a eq\" \n# \"Explicit ODSs\" \n# \"Explicit other fluorinated gases\" \n# \"O3\" \n# \"H2O\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"16.2. ODS\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nOzone depleting substances whose shortwave radiative effects are explicitly taken into account in the atmosphere model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"CFC-12\" \n# \"CFC-11\" \n# \"CFC-113\" \n# \"CFC-114\" \n# \"CFC-115\" \n# \"HCFC-22\" \n# \"HCFC-141b\" \n# \"HCFC-142b\" \n# \"Halon-1211\" \n# \"Halon-1301\" \n# \"Halon-2402\" \n# \"methyl chloroform\" \n# \"carbon tetrachloride\" \n# \"methyl chloride\" \n# \"methylene chloride\" \n# \"chloroform\" \n# \"methyl bromide\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"16.3. Other Flourinated Gases\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nOther flourinated gases whose shortwave radiative effects are explicitly taken into account in the atmosphere model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"HFC-134a\" \n# \"HFC-23\" \n# \"HFC-32\" \n# \"HFC-125\" \n# \"HFC-143a\" \n# \"HFC-152a\" \n# \"HFC-227ea\" \n# \"HFC-236fa\" \n# \"HFC-245fa\" \n# \"HFC-365mfc\" \n# \"HFC-43-10mee\" \n# \"CF4\" \n# \"C2F6\" \n# \"C3F8\" \n# \"C4F10\" \n# \"C5F12\" \n# \"C6F14\" \n# \"C7F16\" \n# \"C8F18\" \n# \"c-C4F8\" \n# \"NF3\" \n# \"SF6\" \n# \"SO2F2\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"17. Radiation --> Shortwave Cloud Ice\nShortwave radiative properties of ice crystals in clouds\n17.1. General Interactions\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nGeneral shortwave radiative interactions with cloud ice crystals",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"17.2. Physical Representation\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPhysical representation of cloud ice crystals in the shortwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"bi-modal size distribution\" \n# \"ensemble of ice crystals\" \n# \"mean projected area\" \n# \"ice water path\" \n# \"crystal asymmetry\" \n# \"crystal aspect ratio\" \n# \"effective crystal radius\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"17.3. Optical Methods\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nOptical methods applicable to cloud ice crystals in the shortwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"T-matrix\" \n# \"geometric optics\" \n# \"finite difference time domain (FDTD)\" \n# \"Mie theory\" \n# \"anomalous diffraction approximation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"18. Radiation --> Shortwave Cloud Liquid\nShortwave radiative properties of liquid droplets in clouds\n18.1. General Interactions\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nGeneral shortwave radiative interactions with cloud liquid droplets",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"18.2. Physical Representation\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPhysical representation of cloud liquid droplets in the shortwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"cloud droplet number concentration\" \n# \"effective cloud droplet radii\" \n# \"droplet size distribution\" \n# \"liquid water path\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"18.3. Optical Methods\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nOptical methods applicable to cloud liquid droplets in the shortwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"geometric optics\" \n# \"Mie theory\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"19. Radiation --> Shortwave Cloud Inhomogeneity\nCloud inhomogeneity in the shortwave radiation scheme\n19.1. Cloud Inhomogeneity\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nMethod for taking into account horizontal cloud inhomogeneity",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Monte Carlo Independent Column Approximation\" \n# \"Triplecloud\" \n# \"analytic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"20. Radiation --> Shortwave Aerosols\nShortwave radiative properties of aerosols\n20.1. General Interactions\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nGeneral shortwave radiative interactions with aerosols",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"20.2. Physical Representation\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPhysical representation of aerosols in the shortwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"number concentration\" \n# \"effective radii\" \n# \"size distribution\" \n# \"asymmetry\" \n# \"aspect ratio\" \n# \"mixing state\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"20.3. Optical Methods\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nOptical methods applicable to aerosols in the shortwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"T-matrix\" \n# \"geometric optics\" \n# \"finite difference time domain (FDTD)\" \n# \"Mie theory\" \n# \"anomalous diffraction approximation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"21. Radiation --> Shortwave Gases\nShortwave radiative properties of gases\n21.1. General Interactions\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nGeneral shortwave radiative interactions with gases",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"22. Radiation --> Longwave Radiation\nProperties of the longwave radiation scheme\n22.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of longwave radiation in the atmosphere",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"22.2. Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCommonly used name for the longwave radiation scheme.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_radiation.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"22.3. Spectral Integration\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nLongwave radiation scheme spectral integration",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"wide-band model\" \n# \"correlated-k\" \n# \"exponential sum fitting\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"22.4. Transport Calculation\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nLongwave radiation transport calculation methods",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"two-stream\" \n# \"layer interaction\" \n# \"bulk\" \n# \"adaptive\" \n# \"multi-stream\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"22.5. Spectral Intervals\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nLongwave radiation scheme number of spectral intervals",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"23. Radiation --> Longwave GHG\nRepresentation of greenhouse gases in the longwave radiation scheme\n23.1. Greenhouse Gas Complexity\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nComplexity of greenhouse gases whose longwave radiative effects are taken into account in the atmosphere model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"CO2\" \n# \"CH4\" \n# \"N2O\" \n# \"CFC-11 eq\" \n# \"CFC-12 eq\" \n# \"HFC-134a eq\" \n# \"Explicit ODSs\" \n# \"Explicit other fluorinated gases\" \n# \"O3\" \n# \"H2O\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"23.2. ODS\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nOzone depleting substances whose longwave radiative effects are explicitly taken into account in the atmosphere model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"CFC-12\" \n# \"CFC-11\" \n# \"CFC-113\" \n# \"CFC-114\" \n# \"CFC-115\" \n# \"HCFC-22\" \n# \"HCFC-141b\" \n# \"HCFC-142b\" \n# \"Halon-1211\" \n# \"Halon-1301\" \n# \"Halon-2402\" \n# \"methyl chloroform\" \n# \"carbon tetrachloride\" \n# \"methyl chloride\" \n# \"methylene chloride\" \n# \"chloroform\" \n# \"methyl bromide\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"23.3. Other Fluorinated Gases\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nOther fluorinated gases whose longwave radiative effects are explicitly taken into account in the atmosphere model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"HFC-134a\" \n# \"HFC-23\" \n# \"HFC-32\" \n# \"HFC-125\" \n# \"HFC-143a\" \n# \"HFC-152a\" \n# \"HFC-227ea\" \n# \"HFC-236fa\" \n# \"HFC-245fa\" \n# \"HFC-365mfc\" \n# \"HFC-43-10mee\" \n# \"CF4\" \n# \"C2F6\" \n# \"C3F8\" \n# \"C4F10\" \n# \"C5F12\" \n# \"C6F14\" \n# \"C7F16\" \n# \"C8F18\" \n# \"c-C4F8\" \n# \"NF3\" \n# \"SF6\" \n# \"SO2F2\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"24. Radiation --> Longwave Cloud Ice\nLongwave radiative properties of ice crystals in clouds\n24.1. General Interactions\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nGeneral longwave radiative interactions with cloud ice crystals",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"24.2. Physical Representation\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPhysical representation of cloud ice crystals in the longwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"bi-modal size distribution\" \n# \"ensemble of ice crystals\" \n# \"mean projected area\" \n# \"ice water path\" \n# \"crystal asymmetry\" \n# \"crystal aspect ratio\" \n# \"effective crystal radius\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"24.3. Optical Methods\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nOptical methods applicable to cloud ice crystals in the longwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"T-matrix\" \n# \"geometric optics\" \n# \"finite difference time domain (FDTD)\" \n# \"Mie theory\" \n# \"anomalous diffraction approximation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"25. Radiation --> Longwave Cloud Liquid\nLongwave radiative properties of liquid droplets in clouds\n25.1. General Interactions\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nGeneral longwave radiative interactions with cloud liquid droplets",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"25.2. Physical Representation\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPhysical representation of cloud liquid droplets in the longwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"cloud droplet number concentration\" \n# \"effective cloud droplet radii\" \n# \"droplet size distribution\" \n# \"liquid water path\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"25.3. Optical Methods\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nOptical methods applicable to cloud liquid droplets in the longwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"geometric optics\" \n# \"Mie theory\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"26. Radiation --> Longwave Cloud Inhomogeneity\nCloud inhomogeneity in the longwave radiation scheme\n26.1. Cloud Inhomogeneity\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nMethod for taking into account horizontal cloud inhomogeneity",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Monte Carlo Independent Column Approximation\" \n# \"Triplecloud\" \n# \"analytic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"27. Radiation --> Longwave Aerosols\nLongwave radiative properties of aerosols\n27.1. General Interactions\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nGeneral longwave radiative interactions with aerosols",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"27.2. Physical Representation\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPhysical representation of aerosols in the longwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"number concentration\" \n# \"effective radii\" \n# \"size distribution\" \n# \"asymmetry\" \n# \"aspect ratio\" \n# \"mixing state\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"27.3. Optical Methods\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nOptical methods applicable to aerosols in the longwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"T-matrix\" \n# \"geometric optics\" \n# \"finite difference time domain (FDTD)\" \n# \"Mie theory\" \n# \"anomalous diffraction approximation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"28. Radiation --> Longwave Gases\nLongwave radiative properties of gases\n28.1. General Interactions\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nGeneral longwave radiative interactions with gases",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"29. Turbulence Convection\nAtmosphere Convective Turbulence and Clouds\n29.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of atmosphere convection and turbulence",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"30. Turbulence Convection --> Boundary Layer Turbulence\nProperties of the boundary layer turbulence scheme\n30.1. Scheme Name\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nBoundary layer turbulence scheme name",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Mellor-Yamada\" \n# \"Holtslag-Boville\" \n# \"EDMF\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"30.2. Scheme Type\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nBoundary layer turbulence scheme type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"TKE prognostic\" \n# \"TKE diagnostic\" \n# \"TKE coupled with water\" \n# \"vertical profile of Kz\" \n# \"non-local diffusion\" \n# \"Monin-Obukhov similarity\" \n# \"Coastal Buddy Scheme\" \n# \"Coupled with convection\" \n# \"Coupled with gravity waves\" \n# \"Depth capped at cloud base\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"30.3. Closure Order\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nBoundary layer turbulence scheme closure order",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"30.4. Counter Gradient\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nUses boundary layer turbulence scheme counter gradient",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"31. Turbulence Convection --> Deep Convection\nProperties of the deep convection scheme\n31.1. Scheme Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDeep convection scheme name",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"31.2. Scheme Type\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nDeep convection scheme type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"mass-flux\" \n# \"adjustment\" \n# \"plume ensemble\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"31.3. Scheme Method\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nDeep convection scheme method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"CAPE\" \n# \"bulk\" \n# \"ensemble\" \n# \"CAPE/WFN based\" \n# \"TKE/CIN based\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"31.4. Processes\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPhysical processes taken into account in the parameterisation of deep convection",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"vertical momentum transport\" \n# \"convective momentum transport\" \n# \"entrainment\" \n# \"detrainment\" \n# \"penetrative convection\" \n# \"updrafts\" \n# \"downdrafts\" \n# \"radiative effect of anvils\" \n# \"re-evaporation of convective precipitation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"31.5. Microphysics\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nMicrophysics scheme for deep convection. Microphysical processes directly control the amount of detrainment of cloud hydrometeor and water vapor from updrafts",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"tuning parameter based\" \n# \"single moment\" \n# \"two moment\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"32. Turbulence Convection --> Shallow Convection\nProperties of the shallow convection scheme\n32.1. Scheme Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nShallow convection scheme name",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"32.2. Scheme Type\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nshallow convection scheme type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"mass-flux\" \n# \"cumulus-capped boundary layer\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"32.3. Scheme Method\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nshallow convection scheme method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"same as deep (unified)\" \n# \"included in boundary layer turbulence\" \n# \"separate diagnosis\" \n# TODO - please enter value(s)\n",
"32.4. Processes\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPhysical processes taken into account in the parameterisation of shallow convection",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"convective momentum transport\" \n# \"entrainment\" \n# \"detrainment\" \n# \"penetrative convection\" \n# \"re-evaporation of convective precipitation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"32.5. Microphysics\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nMicrophysics scheme for shallow convection",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"tuning parameter based\" \n# \"single moment\" \n# \"two moment\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"33. Microphysics Precipitation\nLarge Scale Cloud Microphysics and Precipitation\n33.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of large scale cloud microphysics and precipitation",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.microphysics_precipitation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"34. Microphysics Precipitation --> Large Scale Precipitation\nProperties of the large scale precipitation scheme\n34.1. Scheme Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCommonly used name of the large scale precipitation parameterisation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"34.2. Hydrometeors\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPrecipitating hydrometeors taken into account in the large scale precipitation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"liquid rain\" \n# \"snow\" \n# \"hail\" \n# \"graupel\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"35. Microphysics Precipitation --> Large Scale Cloud Microphysics\nProperties of the large scale cloud microphysics scheme\n35.1. Scheme Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCommonly used name of the microphysics parameterisation scheme used for large scale clouds.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"35.2. Processes\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nLarge scale cloud microphysics processes",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"mixed phase\" \n# \"cloud droplets\" \n# \"cloud ice\" \n# \"ice nucleation\" \n# \"water vapour deposition\" \n# \"effect of raindrops\" \n# \"effect of snow\" \n# \"effect of graupel\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"36. Cloud Scheme\nCharacteristics of the cloud scheme\n36.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of the atmosphere cloud scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"36.2. Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCommonly used name for the cloud scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"36.3. Atmos Coupling\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nAtmosphere components that are linked to the cloud scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"atmosphere_radiation\" \n# \"atmosphere_microphysics_precipitation\" \n# \"atmosphere_turbulence_convection\" \n# \"atmosphere_gravity_waves\" \n# \"atmosphere_solar\" \n# \"atmosphere_volcano\" \n# \"atmosphere_cloud_simulator\" \n# TODO - please enter value(s)\n",
"36.4. Uses Separate Treatment\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nDifferent cloud schemes for the different types of clouds (convective, stratiform and boundary layer)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"36.5. Processes\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nProcesses included in the cloud scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"entrainment\" \n# \"detrainment\" \n# \"bulk cloud\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"36.6. Prognostic Scheme\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs the cloud scheme a prognostic scheme?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"36.7. Diagnostic Scheme\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs the cloud scheme a diagnostic scheme?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"36.8. Prognostic Variables\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nList the prognostic variables used by the cloud scheme, if applicable.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"cloud amount\" \n# \"liquid\" \n# \"ice\" \n# \"rain\" \n# \"snow\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"37. Cloud Scheme --> Optical Cloud Properties\nOptical cloud properties\n37.1. Cloud Overlap Method\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nMethod for taking into account overlapping of cloud layers",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"random\" \n# \"maximum\" \n# \"maximum-random\" \n# \"exponential\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"37.2. Cloud Inhomogeneity\nIs Required: FALSE Type: STRING Cardinality: 0.1\nMethod for taking into account cloud inhomogeneity",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"38. Cloud Scheme --> Sub Grid Scale Water Distribution\nSub-grid scale water distribution\n38.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nSub-grid scale water distribution type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# TODO - please enter value(s)\n",
"38.2. Function Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nSub-grid scale water distribution function name",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"38.3. Function Order\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nSub-grid scale water distribution function type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"38.4. Convection Coupling\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nSub-grid scale water distribution coupling with convection",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"coupled with deep\" \n# \"coupled with shallow\" \n# \"not coupled with convection\" \n# TODO - please enter value(s)\n",
"39. Cloud Scheme --> Sub Grid Scale Ice Distribution\nSub-grid scale ice distribution\n39.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nSub-grid scale ice distribution type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# TODO - please enter value(s)\n",
"39.2. Function Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nSub-grid scale ice distribution function name",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"39.3. Function Order\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nSub-grid scale ice distribution function type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"39.4. Convection Coupling\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nSub-grid scale ice distribution coupling with convection",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"coupled with deep\" \n# \"coupled with shallow\" \n# \"not coupled with convection\" \n# TODO - please enter value(s)\n",
"40. Observation Simulation\nCharacteristics of observation simulation\n40.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of observation simulator characteristics",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"41. Observation Simulation --> Isscp Attributes\nISSCP Characteristics\n41.1. Top Height Estimation Method\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nCloud simulator ISSCP top height estimation methodUo",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"no adjustment\" \n# \"IR brightness\" \n# \"visible optical depth\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"41.2. Top Height Direction\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nCloud simulator ISSCP top height direction",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"lowest altitude level\" \n# \"highest altitude level\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"42. Observation Simulation --> Cosp Attributes\nCFMIP Observational Simulator Package attributes\n42.1. Run Configuration\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nCloud simulator COSP run configuration",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Inline\" \n# \"Offline\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"42.2. Number Of Grid Points\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nCloud simulator COSP number of grid points",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"42.3. Number Of Sub Columns\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nCloud simulator COSP number of sub-cloumns used to simulate sub-grid variability",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"42.4. Number Of Levels\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nCloud simulator COSP number of levels",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"43. Observation Simulation --> Radar Inputs\nCharacteristics of the cloud radar simulator\n43.1. Frequency\nIs Required: TRUE Type: FLOAT Cardinality: 1.1\nCloud simulator radar frequency (Hz)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"43.2. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nCloud simulator radar type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"surface\" \n# \"space borne\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"43.3. Gas Absorption\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nCloud simulator radar uses gas absorption",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"43.4. Effective Radius\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nCloud simulator radar uses effective radius",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"44. Observation Simulation --> Lidar Inputs\nCharacteristics of the cloud lidar simulator\n44.1. Ice Types\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nCloud simulator lidar ice type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"ice spheres\" \n# \"ice non-spherical\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"44.2. Overlap\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nCloud simulator lidar overlap",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"max\" \n# \"random\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"45. Gravity Waves\nCharacteristics of the parameterised gravity waves in the atmosphere, whether from orography or other sources.\n45.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of gravity wave parameterisation in the atmosphere",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"45.2. Sponge Layer\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nSponge layer in the upper levels in order to avoid gravity wave reflection at the top.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.sponge_layer') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Rayleigh friction\" \n# \"Diffusive sponge layer\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"45.3. Background\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nBackground wave distribution",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"continuous spectrum\" \n# \"discrete spectrum\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"45.4. Subgrid Scale Orography\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nSubgrid scale orography effects taken into account.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"effect on drag\" \n# \"effect on lifting\" \n# \"enhanced topography\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"46. Gravity Waves --> Orographic Gravity Waves\nGravity waves generated due to the presence of orography\n46.1. Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCommonly used name for the orographic gravity wave scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"46.2. Source Mechanisms\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nOrographic gravity wave source mechanisms",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"linear mountain waves\" \n# \"hydraulic jump\" \n# \"envelope orography\" \n# \"low level flow blocking\" \n# \"statistical sub-grid scale variance\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"46.3. Calculation Method\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nOrographic gravity wave calculation method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"non-linear calculation\" \n# \"more than two cardinal directions\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"46.4. Propagation Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nOrographic gravity wave propogation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"linear theory\" \n# \"non-linear theory\" \n# \"includes boundary layer ducting\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"46.5. Dissipation Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nOrographic gravity wave dissipation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"total wave\" \n# \"single wave\" \n# \"spectral\" \n# \"linear\" \n# \"wave saturation vs Richardson number\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"47. Gravity Waves --> Non Orographic Gravity Waves\nGravity waves generated by non-orographic processes.\n47.1. Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCommonly used name for the non-orographic gravity wave scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"47.2. Source Mechanisms\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nNon-orographic gravity wave source mechanisms",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"convection\" \n# \"precipitation\" \n# \"background spectrum\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"47.3. Calculation Method\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nNon-orographic gravity wave calculation method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"spatially dependent\" \n# \"temporally dependent\" \n# TODO - please enter value(s)\n",
"47.4. Propagation Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nNon-orographic gravity wave propogation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"linear theory\" \n# \"non-linear theory\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"47.5. Dissipation Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nNon-orographic gravity wave dissipation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"total wave\" \n# \"single wave\" \n# \"spectral\" \n# \"linear\" \n# \"wave saturation vs Richardson number\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"48. Solar\nTop of atmosphere solar insolation characteristics\n48.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of solar insolation of the atmosphere",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"49. Solar --> Solar Pathways\nPathways for solar forcing of the atmosphere\n49.1. Pathways\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPathways for the solar forcing of the atmosphere model domain",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.solar_pathways.pathways') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"SW radiation\" \n# \"precipitating energetic particles\" \n# \"cosmic rays\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"50. Solar --> Solar Constant\nSolar constant and top of atmosphere insolation characteristics\n50.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTime adaptation of the solar constant.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.solar_constant.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"fixed\" \n# \"transient\" \n# TODO - please enter value(s)\n",
"50.2. Fixed Value\nIs Required: FALSE Type: FLOAT Cardinality: 0.1\nIf the solar constant is fixed, enter the value of the solar constant (W m-2).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"50.3. Transient Characteristics\nIs Required: TRUE Type: STRING Cardinality: 1.1\nsolar constant transient characteristics (W m-2)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"51. Solar --> Orbital Parameters\nOrbital parameters and top of atmosphere insolation characteristics\n51.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTime adaptation of orbital parameters",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.orbital_parameters.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"fixed\" \n# \"transient\" \n# TODO - please enter value(s)\n",
"51.2. Fixed Reference Date\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nReference date for fixed orbital parameters (yyyy)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"51.3. Transient Method\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescription of transient orbital parameters",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"51.4. Computation Method\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nMethod used for computing orbital parameters.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Berger 1978\" \n# \"Laskar 2004\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"52. Solar --> Insolation Ozone\nImpact of solar insolation on stratospheric ozone\n52.1. Solar Ozone Impact\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nDoes top of atmosphere insolation impact on stratospheric ozone?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"53. Volcanos\nCharacteristics of the implementation of volcanoes\n53.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of the implementation of volcanic effects in the atmosphere",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.volcanos.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"54. Volcanos --> Volcanoes Treatment\nTreatment of volcanoes in the atmosphere\n54.1. Volcanoes Implementation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHow volcanic effects are modeled in the atmosphere.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"high frequency solar constant anomaly\" \n# \"stratospheric aerosols optical thickness\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"©2017 ES-DOC"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
zzsza/TIL
|
Tensorflow/mnist.ipynb
|
mit
|
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Imports\nimport numpy as np\nimport tensorflow as tf\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\n# Our application logic will be added here\n\n\nif __name__ == \"__main__\":\n tf.app.run()",
"Conv layer #1 : 32 5x5 filters, ReLU\nPooling layer #1 : 2x2 filter, stride 2\nConv layer #2 : 64 5x5, ReLU\nPooling layer #2 : 2x2 filter, stride 2\nDense Layer #1 : 1024, with dropout regularization rate of 0.4\n\nDense layer #2(logit) : 10 neurons, one for each digit target class\n\n\ntf.layer module\n\nconv2d()\nmax_pooling2d()\ndense()",
"def cnn_model_fn(features, labels, mode):\n input_layer = tf.reshape(features[\"x\"], [-1, 28, 28, 1])\n \n conv1 = tf.layers.conv2d(\n inputs=input_layer,\n filters=32, \n kernel_size=[5, 5],\n padding=\"same\",\n activation=tf.nn.relu)\n \n pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2],\n strides=2)\n \n conv2 = tf.layers.conv2d(inputs=pool1, filters=64, kernel_size=[5, 5],\n padding=\"same\", activation=tf.nn.relu)\n \n pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], \n strides=2)\n \n pool2_float = tf.reshape(pool2, [-1, 7 * 7 * 64])\n dense = tf.layers.dense(inputs=pool2_float, units=1024, activation=tf.nn.relu)\n \n dropout = tf.layers.dropout(\n inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)\n \n # tf.estimator.ModeKeys.TRAIN은 torch의 model.eval()이런식으로 모드 설정하는 거인듯\n \n logits = tf.layers.dense(inputs=dropout, units=10)\n \n predictions = {\n \"classes\" : tf.argmax(input=logits, axis=1),\n \"probabilities\": tf.nn.softmax(logits, name=\"softmax_tensor\")\n }\n \n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n \n # Calculate Loss\n loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)\n \n # Configure the Training Op\n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)\n train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n \n # Add evaluation metrics\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(labels=labels, predictions=predictions[\"classes\"])\n }\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)",
"데이터 로드",
"mnist = tf.contrib.learn.datasets.load_dataset(\"mnist\")\n\ntrain_data = mnist.train.images\ntrain_labels = np.asarray(mnist.train.labels, dtype=np.int32)\neval_data = mnist.test.images \neval_labels = np.asarray(mnist.test.labels, dtype=np.int32)",
"Create Estimator",
"mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn,\n model_dir=\"./tmp/mnist_convnet\")\n\ntensors_to_log = {\"probabilities\": \"softmax_tensor\"}\n\nlogging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=50)",
"Train model",
"%%time\ntrain_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\":train_data},\n y=train_labels,\n batch_size=100,\n num_epochs=None,\n shuffle=True)\nmnist_classifier.train(input_fn=train_input_fn, steps=500, hooks=[logging_hook])\n\neval_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": eval_data},\n y=eval_labels,\n num_epochs=1,\n shuffle=False)\n\neval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)\nprint(eval_results)"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
dadosgovbr/catalogos-dados-brasil
|
scripts/uso/como-usar-com-o-pandas.ipynb
|
mit
|
[
"Como usar com o Pandas\nOs catálogos de dados abertos podem ser consultados facilmente com a ferramenta\nPandas, com ou sem Jupyter Notebook.\nEsse tutorial inspirado na\ndemonstração\ndo Open Knowledge Labs.\nPacotes necessários\nFrictionless Data\nAlém do Pandas, será necessário instalar alguns pacotes para trabalhar com Frictionless Data. Para isso,\nexecute:\npip install datapackage tableschema-pandas\nPara maiores informações sobre como usar esses pacotes, veja o exemplo contido\nno repositório do\ntableschema-pandas.\nPlotly e Plotly Express\nPara visualizar os dados, utilizaremos Plotly e Plotly Express.\nEntretanto, sinta-se à vontade para usar a biblioteca de visualização de sua\npreferência.\nPara instalar:\npip install plotly plotly_express\nVersões\nPara este tutorial, estamos usando a versão 1.10.0 do datapackage, a\nversão 1.1.0 do tableschema-pandas. Quanto às bibliotecas de\nvisualização, usamos Plotly versão 4.1.0 e Plotly Express versão 0.4.1.\nPara saber a sua versão, após ter instalado, use os comandos\npip freeze | grep datapackage\npip freeze | grep tableschema\npip freeze | grep plotly\nSe desejar instalar essas versões exatas, é possível executar o comando\npip install -r requirements.txt\npois esse arquivo já contém as versões afixadas.",
"import pandas as pd\n\n# Para trabalhar com Frictionless Data – frictionlessdata.io\nfrom tableschema import Storage\nfrom datapackage import Package\n\n# Para visualização\nimport plotly_express as px\nimport plotly as py, plotly.graph_objects as go",
"Lendo o pacote de dados\nÉ possível ler o pacote de dados diretamente a partir da URL:",
"# Gravar no Pandas\n\nurl = 'https://github.com/dadosgovbr/catalogos-dados-brasil/raw/master/datapackage.json'\n\nstorage = Storage.connect('pandas')\npackage = Package(url)\npackage.save(storage=storage)",
"Um pacote de dados pode conter uma quantidade de recursos. Pense em um\nrecurso como uma tabela em um banco de dados. Cada um é um arquivo CSV.\nNo contexto do armazenamento dos dados, esses recursos também são chamados de buckets (numa tradução livre, \"baldes\").",
"storage.buckets",
"Que são também Dataframes do Pandas:",
"type(storage['catalogos'])",
"Por isso, funcionam todas as operações que podem ser feitas com um DataFrames\ndo Pandas:",
"storage['solucao']",
"Como, por exemplo, mostrar o início da tabela.",
"storage['catalogos'].head()",
"Ou ver quantos portais existem por tipo de solução, ou por UF, ou por poder,\netc.\nPor tipo de solução",
"tipo_solucao = storage['catalogos'].groupby('Solução').count()['URL'].rename('quantidade')\ntipo_solucao\n\npx.bar(\n pd.DataFrame(tipo_solucao).reset_index(),\n x = 'Solução',\n y = 'quantidade',\n color = 'Solução',\n color_discrete_sequence = py.colors.qualitative.Set2\n)",
"Por poder da república",
"poder = storage['catalogos'].groupby('Poder').count()['URL'].rename('quantidade')\npoder\n\ngo.Figure(\n data=go.Pie(\n labels=poder.index,\n values=poder.values,\n hole=.4\n )\n).show()",
"Por esfera",
"esfera = storage['catalogos'].groupby('Esfera').count()['URL'].rename('quantidade')\nesfera\n\ngo.Figure(\n data=go.Pie(\n labels=esfera.index,\n values=esfera.values,\n hole=.4\n )\n).show()",
"Por unidade federativa",
"uf = storage['catalogos'].groupby('UF').count()['URL'].rename('quantidade')\nuf\n\npx.bar(\n pd.DataFrame(uf).reset_index(),\n x = 'UF',\n y = 'quantidade',\n color = 'UF',\n color_discrete_sequence = py.colors.qualitative.Set3\n)",
"Obs.: Neste caderno usamos imagens estáticas em vez das saídas interativas\ndo Plotly, considerando que estas ocupariam um espaço imenso no repositório.\nAlém disso, o Github não mostra as saídas interativas na pré-visualização dos\ncadernos ao ver um repositório. Entretanto, ao baixar e executar o caderno, as\nvisualizações interativas estarão disponíveis."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
djgagne/SHARPpy
|
tutorials/SHARPpy_basics.ipynb
|
bsd-3-clause
|
[
"Basic Coding With SHARPpy\nWritten by: Greg Blumberg (OU/CIMMS)\nThis IPython Notebook tutorial is meant to teach the user how to directly interact with the SHARPpy libraries using the Python interpreter. This tutorial will cover reading in files into the the Profile object, plotting the data using Matplotlib, and computing various indices from the data. It is also a reference to the different functions and variables SHARPpy has available to the user.\nIn order to work with SHARPpy, you need to perform 3 steps before you can begin running routines such as CAPE/CIN on the data.\nStep 1: Read in the data to work with.\n1.) The Pilger, NE tornado proximity sounding from 19 UTC within the tutorial/ directory is an example of the SPC sounding file format that can be read in by the GUI. Here we'll read it in manually.",
"%matplotlib inline\nspc_file = open('14061619.OAX', 'r').read()",
"All of the SHARPpy routines (parcel lifting, composite indices, etc.) reside within the SHARPTAB module.\nSHARPTAB contains 6 modules:\nparams, winds, thermo, utils, interp, fire, constants, watch_type\nEach module has different functions:\ninterp - interpolates different variables (temperature, dewpoint, wind, etc.) to a specified pressure\nwinds - functions used to compute different wind-related variables (shear, helicity, mean winds, storm relative vectors)\nthermo - temperature unit conversions, theta-e, theta, wetbulb, lifting functions\nutils - wind speed unit conversions, wind speed and direction to u and v conversions, QC\nparams - computation of different parameters, indices, etc. from the Profile object\nfire - fire weather indices\n\nStep 2: Load in the SHARPTAB module.",
"import sharppy.sharptab as tab",
"Step 3: Making a Profile object.\nBefore running any analysis routines on the data, we have to create a Profile object first. A Profile object describes the vertical thermodynamic and kinematic profiles and is the key object that all SHARPpy routines need to run. Any data source can be passed into a Profile object (i.e. radiosonde, RASS, satellite sounding retrievals, etc.) as long as it has these profiles:\n\ntemperature (C)\ndewpoint (C)\nheight (meters above mean sea level)\npressure (millibars)\nwind speed (kts)\nwind direction (degrees)\n\nor (optional)\n- zonal wind component U (kts)\n- meridional wind component V (kts)\nFor example, after reading in the data in the example above, a Profile object can be created. Since this file uses the value -9999 to indicate missing values, we need to tell SHARPpy to ignore these values in its calculations by including the missing field to be -9999. In addition, we tell SHARPpy we want to create a default BasicProfile object. Telling SHARPpy to create a \"convective\" profile object will generate a Profile object with all of the indices computed in the SHARPpy GUI. If you are only wanting to compute a few indices, you probably don't want to do that.",
"import numpy as np\nfrom StringIO import StringIO\n\ndef parseSPC(spc_file):\n ## read in the file\n data = np.array([l.strip() for l in spc_file.split('\\n')])\n\n ## necessary index points\n title_idx = np.where( data == '%TITLE%')[0][0]\n start_idx = np.where( data == '%RAW%' )[0] + 1\n finish_idx = np.where( data == '%END%')[0]\n\n ## create the plot title\n data_header = data[title_idx + 1].split()\n location = data_header[0]\n time = data_header[1][:11]\n\n ## put it all together for StringIO\n full_data = '\\n'.join(data[start_idx : finish_idx][:])\n sound_data = StringIO( full_data )\n\n ## read the data into arrays\n p, h, T, Td, wdir, wspd = np.genfromtxt( sound_data, delimiter=',', comments=\"%\", unpack=True )\n\n return p, h, T, Td, wdir, wspd\n\npres, hght, tmpc, dwpc, wdir, wspd = parseSPC(spc_file)\n\nprof = tab.profile.create_profile(profile='default', pres=pres, hght=hght, tmpc=tmpc, \\\n dwpc=dwpc, wspd=wspd, wdir=wdir, missing=-9999, strictQC=True)",
"In SHARPpy, Profile objects have quality control checks built into them to alert the user to bad data and in order to prevent the program from crashing on computational routines. For example, upon construction of the Profile object, the SHARPpy will check for unrealistic values (i.e. dewpoint or temperature below absolute zero, negative wind speeds) and incorrect ordering of the height and pressure arrays. Height arrays must be increasing with array index, and pressure arrays must be decreasing with array index. Repeat values are not allowed. \nIf the user wishes to avoid these checks, set the \"strictQC\" flag to False when constructing an object.\nBecause Python is an interpreted language, it can be quite slow for certain processes. When working with soundings in SHARPpy, we recommend the profiles contain a maximum of 200-500 points. High resolution radiosonde profiles (i.e. 1 second profiles) contain thousands of points and some of the SHARPpy functions that involve lifting parcels (i.e. parcelx) may take a long time to run. To filter your data to make it easier for SHARPpy to work with, you can use a sounding filter such as the one found here:\nhttps://github.com/tsupinie/SoundingFilter\nWorking with the data:\nOnce you have a Profile object, you can begin running analysis routines and plotting the data. The following sections show different examples of how to do this.\nPlotting the data:",
"import matplotlib.pyplot as plt\nplt.plot(prof.tmpc, prof.hght, 'r-')\nplt.plot(prof.dwpc, prof.hght, 'g-')\n#plt.barbs(40*np.ones(len(prof.hght)), prof.hght, prof.u, prof.v)\nplt.xlabel(\"Temperature [C]\")\nplt.ylabel(\"Height [m above MSL]\")\nplt.grid()\nplt.show()",
"SHARPpy Profile objects keep track of the height grid the profile lies on. Within the profile object, the height grid is assumed to be in meters above mean sea level.\nIn the example data provided, the profile can be converted to and from AGL from MSL:",
"msl_hght = prof.hght[prof.sfc] # Grab the surface height value\nprint \"SURFACE HEIGHT (m MSL):\",msl_hght\nagl_hght = tab.interp.to_agl(prof, msl_hght) # Converts to AGL\nprint \"SURFACE HEIGHT (m AGL):\", agl_hght\nmsl_hght = tab.interp.to_msl(prof, agl_hght) # Converts to MSL\nprint \"SURFACE HEIGHT (m MSL):\",msl_hght",
"Showing derived profiles:\nBy default, Profile objects also create derived profiles such as Theta-E and Wet-Bulb when they are constructed. These profiles are accessible to the user too.",
"plt.plot(tab.thermo.ktoc(prof.thetae), prof.hght, 'r-', label='Theta-E')\nplt.plot(prof.wetbulb, prof.hght, 'c-', label='Wetbulb')\nplt.xlabel(\"Temperature [C]\")\nplt.ylabel(\"Height [m above MSL]\")\nplt.legend()\nplt.grid()\nplt.show()",
"Lifting Parcels:\nIn SHARPpy, parcels are lifted via the params.parcelx() routine. The parcelx() routine takes in the arguments of a Profile object and a flag to indicate what type of parcel you would like to be lifted. Additional arguments can allow for custom/user defined parcels to be passed to the parcelx() routine, however most users will likely be using only the Most-Unstable, Surface, 100 mb Mean Layer, and Forecast parcels.\nThe parcelx() routine by default utilizes the virtual temperature correction to compute variables such as CAPE and CIN. If the dewpoint profile contains missing data, parcelx() will disregard using the virtual temperature correction.",
"sfcpcl = tab.params.parcelx( prof, flag=1 ) # Surface Parcel\nfcstpcl = tab.params.parcelx( prof, flag=2 ) # Forecast Parcel\nmupcl = tab.params.parcelx( prof, flag=3 ) # Most-Unstable Parcel\nmlpcl = tab.params.parcelx( prof, flag=4 ) # 100 mb Mean Layer Parcel",
"Once your parcel attributes are computed by params.parcelx(), you can extract information about the parcel such as CAPE, CIN, LFC height, LCL height, EL height, etc.",
"print \"Most-Unstable CAPE:\", mupcl.bplus # J/kg\nprint \"Most-Unstable CIN:\", mupcl.bminus # J/kg\nprint \"Most-Unstable LCL:\", mupcl.lclhght # meters AGL\nprint \"Most-Unstable LFC:\", mupcl.lfchght # meters AGL\nprint \"Most-Unstable EL:\", mupcl.elhght # meters AGL\nprint \"Most-Unstable LI:\", mupcl.li5 # C",
"Other Parcel Object Attributes:\nHere is a list of the attributes and their units contained in each parcel object (pcl):\npcl.pres - Parcel beginning pressure (mb)\npcl.tmpc - Parcel beginning temperature (C)\npcl.dwpc - Parcel beginning dewpoint (C)\npcl.ptrace - Parcel trace pressure (mb)\npcl.ttrace - Parcel trace temperature (C)\npcl.blayer - Pressure of the bottom of the layer the parcel is lifted (mb)\npcl.tlayer - Pressure of the top of the layer the parcel is lifted (mb)\npcl.lclpres - Parcel LCL (lifted condensation level) pressure (mb)\npcl.lclhght - Parcel LCL height (m AGL)\npcl.lfcpres - Parcel LFC (level of free convection) pressure (mb)\npcl.lfchght - Parcel LFC height (m AGL)\npcl.elpres - Parcel EL (equilibrium level) pressure (mb)\npcl.elhght - Parcel EL height (m AGL)\npcl.mplpres - Maximum Parcel Level (mb)\npcl.mplhght - Maximum Parcel Level (m AGL)\npcl.bplus - Parcel CAPE (J/kg)\npcl.bminus - Parcel CIN (J/kg)\npcl.bfzl - Parcel CAPE up to freezing level (J/kg)\npcl.b3km - Parcel CAPE up to 3 km (J/kg)\npcl.b6km - Parcel CAPE up to 6 km (J/kg)\npcl.p0c - Pressure value at 0 C (mb)\npcl.pm10c - Pressure value at -10 C (mb)\npcl.pm20c - Pressure value at -20 C (mb)\npcl.pm30c - Pressure value at -30 C (mb)\npcl.hght0c - Height value at 0 C (m AGL)\npcl.hghtm10c - Height value at -10 C (m AGL)\npcl.hghtm20c - Height value at -20 C (m AGL)\npcl.hghtm30c - Height value at -30 C (m AGL)\npcl.wm10c - Wet bulb velocity at -10 C \npcl.wm20c - Wet bulb velocity at -20 C\npcl.wm30c - Wet bulb at -30 C\npcl.li5 = - Lifted Index at 500 mb (C)\npcl.li3 = - Lifted Index at 300 mb (C)\npcl.brnshear - Bulk Richardson Number Shear\npcl.brnu - Bulk Richardson Number U (kts)\npcl.brnv - Bulk Richardson Number V (kts)\npcl.brn - Bulk Richardson Number (unitless)\npcl.limax - Maximum Lifted Index (C)\npcl.limaxpres - Pressure at Maximum Lifted Index (mb)\npcl.cap - Cap Strength (C)\npcl.cappres - Cap strength pressure (mb)\npcl.bmin - Buoyancy minimum in profile 
(C)\npcl.bminpres - Buoyancy minimum pressure (mb)\n\nAdding a Parcel Trace and plotting Moist and Dry Adiabats:",
"# This serves as an intensive exercise of matplotlib's transforms\n# and custom projection API. This example produces a so-called\n# SkewT-logP diagram, which is a common plot in meteorology for\n# displaying vertical profiles of temperature. As far as matplotlib is\n# concerned, the complexity comes from having X and Y axes that are\n# not orthogonal. This is handled by including a skew component to the\n# basic Axes transforms. Additional complexity comes in handling the\n# fact that the upper and lower X-axes have different data ranges, which\n# necessitates a bunch of custom classes for ticks,spines, and the axis\n# to handle this.\n\nfrom matplotlib.axes import Axes\nimport matplotlib.transforms as transforms\nimport matplotlib.axis as maxis\nimport matplotlib.spines as mspines\nimport matplotlib.path as mpath\nfrom matplotlib.projections import register_projection\n\n# The sole purpose of this class is to look at the upper, lower, or total\n# interval as appropriate and see what parts of the tick to draw, if any.\nclass SkewXTick(maxis.XTick):\n def draw(self, renderer):\n if not self.get_visible(): return\n renderer.open_group(self.__name__)\n\n lower_interval = self.axes.xaxis.lower_interval\n upper_interval = self.axes.xaxis.upper_interval\n\n if self.gridOn and transforms.interval_contains(\n self.axes.xaxis.get_view_interval(), self.get_loc()):\n self.gridline.draw(renderer)\n\n if transforms.interval_contains(lower_interval, self.get_loc()):\n if self.tick1On:\n self.tick1line.draw(renderer)\n if self.label1On:\n self.label1.draw(renderer)\n\n if transforms.interval_contains(upper_interval, self.get_loc()):\n if self.tick2On:\n self.tick2line.draw(renderer)\n if self.label2On:\n self.label2.draw(renderer)\n\n renderer.close_group(self.__name__)\n\n\n# This class exists to provide two separate sets of intervals to the tick,\n# as well as create instances of the custom tick\nclass SkewXAxis(maxis.XAxis):\n def __init__(self, *args, **kwargs):\n 
maxis.XAxis.__init__(self, *args, **kwargs)\n self.upper_interval = 0.0, 1.0\n\n def _get_tick(self, major):\n return SkewXTick(self.axes, 0, '', major=major)\n\n @property\n def lower_interval(self):\n return self.axes.viewLim.intervalx\n\n def get_view_interval(self):\n return self.upper_interval[0], self.axes.viewLim.intervalx[1]\n\n\n# This class exists to calculate the separate data range of the\n# upper X-axis and draw the spine there. It also provides this range\n# to the X-axis artist for ticking and gridlines\nclass SkewSpine(mspines.Spine):\n def _adjust_location(self):\n trans = self.axes.transDataToAxes.inverted()\n if self.spine_type == 'top':\n yloc = 1.0\n else:\n yloc = 0.0\n left = trans.transform_point((0.0, yloc))[0]\n right = trans.transform_point((1.0, yloc))[0]\n\n pts = self._path.vertices\n pts[0, 0] = left\n pts[1, 0] = right\n self.axis.upper_interval = (left, right)\n\n\n# This class handles registration of the skew-xaxes as a projection as well\n# as setting up the appropriate transformations. It also overrides standard\n# spines and axes instances as appropriate.\nclass SkewXAxes(Axes):\n # The projection must specify a name. This will be used be the\n # user to select the projection, i.e. 
``subplot(111,\n # projection='skewx')``.\n name = 'skewx'\n\n def _init_axis(self):\n #Taken from Axes and modified to use our modified X-axis\n self.xaxis = SkewXAxis(self)\n self.spines['top'].register_axis(self.xaxis)\n self.spines['bottom'].register_axis(self.xaxis)\n self.yaxis = maxis.YAxis(self)\n self.spines['left'].register_axis(self.yaxis)\n self.spines['right'].register_axis(self.yaxis)\n\n def _gen_axes_spines(self):\n spines = {'top':SkewSpine.linear_spine(self, 'top'),\n 'bottom':mspines.Spine.linear_spine(self, 'bottom'),\n 'left':mspines.Spine.linear_spine(self, 'left'),\n 'right':mspines.Spine.linear_spine(self, 'right')}\n return spines\n\n def _set_lim_and_transforms(self):\n \"\"\"\n This is called once when the plot is created to set up all the\n transforms for the data, text and grids.\n \"\"\"\n rot = 30\n\n #Get the standard transform setup from the Axes base class\n Axes._set_lim_and_transforms(self)\n\n # Need to put the skew in the middle, after the scale and limits,\n # but before the transAxes. This way, the skew is done in Axes\n # coordinates thus performing the transform around the proper origin\n # We keep the pre-transAxes transform around for other users, like the\n # spines for finding bounds\n self.transDataToAxes = self.transScale + (self.transLimits +\n transforms.Affine2D().skew_deg(rot, 0))\n\n # Create the full transform from Data to Pixels\n self.transData = self.transDataToAxes + self.transAxes\n\n # Blended transforms like this need to have the skewing applied using\n # both axes, in axes coords like before.\n self._xaxis_transform = (transforms.blended_transform_factory(\n self.transScale + self.transLimits,\n transforms.IdentityTransform()) +\n transforms.Affine2D().skew_deg(rot, 0)) + self.transAxes\n\n# Now register the projection with matplotlib so the user can select\n# it.\nregister_projection(SkewXAxes)\n\npcl = mupcl\n# Create a new figure. 
The dimensions here give a good aspect ratio\nfig = plt.figure(figsize=(6.5875, 6.2125))\nax = fig.add_subplot(111, projection='skewx')\nax.grid(True)\n\npmax = 1000\npmin = 10\ndp = -10\npresvals = np.arange(int(pmax), int(pmin)+dp, dp)\n\n# plot the moist-adiabats\nfor t in np.arange(-10,45,5):\n tw = []\n for p in presvals:\n tw.append(tab.thermo.wetlift(1000., t, p))\n ax.semilogy(tw, presvals, 'k-', alpha=.2)\n\ndef thetas(theta, presvals):\n return ((theta + tab.thermo.ZEROCNK) / (np.power((1000. / presvals),tab.thermo.ROCP))) - tab.thermo.ZEROCNK\n\n# plot the dry adiabats\nfor t in np.arange(-50,110,10):\n ax.semilogy(thetas(t, presvals), presvals, 'r-', alpha=.2)\n\nplt.title(' OAX 140616/1900 (Observed)', fontsize=14, loc='left')\n# Plot the data using normal plotting functions, in this case using\n# log scaling in Y, as dicatated by the typical meteorological plot\nax.semilogy(prof.tmpc, prof.pres, 'r', lw=2)\nax.semilogy(prof.dwpc, prof.pres, 'g', lw=2)\nax.semilogy(pcl.ttrace, pcl.ptrace, 'k-.', lw=2)\n\n# An example of a slanted line at constant X\nl = ax.axvline(0, color='b', linestyle='--')\nl = ax.axvline(-20, color='b', linestyle='--')\n\n# Disables the log-formatting that comes with semilogy\nax.yaxis.set_major_formatter(plt.ScalarFormatter())\nax.set_yticks(np.linspace(100,1000,10))\nax.set_ylim(1050,100)\n\nax.xaxis.set_major_locator(plt.MultipleLocator(10))\nax.set_xlim(-50,50)\nplt.show()\n ",
"Calculating Kinematic Variables:\nSHARPpy also allows the user to compute kinematic variables such as shear, mean-winds, and storm relative helicity. SHARPpy will also compute storm motion vectors based off of the work by Stephen Corfidi and Matthew Bunkers. Below is some example code to compute the following:\n1.) 0-3 km Pressure-Weighted Mean Wind\n2.) 0-6 km Shear (kts)\n3.) Bunker's Storm Motion (right-mover) (Bunkers et al. 2014 version)\n4.) Bunker's Storm Motion (left-mover) (Bunkers et al. 2014 version)\n5.) 0-3 Storm Relative Helicity",
"sfc = prof.pres[prof.sfc]\np3km = tab.interp.pres(prof, tab.interp.to_msl(prof, 3000.))\np6km = tab.interp.pres(prof, tab.interp.to_msl(prof, 6000.))\np1km = tab.interp.pres(prof, tab.interp.to_msl(prof, 1000.))\nmean_3km = tab.winds.mean_wind(prof, pbot=sfc, ptop=p3km)\nsfc_6km_shear = tab.winds.wind_shear(prof, pbot=sfc, ptop=p6km)\nsfc_3km_shear = tab.winds.wind_shear(prof, pbot=sfc, ptop=p3km)\nsfc_1km_shear = tab.winds.wind_shear(prof, pbot=sfc, ptop=p1km)\nprint \"0-3 km Pressure-Weighted Mean Wind (kt):\", tab.utils.comp2vec(mean_3km[0], mean_3km[1])[1]\nprint \"0-6 km Shear (kt):\", tab.utils.comp2vec(sfc_6km_shear[0], sfc_6km_shear[1])[1]\nsrwind = tab.params.bunkers_storm_motion(prof)\nprint \"Bunker's Storm Motion (right-mover) [deg,kts]:\", tab.utils.comp2vec(srwind[0], srwind[1])\nprint \"Bunker's Storm Motion (left-mover) [deg,kts]:\", tab.utils.comp2vec(srwind[2], srwind[3])\nsrh3km = tab.winds.helicity(prof, 0, 3000., stu = srwind[0], stv = srwind[1])\nsrh1km = tab.winds.helicity(prof, 0, 1000., stu = srwind[0], stv = srwind[1])\nprint \"0-3 km Storm Relative Helicity [m2/s2]:\",srh3km[0]",
"Calculating variables based off of the effective inflow layer:\nThe effective inflow layer concept is used to obtain the layer of buoyant parcels that feed a storm's inflow. Here are a few examples of how to compute variables that require the effective inflow layer in order to calculate them:",
"stp_fixed = tab.params.stp_fixed(sfcpcl.bplus, sfcpcl.lclhght, srh1km[0], tab.utils.comp2vec(sfc_6km_shear[0], sfc_6km_shear[1])[1])\nship = tab.params.ship(prof)\neff_inflow = tab.params.effective_inflow_layer(prof)\nebot_hght = tab.interp.to_agl(prof, tab.interp.hght(prof, eff_inflow[0]))\netop_hght = tab.interp.to_agl(prof, tab.interp.hght(prof, eff_inflow[1]))\nprint \"Effective Inflow Layer Bottom Height (m AGL):\", ebot_hght\nprint \"Effective Inflow Layer Top Height (m AGL):\", etop_hght\neffective_srh = tab.winds.helicity(prof, ebot_hght, etop_hght, stu = srwind[0], stv = srwind[1])\nprint \"Effective Inflow Layer SRH (m2/s2):\", effective_srh[0]\nebwd = tab.winds.wind_shear(prof, pbot=eff_inflow[0], ptop=eff_inflow[1])\nebwspd = tab.utils.mag( ebwd[0], ebwd[1] )\nprint \"Effective Bulk Wind Difference:\", ebwspd\nscp = tab.params.scp(mupcl.bplus, effective_srh[0], ebwspd)\nstp_cin = tab.params.stp_cin(mlpcl.bplus, effective_srh[0], ebwspd, mlpcl.lclhght, mlpcl.bminus)\nprint \"Supercell Composite Parameter:\", scp\nprint \"Significant Tornado Parameter (w/CIN):\", stp_cin\nprint \"Significant Tornado Parameter (fixed):\", stp_fixed\n",
"Putting it all together into one plot:",
"indices = {'SBCAPE': [int(sfcpcl.bplus), 'J/kg'],\\\n 'SBCIN': [int(sfcpcl.bminus), 'J/kg'],\\\n 'SBLCL': [int(sfcpcl.lclhght), 'm AGL'],\\\n 'SBLFC': [int(sfcpcl.lfchght), 'm AGL'],\\\n 'SBEL': [int(sfcpcl.elhght), 'm AGL'],\\\n 'SBLI': [int(sfcpcl.li5), 'C'],\\\n 'MLCAPE': [int(mlpcl.bplus), 'J/kg'],\\\n 'MLCIN': [int(mlpcl.bminus), 'J/kg'],\\\n 'MLLCL': [int(mlpcl.lclhght), 'm AGL'],\\\n 'MLLFC': [int(mlpcl.lfchght), 'm AGL'],\\\n 'MLEL': [int(mlpcl.elhght), 'm AGL'],\\\n 'MLLI': [int(mlpcl.li5), 'C'],\\\n 'MUCAPE': [int(mupcl.bplus), 'J/kg'],\\\n 'MUCIN': [int(mupcl.bminus), 'J/kg'],\\\n 'MULCL': [int(mupcl.lclhght), 'm AGL'],\\\n 'MULFC': [int(mupcl.lfchght), 'm AGL'],\\\n 'MUEL': [int(mupcl.elhght), 'm AGL'],\\\n 'MULI': [int(mupcl.li5), 'C'],\\\n '0-1 km SRH': [int(srh1km[0]), 'm2/s2'],\\\n '0-1 km Shear': [int(tab.utils.comp2vec(sfc_1km_shear[0], sfc_1km_shear[1])[1]), 'kts'],\\\n '0-3 km SRH': [int(srh3km[0]), 'm2/s2'],\\\n 'Eff. SRH': [int(effective_srh[0]), 'm2/s2'],\\\n 'EBWD': [int(ebwspd), 'kts'],\\\n 'PWV': [round(tab.params.precip_water(prof), 2), 'inch'],\\\n 'K-index': [int(tab.params.k_index(prof)), ''],\\\n 'STP(fix)': [round(stp_fixed, 1), ''],\\\n 'SHIP': [round(ship, 1), ''],\\\n 'SCP': [round(scp, 1), ''],\\\n 'STP(cin)': [round(stp_cin, 1), '']}\n\n# Set the parcel trace to be plotted as the Most-Unstable parcel.\npcl = mupcl\n\n# Create a new figure. The dimensions here give a good aspect ratio\nfig = plt.figure(figsize=(6.5875, 6.2125))\nax = fig.add_subplot(111, projection='skewx')\nax.grid(True)\n\npmax = 1000\npmin = 10\ndp = -10\npresvals = np.arange(int(pmax), int(pmin)+dp, dp)\n\n# plot the moist-adiabats\nfor t in np.arange(-10,45,5):\n tw = []\n for p in presvals:\n tw.append(tab.thermo.wetlift(1000., t, p))\n ax.semilogy(tw, presvals, 'k-', alpha=.2)\n\ndef thetas(theta, presvals):\n return ((theta + tab.thermo.ZEROCNK) / (np.power((1000. 
/ presvals),tab.thermo.ROCP))) - tab.thermo.ZEROCNK\n\n# plot the dry adiabats\nfor t in np.arange(-50,110,10):\n ax.semilogy(thetas(t, presvals), presvals, 'r-', alpha=.2)\n\nplt.title(' OAX 140616/1900 (Observed)', fontsize=12, loc='left')\n# Plot the data using normal plotting functions, in this case using\n# log scaling in Y, as dicatated by the typical meteorological plot\nax.semilogy(prof.tmpc, prof.pres, 'r', lw=2) # Plot the temperature profile\nax.semilogy(prof.wetbulb, prof.pres, 'c-') # Plot the wetbulb profile\nax.semilogy(prof.dwpc, prof.pres, 'g', lw=2) # plot the dewpoint profile\nax.semilogy(pcl.ttrace, pcl.ptrace, 'k-.', lw=2) # plot the parcel trace \n# An example of a slanted line at constant X\nl = ax.axvline(0, color='b', linestyle='--')\nl = ax.axvline(-20, color='b', linestyle='--')\n\n# Plot the effective inflow layer using blue horizontal lines\nax.axhline(eff_inflow[0], color='b')\nax.axhline(eff_inflow[1], color='b')\n\n#plt.barbs(10*np.ones(len(prof.pres)), prof.pres, prof.u, prof.v)\n# Disables the log-formatting that comes with semilogy\nax.yaxis.set_major_formatter(plt.ScalarFormatter())\nax.set_yticks(np.linspace(100,1000,10))\nax.set_ylim(1050,100)\nax.xaxis.set_major_locator(plt.MultipleLocator(10))\nax.set_xlim(-50,50)\n\n# List the indices within the indices dictionary on the side of the plot.\nstring = ''\nfor key in np.sort(indices.keys()):\n string = string + key + ': ' + str(indices[key][0]) + ' ' + indices[key][1] + '\\n'\nplt.text(1.02, 1, string, verticalalignment='top', transform=plt.gca().transAxes)\n\n# Draw the hodograph on the Skew-T.\n# TAS 2015-4-16: hodograph doesn't plot for some reason ...\nax2 = plt.axes([.625,.625,.25,.25])\nbelow_12km = np.where(tab.interp.to_agl(prof, prof.hght) < 12000)[0]\nu_prof = prof.u[below_12km]\nv_prof = prof.v[below_12km]\nax2.plot(u_prof[~u_prof.mask], v_prof[~u_prof.mask], 'k-', lw=2)\nax2.get_xaxis().set_visible(False)\nax2.get_yaxis().set_visible(False)\nfor i in 
range(10,90,10):\n # Draw the range rings around the hodograph.\n circle = plt.Circle((0,0),i,color='k',alpha=.3, fill=False)\n ax2.add_artist(circle)\nax2.plot(srwind[0], srwind[1], 'ro') # Plot Bunker's Storm motion right mover as a red dot\nax2.plot(srwind[2], srwind[3], 'bo') # Plot Bunker's Storm motion left mover as a blue dot\n\nax2.set_xlim(-60,60)\nax2.set_ylim(-60,60)\nax2.axhline(y=0, color='k')\nax2.axvline(x=0, color='k')\nplt.show()",
"List of functions in each module:\nThis tutorial cannot cover all of the functions in SHARPpy. Below is a list of all of the functions accessible through SHARPTAB. In order to learn more about the function in this IPython Notebook, open up a new \"In[]:\" field and type in the path to the function (for example):\ntab.params.dcape()\n\nDocumentation should appear below the cursor describing the function itself, the function's arguments, its output values, and any references to meteorological literature the function was based on.",
"print \"Functions within params.py:\"\nfor key in tab.params.__all__:\n print \"\\ttab.params.\" + key + \"()\"\nprint \"\\nFunctions within winds.py:\"\nfor key in tab.winds.__all__:\n print \"\\ttab.winds.\" + key + \"()\"\nprint \"\\nFunctions within thermo.py:\"\nfor key in tab.thermo.__all__:\n print \"\\ttab.thermo.\" + key + \"()\"\nprint \"\\nFunctions within interp.py:\"\nfor key in tab.interp.__all__:\n print \"\\ttab.interp.\" + key + \"()\"\nprint \"\\nFunctions within utils.py:\"\nfor key in tab.utils.__all__:\n print \"\\ttab.utils.\" + key + \"()\""
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
tensorflow/docs-l10n
|
site/ko/tutorials/images/transfer_learning_with_hub.ipynb
|
apache-2.0
|
[
"Copyright 2018 The TensorFlow Authors.",
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"텐서플로 허브와 전이학습\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/tutorials/images/transfer_learning_with_hub\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/tutorials/images/transfer_learning_with_hub.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/ko/tutorials/images/transfer_learning_with_hub.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/tutorials/images/transfer_learning_with_hub.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>\n\nNote: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도\n불구하고 공식 영문 문서의 내용과 일치하지 않을 수 있습니다.\n이 번역에 개선할 부분이 있다면\ntensorflow/docs-l10n 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다.\n문서 번역이나 리뷰에 참여하려면\ndocs-ko@tensorflow.org로\n메일을 보내주시기 바랍니다.\n텐서플로 허브텐서플로 허브는 이 전에 학습된 모델들의 요소들을 공유하는 하나의 방법입니다. 학습된 모델들의 검색가능한 리스트에 대한 [텐서플로 모듈 허브] (https://tfhub.dev/)를 보세요. 이 튜토리얼은 입증합니다:\n\ntf.keras로 텐서플로 허브를 사용하는 방법.\n텐서플로 허브를 사용하여 이미지 분류를 하는 방법.\n간단한 전이학습을 하는 방법.\n\n설치하기",
"import matplotlib.pylab as plt\n\nimport tensorflow as tf\n\n!pip install -U tf-hub-nightly\nimport tensorflow_hub as hub\n\nfrom tensorflow.keras import layers",
"ImageNet 분류기\n분류기 다운로드하기\n이동 네트워크 컴퓨터를 로드하기 위해 hub.module을, 그리고 하나의 keras층으로 감싸기 위해 tf.keras.layers.Lambda를 사용하세요. Fthub.dev의 텐서플로2.0 버전의 양립 가능한 이미지 분류기 URL 는 이곳에서 작동할 것입니다.",
"classifier_url =\"https://tfhub.dev/google/tf2-preview/mobilenet_v2/classification/2\" #@param {type:\"string\"}\n\nIMAGE_SHAPE = (224, 224)\n\nclassifier = tf.keras.Sequential([\n hub.KerasLayer(classifier_url, input_shape=IMAGE_SHAPE+(3,))\n])",
"싱글 이미지 실행시키기\n모델을 시도하기 위해 싱글 이미지를 다운로드하세요.",
"import numpy as np\nimport PIL.Image as Image\n\ngrace_hopper = tf.keras.utils.get_file('image.jpg','https://storage.googleapis.com/download.tensorflow.org/example_images/grace_hopper.jpg')\ngrace_hopper = Image.open(grace_hopper).resize(IMAGE_SHAPE)\ngrace_hopper\n\ngrace_hopper = np.array(grace_hopper)/255.0\ngrace_hopper.shape",
"차원 배치를 추가하세요, 그리고 이미지를 모델에 통과시키세요.",
"result = classifier.predict(grace_hopper[np.newaxis, ...])\nresult.shape",
"그 결과는 로지트의 1001 요소 벡터입니다. 이는 이미지에 대한 각각의 클래스 확률을 계산합니다.\n그래서 탑 클래스인 ID는 최대값을 알 수 있습니다:",
"predicted_class = np.argmax(result[0], axis=-1)\npredicted_class",
"예측 해독하기\n우리는 클래스 ID를 예측하고,\nImageNet라벨을 불러오고, 그리고 예측을 해독합니다.",
"labels_path = tf.keras.utils.get_file('ImageNetLabels.txt','https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt')\nimagenet_labels = np.array(open(labels_path).read().splitlines())\n\nplt.imshow(grace_hopper)\nplt.axis('off')\npredicted_class_name = imagenet_labels[predicted_class]\n_ = plt.title(\"Prediction: \" + predicted_class_name.title())",
"간단한 전이 학습\n텐서플로 허브를 사용함으로써, 우리의 데이터셋에 있는 클래스들을 인지하기 위해 모델의 최상위 층을 재학습 시키는 것이 쉬워졌습니다.\n데이터셋\n이 예제를 해결하기 위해, 텐서플로의 flowers 데이터셋을 사용할 것입니다:",
"data_root = tf.keras.utils.get_file(\n 'flower_photos','https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',\n untar=True)",
"우리의 모델에 이 데이터를 가장 간단하게 로딩 하는 방법은 tf.keras.preprocessing.image.image.ImageDataGenerator를 사용하는 것이고,\n모든 텐서플로 허브의 이미지 모듈들은 0과 1사이의 상수들의 입력을 기대합니다. 이를 만족 시키기 위해 ImageDataGenerator의 rescale인자를 사용하세요.\n그 이미지의 사이즈는 나중에 다뤄질 것입니다.",
"image_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1/255)\nimage_data = image_generator.flow_from_directory(str(data_root), target_size=IMAGE_SHAPE)",
"결과로 나온 오브젝트는 image_batch와 label_batch를 같이 리턴 하는 반복자입니다.",
"for image_batch, label_batch in image_data:\n print(\"Image batch shape: \", image_batch.shape)\n print(\"Label batch shape: \", label_batch.shape)\n break",
"이미지 배치에 대한 분류기를 실행해보자\n이제 이미지 배치에 대한 분류기를 실행해봅시다.",
"result_batch = classifier.predict(image_batch)\nresult_batch.shape\n\npredicted_class_names = imagenet_labels[np.argmax(result_batch, axis=-1)]\npredicted_class_names",
"얼마나 많은 예측들이 이미지에 맞는지 검토해봅시다:",
"plt.figure(figsize=(10,9))\nplt.subplots_adjust(hspace=0.5)\nfor n in range(30):\n plt.subplot(6,5,n+1)\n plt.imshow(image_batch[n])\n plt.title(predicted_class_names[n])\n plt.axis('off')\n_ = plt.suptitle(\"ImageNet predictions\")",
"이미지 속성을 가진 LICENSE.txt 파일을 보세요.\n결과가 완벽과는 거리가 멀지만, 모델이 (\"daisy\"를 제외한) 모든 것을 대비해서 학습된 클래스가 아니라는 것을 고려하면 합리적입니다.\n헤드리스 모델을 다운로드하세요\n텐서플로 허브는 맨 위 분류층이 없어도 모델을 분배 시킬 수 있습니다. 이는 전이 학습을 쉽게 할 수 있게 만들었습니다.\nfthub.dev의 텐서플로 2.0버전의 양립 가능한 이미지 특성 벡터 URL 은 모두 이 곳에서 작동할 것입니다.",
"feature_extractor_url = \"https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/2\" #@param {type:\"string\"}",
"특성 추출기를 만들어봅시다.",
"feature_extractor_layer = hub.KerasLayer(feature_extractor_url,\n input_shape=(224,224,3))",
"이 것은 각각의 이미지마다 길이가 1280인 벡터가 반환됩니다:",
"feature_batch = feature_extractor_layer(image_batch)\nprint(feature_batch.shape)",
"특성 추출기 계층에 있는 변수들을 굳히면, 학습은 오직 새로운 분류 계층만 변경시킬 수 있습니다.",
"feature_extractor_layer.trainable = False",
"분류 head를 붙이세요.\n이제 tf.keras.Sequential 모델에 있는 허브 계층을 포장하고, 새로운 분류 계층을 추가하세요.",
"model = tf.keras.Sequential([\n feature_extractor_layer,\n layers.Dense(image_data.num_classes, activation='softmax')\n])\n\nmodel.summary()\n\npredictions = model(image_batch)\n\npredictions.shape",
"모델을 학습시키세요\n학습 과정 환경을 설정하기 위해 컴파일을 사용하세요:",
"model.compile(\n optimizer=tf.keras.optimizers.Adam(),\n loss='categorical_crossentropy',\n metrics=['acc'])",
"이제 모델을 학습시키기 위해 .fit방법을 사용하세요.\n예제를 짧게 유지시키기 위해 오로지 2세대만 학습시키세요. 학습 과정을 시각화하기 위해서, 맞춤형 회신을 사용하면 손실과, 세대 평균이 아닌 배치 개별의 정확도를 기록할 수 있습니다.",
"class CollectBatchStats(tf.keras.callbacks.Callback):\n def __init__(self):\n self.batch_losses = []\n self.batch_acc = []\n\n def on_train_batch_end(self, batch, logs=None):\n self.batch_losses.append(logs['loss'])\n self.batch_acc.append(logs['acc'])\n self.model.reset_metrics()\n\nsteps_per_epoch = np.ceil(image_data.samples/image_data.batch_size)\n\nbatch_stats_callback = CollectBatchStats()\n\nhistory = model.fit_generator(image_data, epochs=2,\n steps_per_epoch=steps_per_epoch,\n callbacks = [batch_stats_callback])",
"지금부터, 단순한 학습 반복이지만, 우리는 항상 모델이 프로세스를 만드는 중이라는 것을 알 수 있습니다.",
"plt.figure()\nplt.ylabel(\"Loss\")\nplt.xlabel(\"Training Steps\")\nplt.ylim([0,2])\nplt.plot(batch_stats_callback.batch_losses)\n\nplt.figure()\nplt.ylabel(\"Accuracy\")\nplt.xlabel(\"Training Steps\")\nplt.ylim([0,1])\nplt.plot(batch_stats_callback.batch_acc)",
"예측을 확인하세요\n이 전의 계획을 다시하기 위해서, 클래스 이름들의 정렬된 리스트를 첫번째로 얻으세요:",
"class_names = sorted(image_data.class_indices.items(), key=lambda pair:pair[1])\nclass_names = np.array([key.title() for key, value in class_names])\nclass_names",
"모델을 통해 이미지 배치를 실행시키세요. 그리고 인덱스들을 클래스 이름으로 바꾸세요.",
"predicted_batch = model.predict(image_batch)\npredicted_id = np.argmax(predicted_batch, axis=-1)\npredicted_label_batch = class_names[predicted_id]",
"결과를 계획하세요",
"label_id = np.argmax(label_batch, axis=-1)\n\nplt.figure(figsize=(10,9))\nplt.subplots_adjust(hspace=0.5)\nfor n in range(30):\n plt.subplot(6,5,n+1)\n plt.imshow(image_batch[n])\n color = \"green\" if predicted_id[n] == label_id[n] else \"red\"\n plt.title(predicted_label_batch[n].title(), color=color)\n plt.axis('off')\n_ = plt.suptitle(\"Model predictions (green: correct, red: incorrect)\")",
"당신의 모델을 내보내세요\n당신은 모델을 학습시켜왔기 때문에, 저장된 모델을 내보내세요:",
"import time\nt = time.time()\n\nexport_path = \"/tmp/saved_models/{}\".format(int(t))\nmodel.save(export_path, save_format='tf')\n\nexport_path",
"이제 우리는 그것을 새롭게 로딩 할 수 있고, 이는 같은 결과를 줄 것입니다:",
"reloaded = tf.keras.models.load_model(export_path)\n\nresult_batch = model.predict(image_batch)\nreloaded_result_batch = reloaded.predict(image_batch)\n\nabs(reloaded_result_batch - result_batch).max()",
"저장된 모델은 추후에 추론을 할 수도 있고, TFLite 나 TFjs 로 변환할 수 있습니다."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
ES-DOC/esdoc-jupyterhub
|
notebooks/ncc/cmip6/models/noresm2-mh/seaice.ipynb
|
gpl-3.0
|
[
"ES-DOC CMIP6 Model Properties - Seaice\nMIP Era: CMIP6\nInstitute: NCC\nSource ID: NORESM2-MH\nTopic: Seaice\nSub-Topics: Dynamics, Thermodynamics, Radiative Processes. \nProperties: 80 (63 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:54:24\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook",
"# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'ncc', 'noresm2-mh', 'seaice')",
"Document Authors\nSet document authors",
"# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)",
"Document Contributors\nSpecify document contributors",
"# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)",
"Document Publication\nSpecify document publication status",
"# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)",
"Document Table of Contents\n1. Key Properties --> Model\n2. Key Properties --> Variables\n3. Key Properties --> Seawater Properties\n4. Key Properties --> Resolution\n5. Key Properties --> Tuning Applied\n6. Key Properties --> Key Parameter Values\n7. Key Properties --> Assumptions\n8. Key Properties --> Conservation\n9. Grid --> Discretisation --> Horizontal\n10. Grid --> Discretisation --> Vertical\n11. Grid --> Seaice Categories\n12. Grid --> Snow On Seaice\n13. Dynamics\n14. Thermodynamics --> Energy\n15. Thermodynamics --> Mass\n16. Thermodynamics --> Salt\n17. Thermodynamics --> Salt --> Mass Transport\n18. Thermodynamics --> Salt --> Thermodynamics\n19. Thermodynamics --> Ice Thickness Distribution\n20. Thermodynamics --> Ice Floe Size Distribution\n21. Thermodynamics --> Melt Ponds\n22. Thermodynamics --> Snow Processes\n23. Radiative Processes \n1. Key Properties --> Model\nName of seaice model used.\n1.1. Model Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of sea ice model.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.model.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.2. Model Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nName of sea ice model code (e.g. CICE 4.2, LIM 2.1, etc.)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.model.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"2. Key Properties --> Variables\nList of prognostic variable in the sea ice model.\n2.1. Prognostic\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nList of prognostic variables in the sea ice component.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.variables.prognostic') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Sea ice temperature\" \n# \"Sea ice concentration\" \n# \"Sea ice thickness\" \n# \"Sea ice volume per grid cell area\" \n# \"Sea ice u-velocity\" \n# \"Sea ice v-velocity\" \n# \"Sea ice enthalpy\" \n# \"Internal ice stress\" \n# \"Salinity\" \n# \"Snow temperature\" \n# \"Snow depth\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"3. Key Properties --> Seawater Properties\nProperties of seawater relevant to sea ice\n3.1. Ocean Freezing Point\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nEquation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"TEOS-10\" \n# \"Constant\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"3.2. Ocean Freezing Point Value\nIs Required: FALSE Type: FLOAT Cardinality: 0.1\nIf using a constant seawater freezing point, specify this value.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"4. Key Properties --> Resolution\nResolution of the sea ice grid\n4.1. Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of this grid e.g. N512L180, T512L70, ORCA025 etc.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.resolution.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4.2. Canonical Horizontal Resolution\nIs Required: TRUE Type: STRING Cardinality: 1.1\nExpression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4.3. Number Of Horizontal Gridpoints\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nTotal number of horizontal (XY) points (or degrees of freedom) on computational grid.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"5. Key Properties --> Tuning Applied\nTuning applied to sea ice model component\n5.1. Description\nIs Required: TRUE Type: STRING Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"5.2. Target\nIs Required: TRUE Type: STRING Cardinality: 1.1\nWhat was the aim of tuning, e.g. correct sea ice minima, correct seasonal cycle.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.target') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"5.3. Simulations\nIs Required: TRUE Type: STRING Cardinality: 1.1\n*Which simulations had tuning applied, e.g. all, not historical, only pi-control? *",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"5.4. Metrics Used\nIs Required: TRUE Type: STRING Cardinality: 1.1\nList any observed metrics used in tuning model/parameters",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"5.5. Variables\nIs Required: FALSE Type: STRING Cardinality: 0.1\nWhich variables were changed during the tuning process?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6. Key Properties --> Key Parameter Values\nValues of key parameters\n6.1. Typical Parameters\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nWhat values were specificed for the following parameters if used?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Ice strength (P*) in units of N m{-2}\" \n# \"Snow conductivity (ks) in units of W m{-1} K{-1} \" \n# \"Minimum thickness of ice created in leads (h0) in units of m\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"6.2. Additional Parameters\nIs Required: FALSE Type: STRING Cardinality: 0.N\nIf you have any additional paramterised values that you have used (e.g. minimum open water fraction or bare ice albedo), please provide them here as a comma separated list",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7. Key Properties --> Assumptions\nAssumptions made in the sea ice model\n7.1. Description\nIs Required: TRUE Type: STRING Cardinality: 1.N\nGeneral overview description of any key assumptions made in this model.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.assumptions.description') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.2. On Diagnostic Variables\nIs Required: TRUE Type: STRING Cardinality: 1.N\nNote any assumptions that specifically affect the CMIP6 diagnostic sea ice variables.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.3. Missing Processes\nIs Required: TRUE Type: STRING Cardinality: 1.N\nList any key processes missing in this model configuration? Provide full details where this affects the CMIP6 diagnostic sea ice variables?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8. Key Properties --> Conservation\nConservation in the sea ice component\n8.1. Description\nIs Required: TRUE Type: STRING Cardinality: 1.1\nProvide a general description of conservation methodology.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.2. Properties\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nProperties conserved in sea ice by the numerical schemes.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.properties') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Energy\" \n# \"Mass\" \n# \"Salt\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"8.3. Budget\nIs Required: TRUE Type: STRING Cardinality: 1.1\nFor each conserved property, specify the output variables which close the related budgets. as a comma separated list. For example: Conserved property, variable1, variable2, variable3",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.budget') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.4. Was Flux Correction Used\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nDoes conservation involved flux correction?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"8.5. Corrected Conserved Prognostic Variables\nIs Required: TRUE Type: STRING Cardinality: 1.1\nList any variables which are conserved by more than the numerical scheme alone.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"9. Grid --> Discretisation --> Horizontal\nSea ice discretisation in the horizontal\n9.1. Grid\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nGrid on which sea ice is horizontal discretised?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Ocean grid\" \n# \"Atmosphere Grid\" \n# \"Own Grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"9.2. Grid Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nWhat is the type of sea ice grid?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Structured grid\" \n# \"Unstructured grid\" \n# \"Adaptive grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"9.3. Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nWhat is the advection scheme?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Finite differences\" \n# \"Finite elements\" \n# \"Finite volumes\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"9.4. Thermodynamics Time Step\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nWhat is the time step in the sea ice model thermodynamic component in seconds.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"9.5. Dynamics Time Step\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nWhat is the time step in the sea ice model dynamic component in seconds.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"9.6. Additional Details\nIs Required: FALSE Type: STRING Cardinality: 0.1\nSpecify any additional horizontal discretisation details.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"10. Grid --> Discretisation --> Vertical\nSea ice vertical properties\n10.1. Layering\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nWhat type of sea ice vertical layers are implemented for purposes of thermodynamic calculations?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Zero-layer\" \n# \"Two-layers\" \n# \"Multi-layers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"10.2. Number Of Layers\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nIf using multi-layers specify how many.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"10.3. Additional Details\nIs Required: FALSE Type: STRING Cardinality: 0.1\nSpecify any additional vertical grid details.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"11. Grid --> Seaice Categories\nWhat method is used to represent sea ice categories ?\n11.1. Has Mulitple Categories\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nSet to true if the sea ice model has multiple sea ice categories.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"11.2. Number Of Categories\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nIf using sea ice categories specify how many.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"11.3. Category Limits\nIs Required: TRUE Type: STRING Cardinality: 1.1\nIf using sea ice categories specify each of the category limits.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"11.4. Ice Thickness Distribution Scheme\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe the sea ice thickness distribution scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"11.5. Other\nIs Required: FALSE Type: STRING Cardinality: 0.1\nIf the sea ice model does not use sea ice categories specify any additional details. For example models that paramterise the ice thickness distribution ITD (i.e there is no explicit ITD) but there is assumed distribution and fluxes are computed accordingly.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.other') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"12. Grid --> Snow On Seaice\nSnow on sea ice details\n12.1. Has Snow On Ice\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs snow on ice represented in this model?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"12.2. Number Of Snow Levels\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nNumber of vertical levels of snow on ice?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"12.3. Snow Fraction\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe how the snow fraction on sea ice is determined",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"12.4. Additional Details\nIs Required: FALSE Type: STRING Cardinality: 0.1\nSpecify any additional details related to snow on ice.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"13. Dynamics\nSea Ice Dynamics\n13.1. Horizontal Transport\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nWhat is the method of horizontal advection of sea ice?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.horizontal_transport') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Incremental Re-mapping\" \n# \"Prather\" \n# \"Eulerian\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.2. Transport In Thickness Space\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nWhat is the method of sea ice transport in thickness space (i.e. in thickness categories)?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Incremental Re-mapping\" \n# \"Prather\" \n# \"Eulerian\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.3. Ice Strength Formulation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nWhich method of sea ice strength formulation is used?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Hibler 1979\" \n# \"Rothrock 1975\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.4. Redistribution\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nWhich processes can redistribute sea ice (including thickness)?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.redistribution') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Rafting\" \n# \"Ridging\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.5. Rheology\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nRheology, what is the ice deformation formulation?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.rheology') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Free-drift\" \n# \"Mohr-Coloumb\" \n# \"Visco-plastic\" \n# \"Elastic-visco-plastic\" \n# \"Elastic-anisotropic-plastic\" \n# \"Granular\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"14. Thermodynamics --> Energy\nProcesses related to energy in sea ice thermodynamics\n14.1. Enthalpy Formulation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nWhat is the energy formulation?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Pure ice latent heat (Semtner 0-layer)\" \n# \"Pure ice latent and sensible heat\" \n# \"Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)\" \n# \"Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"14.2. Thermal Conductivity\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nWhat type of thermal conductivity is used?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Pure ice\" \n# \"Saline ice\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"14.3. Heat Diffusion\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nWhat is the method of heat diffusion?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Conduction fluxes\" \n# \"Conduction and radiation heat fluxes\" \n# \"Conduction, radiation and latent heat transport\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"14.4. Basal Heat Flux\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nMethod by which basal ocean heat flux is handled?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Heat Reservoir\" \n# \"Thermal Fixed Salinity\" \n# \"Thermal Varying Salinity\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"14.5. Fixed Salinity Value\nIs Required: FALSE Type: FLOAT Cardinality: 0.1\nIf you have selected {Thermal properties depend on S-T (with fixed salinity)}, supply fixed salinity value for each sea ice layer.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"14.6. Heat Content Of Precipitation\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe the method by which the heat content of precipitation is handled.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"14.7. Precipitation Effects On Salinity\nIs Required: FALSE Type: STRING Cardinality: 0.1\nIf precipitation (freshwater) that falls on sea ice affects the ocean surface salinity please provide further details.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"15. Thermodynamics --> Mass\nProcesses related to mass in sea ice thermodynamics\n15.1. New Ice Formation\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe the method by which new sea ice is formed in open water.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"15.2. Ice Vertical Growth And Melt\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe the method that governs the vertical growth and melt of sea ice.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"15.3. Ice Lateral Melting\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nWhat is the method of sea ice lateral melting?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Floe-size dependent (Bitz et al 2001)\" \n# \"Virtual thin ice melting (for single-category)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"15.4. Ice Surface Sublimation\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe the method that governs sea ice surface sublimation.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"15.5. Frazil Ice\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe the method of frazil ice formation.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"16. Thermodynamics --> Salt\nProcesses related to salt in sea ice thermodynamics.\n16.1. Has Multiple Sea Ice Salinities\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nDoes the sea ice model use two different salinities: one for thermodynamic calculations; and one for the salt budget?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"16.2. Sea Ice Salinity Thermal Impacts\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nDoes sea ice salinity impact the thermal properties of sea ice?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"17. Thermodynamics --> Salt --> Mass Transport\nMass transport of salt\n17.1. Salinity Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHow is salinity determined in the mass transport of salt calculation?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Prescribed salinity profile\" \n# \"Prognostic salinity profile\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"17.2. Constant Salinity Value\nIs Required: FALSE Type: FLOAT Cardinality: 0.1\nIf using a constant salinity value specify this value in PSU?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"17.3. Additional Details\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe the salinity profile used.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"18. Thermodynamics --> Salt --> Thermodynamics\nSalt thermodynamics\n18.1. Salinity Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHow is salinity determined in the thermodynamic calculation?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Prescribed salinity profile\" \n# \"Prognostic salinity profile\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"18.2. Constant Salinity Value\nIs Required: FALSE Type: FLOAT Cardinality: 0.1\nIf using a constant salinity value specify this value in PSU?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"18.3. Additional Details\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe the salinity profile used.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"19. Thermodynamics --> Ice Thickness Distribution\nIce thickness distribution details.\n19.1. Representation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHow is the sea ice thickness distribution represented?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Explicit\" \n# \"Virtual (enhancement of thermal conductivity, thin ice melting)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"20. Thermodynamics --> Ice Floe Size Distribution\nIce floe-size distribution details.\n20.1. Representation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHow is the sea ice floe-size represented?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Explicit\" \n# \"Parameterised\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"20.2. Additional Details\nIs Required: FALSE Type: STRING Cardinality: 0.1\nPlease provide further details on any parameterisation of floe-size.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"21. Thermodynamics --> Melt Ponds\nCharacteristics of melt ponds.\n21.1. Are Included\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nAre melt ponds included in the sea ice model?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"21.2. Formulation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nWhat method of melt pond formulation is used?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Flocco and Feltham (2010)\" \n# \"Level-ice melt ponds\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"21.3. Impacts\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nWhat do melt ponds have an impact on?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Albedo\" \n# \"Freshwater\" \n# \"Heat\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"22. Thermodynamics --> Snow Processes\nThermodynamic processes in snow on sea ice\n22.1. Has Snow Aging\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.N\nSet to True if the sea ice model has a snow aging scheme.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"22.2. Snow Aging Scheme\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe the snow aging scheme.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"22.3. Has Snow Ice Formation\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.N\nSet to True if the sea ice model has snow ice formation.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"22.4. Snow Ice Formation Scheme\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe the snow ice formation scheme.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"22.5. Redistribution\nIs Required: TRUE Type: STRING Cardinality: 1.1\nWhat is the impact of ridging on snow cover?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"22.6. Heat Diffusion\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nWhat is the heat diffusion through snow methodology in sea ice thermodynamics?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Single-layered heat diffusion\" \n# \"Multi-layered heat diffusion\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"23. Radiative Processes\nSea Ice Radiative Processes\n23.1. Surface Albedo\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nMethod used to handle surface albedo.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.radiative_processes.surface_albedo') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Delta-Eddington\" \n# \"Parameterized\" \n# \"Multi-band albedo\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"23.2. Ice Radiation Transmission\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nMethod by which solar radiation through sea ice is handled.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Delta-Eddington\" \n# \"Exponential attenuation\" \n# \"Ice radiation transmission per category\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"©2017 ES-DOC"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
jsharpna/DavisSML
|
lectures/lecture5/.ipynb_checkpoints/lecture5-checkpoint.ipynb
|
mit
|
[
"The Lasso\nStats 208: Lecture 5\nProf. Sharpnack\n\nLecture slides at course github page\nSome content of these slides are from STA 251 notes and STA 141B lectures. \nSome content is from Elements of Statistical Learning\n\nRecall Convex Optimization\nDef A function $f : \\mathbb R^p \\to \\mathbb R$ is convex if for any $0 \\le \\alpha \\le 1$, $x_0, x_1 \\in \\mathbb R^p$,\n$$\nf(\\alpha x_0 + (1 - \\alpha) x_1) \\le \\alpha f(x_0) + (1 - \\alpha) f(x_1).\n$$\n\nFor convex functions, local minima are global minima\n\nRecall 1st Order Condition. If f is differentiable then it is convex if \n$$\nf(x) \\ge f(x_0) + \\nabla f(x_0)^\\top (x - x_0), \\forall x,x_0\n$$\nand when $\\nabla f(x_0) = 0$ then \n$$\nf(x) \\ge f(x_0), \\forall x\n$$\nso any fixed point of gradient descent is a global min (for convex, differentiable f)\nSubdifferential\nDef. $g(x_0) \\in \\mathbb R^p$ is a subgradient of $f$ at $x_0$ if\n$$\nf(x) \\ge f(x_0) + g(x_0)^\\top (x - x_0), \\forall x.\n$$\nThe set of all subgradients at $x_0$ is call the subdifferential, denoted $\\partial f(x_0)$.\n\nFor any global optima, $0 \\in \\partial f(x_0)$.\n\nWavelet denoising\nSoft thresholding is commonly used for orthonormal bases.\n- Suppose that we have a vector $y_1,\\ldots, y_T$ (like a time series).\n- And we want to reconstruct $y$ with $W \\beta$ where $\\beta$ has a small sum of absolute values $\\sum_i |\\beta_i|$ \n- $W$ is $T \\times T$ and $W W^\\top = W^\\top W = I$ (orthonormal full rank design)\nWant to minimize \n$$\n\\frac 12 \\sum_{i=1}^T (y - W \\beta)i^2 + \\lambda \\sum{i=1}^T |\\beta_i|.\n$$",
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n## Explore Turkish stock exchange dataset\n\ntse = pd.read_excel('../../data/data_akbilgic.xlsx',skiprows=1)\n\ntse = tse.rename(columns={'ISE':'TLISE','ISE.1':'USDISE'})\n\ndef const_wave(T,a,b):\n wave = np.zeros(T)\n s1 = (b-a) // 2\n s2 = (b-a) - s1\n norm_C = (s1*s2 / (s1+s2))**0.5\n wave[a:a+s1] = norm_C / s1\n wave[a+s1:b] = -norm_C / s2\n return wave\n\ndef _const_wave_basis(T,a,b):\n if b-a < 2:\n return []\n wave_basis = []\n wave_basis.append(const_wave(T,a,b))\n mid_pt = a + (b-a)//2\n wave_basis += _const_wave_basis(T,a,mid_pt)\n wave_basis += _const_wave_basis(T,mid_pt,b)\n return wave_basis\n\ndef const_wave_basis(T,a,b):\n father = np.ones(T) / T**0.5\n return [father] + _const_wave_basis(T,a,b)\n\n# Construct discrete Haar wavelet basis\nT,p = tse.shape\nwave_basis = const_wave_basis(T,0,T)\nW = np.array(wave_basis).T\n\n_ = plt.plot(W[:,:3])\n\ndef soft(y,lamb):\n pos_part = (y - lamb) * (y > lamb)\n neg_part = (y + lamb) * (y < -lamb)\n return pos_part + neg_part\n\n## Volatility seems most interesting\n## will construct local measure of volatility\n## remove rolling window estimate (local centering)\n## square the residuals\n\n#tse = tse.set_index('date')\ntse_trem = tse - tse.rolling(\"7D\").mean()\ntse_vol = tse_trem**2.\n\n## Make wavelet transformation and soft threshold\n\ntse_wave = W.T @ tse_vol.values\nlamb = .001\ntse_soft = soft(tse_wave,lamb)\ntse_rec = W @ tse_soft\ntse_den = tse_vol.copy()\ntse_den.iloc[:,:] = tse_rec\n\n_ = tse_vol.plot(subplots=True,figsize=(10,10))\n\n_ = tse_den.plot(subplots=True,figsize=(10,10))",
"Wavelet reconstruction\nCan reconstruct the sequence by\n$$\n\\hat y = W \\hat \\beta.\n$$\nThe objective is likelihood term + L1 penalty term,\n$$\n\\frac 12 \\sum_{i=1}^T (y - W \\beta)i^2 + \\lambda \\sum{i=1}^T |\\beta_i|.\n$$\n\nThe L1 penalty \"forces\" some $\\beta_i = 0$, inducing sparsity",
"plt.plot(tse_soft[:,4])\nhigh_idx = np.where(np.abs(tse_soft[:,5]) > .0001)[0]\nprint(high_idx)\n\nfig, axs = plt.subplots(len(high_idx) + 1,1)\nfor i, idx in enumerate(high_idx):\n axs[i].plot(W[:,idx])\nplt.plot(tse_den['FTSE'],c='r')",
"Non-orthogonal design\nThe objective is likelihood term + L1 penalty term,\n$$\n\\frac 12 \\sum_{i=1}^T (y - X \\beta)i^2 + \\lambda \\sum{i=1}^T |\\beta_i|.\n$$\ndoes not have closed form for $X$ that is non-orthogonal.\n\nit is convex\nit is non-smooth (recall $|x|$)\nhas tuning parameter $\\lambda$\n\nCompare to best subset selection (NP-hard):\n$$\n\\min \\frac 12 \\sum_{i=1}^T (y - X \\beta)_i^2.\n$$\nfor\n$$\n\\| \\beta \\|_0 = |{\\rm supp}(\\beta)| < s.\n$$\nImage of Lasso solution\n<img src=\"lasso_soln.PNG\" width=100%>\nSolving the Lasso\nThe lasso can be written in regularized form,\n$$\n\\min \\frac 12 \\sum_{i=1}^T (y - X \\beta)i^2 + \\lambda \\sum{i=1}^T |\\beta_i|,\n$$\nor in constrained form,\n$$\n\\min \\frac 12 \\sum_{i=1}^T (y - X \\beta)i^2, \\quad \\textrm{s.t.} \\sum{i=1}^T |\\beta_i| \\le C,\n$$\n\nFor every $\\lambda$ there is a $C$ such that the regularized form and constrained form have the same argmin\nThis correspondence is data dependent\n\nSolving Lasso\nA quadratic program (QP) is a convex optimization of the form \n$$\n\\min \\beta^\\top Q \\beta + \\beta^\\top a \\quad \\textrm{ s.t. } A\\beta \\le c\n$$\nwhere $Q$ is positive semi-definite.\nclaim: The lasso (constrained form) is a QP.\n$$\n\\sum_{i=1}^T (y - X \\beta)_i^2 = \\frac 12 \\beta^\\top (X^\\top X) \\beta + \\beta^\\top (X^\\top y) + C\n$$\nbut what about $\\| \\beta \\|_1$?\n\nSolving the lasso\nFor a single $\\lambda$ (or $C$ in constrained form) can solve the lasso with many specialized methods\n- quadratic program solver\n- proximal gradient\n- alternating direction method of multipliers\nbut $\\lambda$ is a tuning parameter. Options\n1. Construct a grid of $\\lambda$ and solve each lasso\n2. 
Solve for all $\\lambda$ values - path algorithm\nActive sets and why lasso works better\n\nLet $\\hat \\beta_\\lambda$ be the $\\hat \\beta$ at tuning parameter $\\lambda$.\nDefine $\\mathcal A_\\lambda = {\\rm supp}(\\hat \\beta_\\lambda)$ the non-zero elements of $\\hat \\beta_\\lambda$.\nFor large $\\lambda = \\infty$, $|\\mathcal A_\\lambda| = 0$ (penalty dominates)\nFor small $\\lambda = 0$, $|\\mathcal A_\\lambda| = p$ (loss dominates)\n\nForward greedy selection only adds elements to the active set, does not remove elements.\n\nLasso Path\n\nStart at $\\lambda = +\\infty, \\hat \\beta = 0$.\nDecrease $\\lambda$ until $\\hat \\beta_{j_1} \\ne 0$, $\\mathcal A \\gets {j_1}$. (Hitting event)\n\nContinue decreasing $\\lambda$ updating $\\mathcal A$ with hitting and leaving events\n\n\n$x_{j_1}$ is the predictor variable most correlated with $y$\n\nHitting events are when element is added to $\\mathcal A$\nLeaving events are when element is removed from $\\mathcal A$\n$\\hat \\beta_{\\lambda,j}$ is piecewise linear, continuous, as a function of $\\lambda$\nknots are at \"hitting\" and \"leaving\" events\n\n\nfrom sklearn.org\nLeast Angle Regression (LAR)\n\nStandardize predictors and start with residual $r = y - \\bar y$, $\\hat beta = 0$\nFind $x_j$ most correlated with $r$\nMove $\\beta_j$ in the direction of $x_j^\\top r$ until the residual is more correlated with another $x_k$\nMove $\\beta_j,\\beta_k$ in the direction of their joint OLS coefficients of $r$ on $(x_j,x_k)$ until some other competitor $x_l$ has as much correlation with the current residual\nContinue until all predictors have been entered.\n\nLasso modification\n4.5 If a non-zero coefficient drops to 0 then remove it from the active set and recompute the restricted OLS.\n\nfrom ESL",
"# %load ../standard_import.txt\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import preprocessing, model_selection, linear_model\n\n%matplotlib inline\n\n## Modified from the github repo: https://github.com/JWarmenhoven/ISLR-python \n## which is based on the book by James et al. Intro to Statistical Learning.\n\ndf = pd.read_csv('../../data/Hitters.csv', index_col=0).dropna()\ndf.index.name = 'Player'\ndf.info()\n\n## Simulate a dataset for lasso\n\nn=100\np=1000\nX = np.random.randn(n,p)\nX = preprocessing.scale(X)\n\n## Subselect true active set\n\nsprob = 0.02\nSbool = np.random.rand(p) < sprob\ns = np.sum(Sbool)\nprint(\"Number of non-zero's: {}\".format(s))\n\n## Construct beta and y\n\nmu = 100.\nbeta = np.zeros(p)\nbeta[Sbool] = mu * np.random.randn(s)\n\neps = np.random.randn(n)\ny = X.dot(beta) + eps\n\n## Run lars with lasso mod, find active set\n\nlarper = linear_model.lars_path(X,y,method=\"lasso\")\nS = set(np.where(Sbool)[0])\n\ndef plot_it():\n for j in S:\n _ = plt.plot(larper[0],larper[2][j,:],'r')\n for j in set(range(p)) - S:\n _ = plt.plot(larper[0],larper[2][j,:],'k',linewidth=.75)\n _ = plt.title('Lasso path for simulated data')\n _ = plt.xlabel('lambda')\n _ = plt.ylabel('Coef')\n\nplot_it()\n\n## Hitters dataset\n\ndf = pd.read_csv('../../data/Hitters.csv', index_col=0).dropna()\ndf.index.name = 'Player'\ndf.info()\n\ndf.head()\n\ndummies = pd.get_dummies(df[['League', 'Division', 'NewLeague']])\ndummies.info()\nprint(dummies.head())\n\ny = df.Salary\n\n# Drop the column with the independent variable (Salary), and columns for which we created dummy variables\nX_ = df.drop(['Salary', 'League', 'Division', 'NewLeague'], axis=1).astype('float64')\n# Define the feature set X.\nX = pd.concat([X_, dummies[['League_N', 'Division_W', 'NewLeague_N']]], axis=1)\nX.info()\n\nX.head(5)\n\nloo = model_selection.LeaveOneOut()\nlooiter = loo.split(X)\nhitlasso = 
linear_model.LassoCV(cv=looiter)\nhitlasso.fit(X,y)\n\nprint(\"The selected lambda value is {:.2f}\".format(hitlasso.alpha_))\n\nhitlasso.coef_",
"We can also compare this to the selected model from forward stagewise regression:\n[-0.21830515, 0.38154135, 0. , 0. , 0. ,\n 0.16139123, 0. , 0. , 0. , 0. ,\n 0.09994524, 0.56696569, -0.16872682, 0.16924078, 0. ,\n 0. , 0. , -0.19429699, 0. ]\nThis is not exactly the same model with differences in the inclusion or exclusion of AtBat, HmRun, Runs, RBI, Years, CHmRun, Errors, League_N, Division_W, NewLeague_N",
"bforw = [-0.21830515, 0.38154135, 0. , 0. , 0. ,\n 0.16139123, 0. , 0. , 0. , 0. ,\n 0.09994524, 0.56696569, -0.16872682, 0.16924078, 0. ,\n 0. , 0. , -0.19429699, 0. ]\n\nprint(\", \".join(X.columns[(hitlasso.coef_ != 0.) != (bforw != 0.)]))"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
Diyago/Machine-Learning-scripts
|
statistics/Доверительные интервалы для двух долей stat.two_proportions_diff_test.ipynb
|
apache-2.0
|
[
"Z-критерий для двух долей",
"import numpy as np\nimport pandas as pd\n\nimport scipy\nfrom statsmodels.stats.weightstats import *\nfrom statsmodels.stats.proportion import proportion_confint",
"Загрузка данных",
"data = pd.read_csv('banner_click_stat.txt', header = None, sep = '\\t')\ndata.columns = ['banner_a', 'banner_b']\n\ndata.head()\n\ndata.describe()",
"Интервальные оценки долей\n$$\\frac1{ 1 + \\frac{z^2}{n} } \\left( \\hat{p} + \\frac{z^2}{2n} \\pm z \\sqrt{ \\frac{ \\hat{p}\\left(1-\\hat{p}\\right)}{n} + \\frac{z^2}{4n^2} } \\right), \\;\\; z \\equiv z_{1-\\frac{\\alpha}{2}}$$",
"conf_interval_banner_a = proportion_confint(sum(data.banner_a), \n data.shape[0],\n method = 'wilson')\nconf_interval_banner_b = proportion_confint(sum(data.banner_b), \n data.shape[0],\n method = 'wilson')\n\nprint '95%% confidence interval for a click probability, banner a: [%f, %f]' % conf_interval_banner_a\nprint '95%% confidence interval for a click probability, banner b [%f, %f]' % conf_interval_banner_b",
"Z-критерий для разности долей (независимые выборки)\n| $X_1$ | $X_2$\n ------------- | -------------|\n 1 | a | b \n 0 | c | d \n $\\sum$ | $n_1$| $n_2$\n$$ \\hat{p}_1 = \\frac{a}{n_1}$$\n$$ \\hat{p}_2 = \\frac{b}{n_2}$$\n$$\\text{Доверительный интервал для }p_1 - p_2\\colon \\;\\; \\hat{p}1 - \\hat{p}_2 \\pm z{1-\\frac{\\alpha}{2}}\\sqrt{\\frac{\\hat{p}_1(1 - \\hat{p}_1)}{n_1} + \\frac{\\hat{p}_2(1 - \\hat{p}_2)}{n_2}}$$\n$$Z-статистика: Z({X_1, X_2}) = \\frac{\\hat{p}_1 - \\hat{p}_2}{\\sqrt{P(1 - P)(\\frac{1}{n_1} + \\frac{1}{n_2})}}$$\n$$P = \\frac{\\hat{p}_1{n_1} + \\hat{p}_2{n_2}}{{n_1} + {n_2}} $$",
"def proportions_diff_confint_ind(sample1, sample2, alpha = 0.05): \n z = scipy.stats.norm.ppf(1 - alpha / 2.)\n \n p1 = float(sum(sample1)) / len(sample1)\n p2 = float(sum(sample2)) / len(sample2)\n \n left_boundary = (p1 - p2) - z * np.sqrt(p1 * (1 - p1)/ len(sample1) + p2 * (1 - p2)/ len(sample2))\n right_boundary = (p1 - p2) + z * np.sqrt(p1 * (1 - p1)/ len(sample1) + p2 * (1 - p2)/ len(sample2))\n \n return (left_boundary, right_boundary)\n\ndef proportions_diff_z_stat_ind(sample1, sample2):\n n1 = len(sample1)\n n2 = len(sample2)\n \n p1 = float(sum(sample1)) / n1\n p2 = float(sum(sample2)) / n2 \n P = float(p1*n1 + p2*n2) / (n1 + n2)\n \n return (p1 - p2) / np.sqrt(P * (1 - P) * (1. / n1 + 1. / n2))\n\ndef proportions_diff_z_test(z_stat, alternative = 'two-sided'):\n if alternative not in ('two-sided', 'less', 'greater'):\n raise ValueError(\"alternative not recognized\\n\"\n \"should be 'two-sided', 'less' or 'greater'\")\n \n if alternative == 'two-sided':\n return 2 * (1 - scipy.stats.norm.cdf(np.abs(z_stat)))\n \n if alternative == 'less':\n return scipy.stats.norm.cdf(z_stat)\n\n if alternative == 'greater':\n return 1 - scipy.stats.norm.cdf(z_stat)\n\nprint \"95%% confidence interval for a difference between proportions: [%f, %f]\" %\\\n proportions_diff_confint_ind(data.banner_a, data.banner_b)\n\nprint \"p-value: %f\" % proportions_diff_z_test(proportions_diff_z_stat_ind(data.banner_a, data.banner_b))\n\nprint \"p-value: %f\" % proportions_diff_z_test(proportions_diff_z_stat_ind(data.banner_a, data.banner_b), 'less')",
"Z-критерий для разности долей (связанные выборки)\n$X_1$ \\ $X_2$ | 1| 0 | $\\sum$\n ------------- | -------------|\n 1 | e | f | e + f\n 0 | g | h | g + h\n $\\sum$ | e + g| f + h | n \n$$ \\hat{p}_1 = \\frac{e + f}{n}$$\n$$ \\hat{p}_2 = \\frac{e + g}{n}$$\n$$ \\hat{p}_1 - \\hat{p}_2 = \\frac{f - g}{n}$$\n$$\\text{Доверительный интервал для }p_1 - p_2\\colon \\;\\; \\frac{f - g}{n} \\pm z_{1-\\frac{\\alpha}{2}}\\sqrt{\\frac{f + g}{n^2} - \\frac{(f - g)^2}{n^3}}$$\n$$Z-статистика: Z({X_1, X_2}) = \\frac{f - g}{\\sqrt{f + g - \\frac{(f-g)^2}{n}}}$$",
"def proportions_diff_confint_rel(sample1, sample2, alpha = 0.05):\n z = scipy.stats.norm.ppf(1 - alpha / 2.)\n sample = zip(sample1, sample2)\n n = len(sample)\n \n f = sum([1 if (x[0] == 1 and x[1] == 0) else 0 for x in sample])\n g = sum([1 if (x[0] == 0 and x[1] == 1) else 0 for x in sample])\n \n left_boundary = float(f - g) / n - z * np.sqrt(float((f + g)) / n**2 - float((f - g)**2) / n**3)\n right_boundary = float(f - g) / n + z * np.sqrt(float((f + g)) / n**2 - float((f - g)**2) / n**3)\n return (left_boundary, right_boundary)\n\ndef proportions_diff_z_stat_rel(sample1, sample2):\n sample = zip(sample1, sample2)\n n = len(sample)\n \n f = sum([1 if (x[0] == 1 and x[1] == 0) else 0 for x in sample])\n g = sum([1 if (x[0] == 0 and x[1] == 1) else 0 for x in sample])\n \n return float(f - g) / np.sqrt(f + g - float((f - g)**2) / n )\n\nprint \"95%% confidence interval for a difference between proportions: [%f, %f]\" \\\n % proportions_diff_confint_rel(data.banner_a, data.banner_b)\n\nprint \"p-value: %f\" % proportions_diff_z_test(proportions_diff_z_stat_rel(data.banner_a, data.banner_b))\n\nprint \"p-value: %f\" % proportions_diff_z_test(proportions_diff_z_stat_rel(data.banner_a, data.banner_b), 'less')"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
mne-tools/mne-tools.github.io
|
0.22/_downloads/dfd4175ec1a2c7f21de3596573c74301/plot_multidict_reweighted_tfmxne.ipynb
|
bsd-3-clause
|
[
"%matplotlib inline",
"Compute iterative reweighted TF-MxNE with multiscale time-frequency dictionary\nThe iterative reweighted TF-MxNE solver is a distributed inverse method\nbased on the TF-MxNE solver, which promotes focal (sparse) sources\n:footcite:StrohmeierEtAl2015. The benefit of this approach is that:\n\nit is spatio-temporal without assuming stationarity (sources properties\n can vary over time),\nactivations are localized in space, time and frequency in one step,\nthe solver uses non-convex penalties in the TF domain, which results in a\n solution less biased towards zero than when simple TF-MxNE is used,\nusing a multiscale dictionary allows to capture short transient\n activations along with slower brain waves :footcite:BekhtiEtAl2016.",
"# Author: Mathurin Massias <mathurin.massias@gmail.com>\n# Yousra Bekhti <yousra.bekhti@gmail.com>\n# Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>\n# Alexandre Gramfort <alexandre.gramfort@inria.fr>\n#\n# License: BSD (3-clause)\n\nimport os.path as op\n\nimport mne\nfrom mne.datasets import somato\nfrom mne.inverse_sparse import tf_mixed_norm, make_stc_from_dipoles\nfrom mne.viz import plot_sparse_source_estimates\n\nprint(__doc__)",
"Load somatosensory MEG data",
"data_path = somato.data_path()\nsubject = '01'\ntask = 'somato'\nraw_fname = op.join(data_path, 'sub-{}'.format(subject), 'meg',\n 'sub-{}_task-{}_meg.fif'.format(subject, task))\nfwd_fname = op.join(data_path, 'derivatives', 'sub-{}'.format(subject),\n 'sub-{}_task-{}-fwd.fif'.format(subject, task))\n\ncondition = 'Unknown'\n\n# Read evoked\nraw = mne.io.read_raw_fif(raw_fname)\nevents = mne.find_events(raw, stim_channel='STI 014')\nreject = dict(grad=4000e-13, eog=350e-6)\npicks = mne.pick_types(raw.info, meg=True, eog=True)\n\nevent_id, tmin, tmax = 1, -1., 3.\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n reject=reject, preload=True)\nevoked = epochs.filter(1, None).average()\nevoked = evoked.pick_types(meg=True)\nevoked.crop(tmin=0.008, tmax=0.2)\n\n# Compute noise covariance matrix\ncov = mne.compute_covariance(epochs, rank='info', tmax=0.)\n\n# Handling forward solution\nforward = mne.read_forward_solution(fwd_fname)",
"Run iterative reweighted multidict TF-MxNE solver",
"alpha, l1_ratio = 20, 0.05\nloose, depth = 1, 0.95\n# Use a multiscale time-frequency dictionary\nwsize, tstep = [4, 16], [2, 4]\n\n\nn_tfmxne_iter = 10\n# Compute TF-MxNE inverse solution with dipole output\ndipoles, residual = tf_mixed_norm(\n evoked, forward, cov, alpha=alpha, l1_ratio=l1_ratio,\n n_tfmxne_iter=n_tfmxne_iter, loose=loose,\n depth=depth, tol=1e-3,\n wsize=wsize, tstep=tstep, return_as_dipoles=True,\n return_residual=True)\n\n# Crop to remove edges\nfor dip in dipoles:\n dip.crop(tmin=-0.05, tmax=0.3)\nevoked.crop(tmin=-0.05, tmax=0.3)\nresidual.crop(tmin=-0.05, tmax=0.3)",
"Generate stc from dipoles",
"stc = make_stc_from_dipoles(dipoles, forward['src'])\n\nplot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1),\n opacity=0.1, fig_name=\"irTF-MxNE (cond %s)\"\n % condition)",
"Show the evoked response and the residual for gradiometers",
"ylim = dict(grad=[-300, 300])\nevoked.pick_types(meg='grad')\nevoked.plot(titles=dict(grad='Evoked Response: Gradiometers'), ylim=ylim,\n proj=True)\n\nresidual.pick_types(meg='grad')\nresidual.plot(titles=dict(grad='Residuals: Gradiometers'), ylim=ylim,\n proj=True)",
"References\n.. footbibliography::"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.