Columns: repo_name (string, 6–77 chars) · path (string, 8–215 chars) · license (string, 15 classes) · cells (list) · types (list)
samuelsinayoko/kaggle-housing-prices
examples/detailed-data-exploration.ipynb
mit
[ "Overall", "# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nfrom subprocess import check_output\nprint(check_output([\"ls\", \"../input\"]).decode(\"utf8\"))\n\n# Any results you write to the current directory are saved as output.\n\ndf = pd.read_csv('../input/train.csv')\n#df.drop('SalePrice', axis = 1, inplace = True)\n#test = pd.read_csv('../input/test.csv')\n#df = df.append(test, ignore_index = True)\ndf.head()\n\n\ndf.describe()\n\ndf.columns\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline \n\nprint(\"Some Statistics of the Housing Price:\\n\")\nprint(df['SalePrice'].describe())\nprint(\"\\nThe median of the Housing Price is: \", df['SalePrice'].median(axis = 0))\n\nsns.distplot(df['SalePrice'], kde = False, color = 'b', hist_kws={'alpha': 0.9})", "Numerical Features", "corr = df.select_dtypes(include = ['float64', 'int64']).iloc[:, 1:].corr()\nplt.figure(figsize=(12, 12))\nsns.heatmap(corr, vmax=1, square=True)\n\ncor_dict = corr['SalePrice'].to_dict()\ndel cor_dict['SalePrice']\nprint(\"List the numerical features decendingly by their correlation with Sale Price:\\n\")\nfor ele in sorted(cor_dict.items(), key = lambda x: -abs(x[1])):\n print(\"{0}: \\t{1}\".format(*ele))", "The housing price correlates strongly with OverallQual, GrLivArea(GarageCars), GargeArea, TotalBsmtSF, 1stFlrSF, FullBath, TotRmsAbvGrd, YearBuilt, YearRemodAdd, GargeYrBlt, MasVnrArea and Fireplaces. 
But some of those features are highly correlated among each others.", "sns.regplot(x = 'OverallQual', y = 'SalePrice', data = df, color = 'Orange')\n\nplt.figure(1)\nf, axarr = plt.subplots(3, 2, figsize=(10, 9))\nprice = df.SalePrice.values\naxarr[0, 0].scatter(df.GrLivArea.values, price)\naxarr[0, 0].set_title('GrLiveArea')\naxarr[0, 1].scatter(df.GarageArea.values, price)\naxarr[0, 1].set_title('GarageArea')\naxarr[1, 0].scatter(df.TotalBsmtSF.values, price)\naxarr[1, 0].set_title('TotalBsmtSF')\naxarr[1, 1].scatter(df['1stFlrSF'].values, price)\naxarr[1, 1].set_title('1stFlrSF')\naxarr[2, 0].scatter(df.TotRmsAbvGrd.values, price)\naxarr[2, 0].set_title('TotRmsAbvGrd')\naxarr[2, 1].scatter(df.MasVnrArea.values, price)\naxarr[2, 1].set_title('MasVnrArea')\nf.text(-0.01, 0.5, 'Sale Price', va='center', rotation='vertical', fontsize = 12)\nplt.tight_layout()\nplt.show()\n\nfig = plt.figure(2, figsize=(9, 7))\nplt.subplot(211)\nplt.scatter(df.YearBuilt.values, price)\nplt.title('YearBuilt')\n\nplt.subplot(212)\nplt.scatter(df.YearRemodAdd.values, price)\nplt.title('YearRemodAdd')\n\nfig.text(-0.01, 0.5, 'Sale Price', va = 'center', rotation = 'vertical', fontsize = 12)\n\nplt.tight_layout()", "Categorical Features", "print(df.select_dtypes(include=['object']).columns.values)", "Neighborhood", "plt.figure(figsize = (12, 6))\nsns.boxplot(x = 'Neighborhood', y = 'SalePrice', data = df)\nxt = plt.xticks(rotation=45)\n\nplt.figure(figsize = (12, 6))\nsns.countplot(x = 'Neighborhood', data = df)\nxt = plt.xticks(rotation=45)", "Could group those Neighborhoods with similar housing price into a same bucket for dimension-reduction.\nHousing Price vs Sales\n\nSale Type & Condition\nSales Seasonality", "fig, ax = plt.subplots(2, 1, figsize = (10, 6))\nsns.boxplot(x = 'SaleType', y = 'SalePrice', data = df, ax = ax[0])\nsns.boxplot(x = 'SaleCondition', y = 'SalePrice', data = df, ax = ax[1])\nplt.tight_layout()\n\ng = sns.FacetGrid(df, col = 'YrSold', col_wrap = 3)\ng.map(sns.boxplot, 'MoSold', 'SalePrice', palette='Set2', order = range(1, 13))\\\n.set(ylim = (0, 500000))\nplt.tight_layout()", "Sale's timing does not seem to hugely affect the house. 
\n\nHousing Style", "fig, ax = plt.subplots(2, 1, figsize = (10, 8))\nsns.boxplot(x = 'BldgType', y = 'SalePrice', data = df, ax = ax[0])\nsns.boxplot(x = 'HouseStyle', y = 'SalePrice', data = df, ax = ax[1])", "Housing Condition", "fig, ax = plt.subplots(2, 1, figsize = (10, 8))\nsns.boxplot(x = 'Condition1', y = 'SalePrice', data = df, ax = ax[0])\nsns.boxplot(x = 'Exterior1st', y = 'SalePrice', data = df, ax = ax[1])\nx = plt.xticks(rotation = 45)\nplt.show()", "Basement Conditions", "fig, ax = plt.subplots(2, 2, figsize = (10, 8))\nsns.boxplot('BsmtCond', 'SalePrice', data = df, ax = ax[0, 0])\nsns.boxplot('BsmtQual', 'SalePrice', data = df, ax = ax[0, 1])\nsns.boxplot('BsmtExposure', 'SalePrice', data = df, ax = ax[1, 0])\nsns.boxplot('BsmtFinType1', 'SalePrice', data = df, ax = ax[1, 1])", "Home Functionality", "sns.violinplot('Functional', 'SalePrice', data = df)", "FirePlaceQu", "sns.factorplot('FireplaceQu', 'SalePrice', data = df, color = 'm', \\\n estimator = np.median, order = ['Ex', 'Gd', 'TA', 'Fa', 'Po'], size = 4.5, aspect=1.35)\n\npd.crosstab(df.Fireplaces, df.FireplaceQu)\n\ng = sns.FacetGrid(df, col = 'FireplaceQu', col_wrap = 3, col_order=['Ex', 'Gd', 'TA', 'Fa', 'Po'])\ng.map(sns.boxplot, 'Fireplaces', 'SalePrice', order = [1, 2, 3], palette = 'Set2')", "Heating\n\nAmes is a cold place in winter, so heating (as well as fireplace qualities) are quite important.", "pd.crosstab(df.HeatingQC, df.CentralAir)\n\npd.crosstab(df.HeatingQC, df.FireplaceQu)\n\n\nsns.factorplot('HeatingQC', 'SalePrice', hue = 'CentralAir', estimator = np.mean, data = df, \n size = 4.5, aspect = 1.4)", "Clearly, having AC or not has a big impact on housing price.", "fig, ax = plt.subplots(1, 2, figsize = (10, 4))\nsns.boxplot('Electrical', 'SalePrice', data = df, ax = ax[0]).set(ylim = (0, 400000))\nsns.countplot('Electrical', data = df)\nplt.tight_layout()", "Kitchen Quality", "sns.factorplot('KitchenQual', 'SalePrice', estimator = np.mean, \n size = 4.5, aspect = 1.4, data = df, order = ['Ex', 'Gd', 'TA', 'Fa'])", "MSZonig", "sns.boxplot(x = 'MSZoning', y = 'SalePrice', data = df)", "Street & Alley Access", "\nfig, ax = plt.subplots(1, 2, figsize = (10, 4))\nsns.boxplot(x = 'Street', y = 'SalePrice', data = df, ax = ax[0])\nsns.boxplot(x = 'Alley', y = 'SalePrice', data = df, ax = ax[1])\nplt.tight_layout()\n\nprint(\"The NA's in Alley is: \", df['Alley'].isnull().sum())\nprint(\"\\nThere are so many NA's in Alley. When Alley is NA, Street = \", \n df[df.Alley.notnull()].Street.unique())\nprint(\"\\n\", pd.crosstab(df.Street, df.Alley))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
Pybonacci/notebooks
Microentradas - Evitar ciertas etiquetas en la leyenda en Matplotlib.ipynb
bsd-2-clause
[ "A veces, me llegan ficheros de datos con datos cada hora o cada día y los quiero representar en un plot. Para ello, podría acumular los ficheros en uno solo y luego pintarlo pero como lo debo hacer en 'tiempo casi-real' se puede meter todo en un bucle while que espera los ficheros cada hora/día/lo que sea y va pintando cada variable por tramos. Por ejemplo, una aproximación podría ser la siguiente:", "import numpy as np\nimport matplotlib.pyplot as plt\nplt.style.use('bmh')\n%matplotlib inline\n\nplt.figure(figsize = (12, 6))\nfor i in range(10):\n x = np.arange(i * 10, i * 10 + 10)\n y_var1 = np.random.randint(1, 5, 10)\n y_var2 = np.random.randint(5, 8, 10)\n plt.plot(x, y_var1, color = 'k', label = 'variable1')\n plt.plot(x, y_var2, color = 'g', label = 'variable2')\n plt.legend()\n plt.ylim(0, 9)", "Como véis, en la gráfica anterior hay varios problemas pero como esta es una MicroEntrada solo nos vamos a centrar en el problema de las etiquetas repetidas en la leyenda.\n¿Cómo podríamos evitar el meter tantas veces una etiqueta repetida?\nMi problema es que el bucle es o podría ser 'infinito' y tengo que inicializar las etiquetas de alguna forma. Si miro en esta respuesta encontrada en Stackoverflow dice que en la documentación se indica que \"If label attribute is empty string or starts with “_”, those artists will be ignored.\" pero si busco aquí o en el enlace que indican en la respuesta en Stackoverflow no veo esa funcionalidad indicada en ningún sitio. Eso es porque aparecía en la versión 1.3.1 pero luego desapareció... Sin embargo podemos seguir usando esa funcionalidad aunque actualmente no esté documentada:", "plt.figure(figsize = (12, 6))\nfor i in range(10):\n x = np.arange(i * 10, i * 10 + 10)\n y_var1 = np.random.randint(1, 5, 10)\n y_var2 = np.random.randint(5, 8, 10)\n plt.plot(x, y_var1, color = 'k', label = 'variable1' if i == 0 else \"_esto_no_se_pintará\")\n plt.plot(x, y_var2, color = 'g', label = 'variable2' if i == 0 else \"_esto_tampoco\")\n plt.legend()\n plt.ylim(0, 9)", "Espero que a alguien le resulte útil." ]
[ "markdown", "code", "markdown", "code", "markdown" ]
AllenDowney/ThinkBayes2
examples/usb.ipynb
mit
[ "Flipping USB Connectors\nIf you like this article, you might also like the second edition of Think Bayes.\nClick here to run this article on Colab\nI am not the first person to observe that it sometimes takes several tries to plug in a USB connector (specifically the rectangular Type A connector, which is not reversible).\nThere are memes about it, there are cartoons about it, and on Quora alone,\npeople\nhave\nasked\nabout\nit more than once.\nBut I might be the first to use Bayesian decision analysis to figure out the optimal strategy for plugging in a USB connector. Specifically, I have worked out how long you should try on the first side before flipping, how long you should try on the second side before flipping again, how long you should try on the third side, and so on.\nOf course, my analysis is based on some modeling assumptions:\n\n\nInitially, the probability is 0.5 that the connector is in the right orientation.\n\n\nIf it is, the time it takes to succeed follows an exponential distribution with a mean of 1.1 seconds.\n\n\nFlipping the connector takes 0.1 seconds.\n\n\nWith that, we are ready to get started.\nContinuous Updates\nThe first step is to figure out the probability that the connector is in the right orientation as a function of how long you have been trying.\nFor that, we can use a Bayes table, which is a form of Bayes's Theorem I use in Chapter 2 of Think Bayes.\nThe following function takes a sequence of hypotheses, prior probabilities, and likelihoods, and returns a pandas DataFrame that represents a Bayes table.", "import pandas as pd\n\ndef bayes_table(hypos, prior, likelihood):\n \"\"\"Make a table showing a Bayesian update.\"\"\"\n table = pd.DataFrame(dict(prior=prior, likelihood=likelihood), index=hypos)\n table['unnorm'] = table['prior'] * table['likelihood']\n prob_data = table['unnorm'].sum()\n table['posterior'] = table['unnorm'] / prob_data\n return table", "Now suppose that the prior probability is 0.5 that the orientation of the connector is correct, and you have been trying for 0.9 seconds.\nWhat is the likelihood that you would have to try so long?\n\n\nIf you are on the wrong side, it is 100%.\n\n\nIf you are on the right side, it's given by the survival function (complementary CDF) of the exponential distribution, which is $\\exp(-\\lambda t)$, where $\\lambda$ is the rate parameter and $t$ is time.\n\n\nThe following function computes this likelihood:", "import numpy as np\n\ndef expo_sf(t, lam):\n \"\"\"Survival function of the exponential distribution.\"\"\"\n return np.exp(-lam * t)", "We can use this function to compute the likelihood of trying for 0.9 seconds or more, given an exponential distribution with mean 1.1.", "t = 0.9\nmu = 1.1\nlam = 1/mu\n\nexpo_sf(t, lam)", "The result is the likelihood of the data, given that the orientation of the connector is correct.\nNow let's make a Bayes table with two hypotheses -- the connector is either the right way or the wrong way -- with equal prior probabilities.", "hypos = ['Right way', 'Wrong way']\nprior = [1/2, 1/2]", "And here is the likelihood of the data for each hypothesis:", "likelihood = [expo_sf(t, lam), 1]", "Putting it together, here's the Bayes table.", "bayes_table(hypos, prior, likelihood)", "After 0.9 seconds, the probability is about 69% that the orientation of the connector is wrong, so you might want to think about trying the other side.\nBut if it takes 0.1 seconds to flip, maybe you should keep trying a little longer. 
To figure out when to flip, let's do the same analysis again for general values of $\\lambda$ and $t$.\nGeneralization\nTo minimize human error, I'll use Sympy to do the algebra. Here are the symbols I'll use.", "from sympy import symbols, exp\n\nt, lam, p, q, r = symbols('t lam p q r')", "Here's the likelihood again, using the symbols.", "likelihood = [exp(-lam * t), 1]\nlikelihood", "And here's the Bayes table, using $p$ and $q$ for the prior probabilities of the hypotheses.", "prior = [p, q]\ntable = bayes_table(hypos, prior, likelihood)\ntable", "From the table I'll select the posterior probability that the orientation is correct.", "expr = table.loc['Right way', 'posterior']\nexpr.simplify()", "You might recognize this as a form of the logistic function; we can compute it like this:", "def logistic(p, lam, t):\n q = 1-p\n return p / (p + q * np.exp(lam * t))", "Let's see what that looks like for a range of values of t, assuming that the prior probability is p=0.5.", "import matplotlib.pyplot as plt\n\nts = np.linspace(0, 4)\nps = logistic(p=0.5, lam=1/mu, t=ts)\n\nplt.plot(ts, ps)\nplt.xlabel(\"How long you've been trying (seconds)\")\nplt.ylabel(\"Probability the orientation is right\");", "After a few seconds of fiddling, you should be reasonably convinced that the orientation is wrong.\nStrategy\nNow, let's think about turning belief into action. Let me start with a conjecture: I suspect that the best strategy is to try on the first side until the probability of correct orientation drops below some threshold (to be determined), then try on the second side until the probability drops below that threshold again, and repeat until success.\nTo test this strategy, we will have to figure out how long to try as a function of the prior probability, p, and the threshold probability, r. Again, I'll make Sympy do the work.\nHere's the equation that sets the posterior probability, which we computed in the previous section, to r.", "from sympy import Eq, solve\n\neqn = Eq(expr, r)\neqn", "And here's the solution for t in terms of p, q, r, and lam.", "solve(eqn, t)[0]", "And here's how we can express this solution in terms of the prior and posterior odds.", "def wait_time(p, lam, r):\n q = 1-p\n prior_odds = p / q\n posterior_odds = r / (1-r)\n return np.log(prior_odds / posterior_odds) / lam", "Let's see what that looks like for a range of values of r, assuming that the prior probability is p=0.5.", "rs = np.linspace(0.05, 0.5)\nts = wait_time(p=0.5, lam=1/mu, r=rs)\n\nplt.plot(rs, ts, color='C2')\nplt.xlabel(\"Probability the orientation is right\")\nplt.ylabel(\"How long to keep trying (seconds)\");", "When the threshold is low, we have to wait a few seconds to reach it. As the threshold increases, the time to reach it decreases.\nWe'll use this function in the next section to simulate the strategy.\nSimulation\nAs a step toward optimization, let's run a simulation. The following function takes as parameters:\n\ncorrect: A Boolean indicating if the orientation is correct.\np: The prior probability that the orientation is correct.\nlam: The rate parameter for the distribution of time until success.\nr: The threshold for the posterior probability.\nflip: The time it takes to flip the connector, in seconds.\ntrace: A list that indicates how much time we have spent, so far, trying and flipping.\n\nIt runs the simulation and returns a sequence of waiting and flipping times. The sum of this sequence is the total time it took to connect. 
And we can use the length of the sequence to figure out how many times we had to flip.", "def simulate(correct, p, lam, r, flip, trace):\n # figure out the maximum time we should try before flipping\n wait = wait_time(p, lam, r)\n \n # if we're on the correct side, see if we succeed before time's up\n if correct:\n t = np.random.exponential(1/lam)\n if t < wait:\n # if so, update and return the trace\n return trace + [t]\n \n # if time expired, add the wait time and flip time to the trace \n # and make a recursive call to continue the simulation\n return simulate(not correct, 1-r, lam, r, flip, trace + [wait, flip])", "Here's a test run, starting on the correct side.", "simulate(correct=True, p=0.5, lam=1/mu, r=0.2, flip=0.1, trace=[])", "And here's a run where we start on the wrong side.", "simulate(correct=False, p=0.5, lam=1/mu, r=0.2, flip=0.1, trace=[])", "The following function runs the simulation many times with initial probability p=0.5, starting in the right orientation half the time.\nIt returns two arrays, containing the length of the trace and the total duration for each simulation.", "def run_simulations(lam, r, flip, iters=20000, flag=None):\n res = []\n for i in range(iters):\n correct = i%2 if flag is None else flag\n trace = simulate(correct, 0.5, lam, r, flip, [])\n res.append((len(trace), sum(trace)))\n \n return np.transpose(res)", "Here's the average total duration with threshold probability r=0.25.", "lengths, totals = run_simulations(lam=1/mu, r=0.25, flip=0.1)\ntotals.mean()", "With this threshold, it takes about 2 seconds to connect, on average.\nOptimization\nNow let's see how the average duration varies as we sweep through a range of values for the threshold probability, r:", "rs = np.linspace(0.15, 0.4, 21)\nrs\n\nnp.random.seed(17)\n\nres = []\nfor r in rs:\n lengths, totals = run_simulations(lam=1/mu, r=r, flip=0.1)\n res.append((r, totals.mean()))\n\nfrom statsmodels.nonparametric.smoothers_lowess import lowess\n\ndef make_lowess(series):\n \"\"\"Use LOWESS to compute a smooth line.\n\n series: pd.Series\n\n returns: pd.Series\n \"\"\"\n endog = series.values\n exog = series.index.values\n\n smooth = lowess(endog, exog)\n index, data = np.transpose(smooth)\n\n return pd.Series(data, index=index)\n\ndef plot_series_lowess(series, color):\n \"\"\"Plots a series of data points and a smooth line.\n\n series: pd.Series\n color: string or tuple\n \"\"\"\n series.plot(lw=0, marker='o', color=color, alpha=0.5)\n smooth = make_lowess(series)\n smooth.plot(label='_', color=color)", "Here's what the results look like.", "rs, ts = np.transpose(res)\nseries = pd.Series(ts, rs)\n\nplot_series_lowess(series, 'C1')\n\nplt.xlabel(\"Threshold probability where you flip (r)\")\nplt.ylabel(\"Average total duration (seconds)\");", "The optimal value of r is close to 0.3. 
With that threshold we can see how long we should try on the first side, starting with prior probability p=0.5.", "r_opt = 0.3\nwait_time(p=0.5, lam=1/mu, r=r_opt)", "With the given values of lam and flip, it turns out the optimal time to wait is about 0.9 seconds.\nIf we have to flip, the prior probability for the second side is p=1-r, so we have to wait twice as long for the posterior probability to get down to r.", "wait_time(p=1-r_opt, lam=1/mu, r=r_opt)", "How many flips?\nNow let's run the simulations with the optimal value of r and see what the distributions look like for the total time and the number of flips.", "lengths1, totals1 = run_simulations(lam=1/mu, r=r_opt, flip=0.1, flag=True)\nlengths2, totals2 = run_simulations(lam=1/mu, r=r_opt, flip=0.1, flag=False)", "Here's the distribution of total time, represented as a CDF.", "try:\n import empiricaldist\nexcept ImportError:\n !pip install empiricaldist\n\nfrom empiricaldist import Cdf\n\nCdf.from_seq(totals1).plot(lw=2, label='Right the first time')\nCdf.from_seq(totals2).plot(lw=2, label='Wrong the first time')\n\nplt.xlabel('Total time to connect (seconds)')\nplt.ylabel('CDF')\nplt.title('Distribution of total time to connect')\nplt.legend();\n\ntotals1.mean(), totals2.mean()\n\nnp.append(totals1, totals2).mean()", "The average is about 2.4 seconds, but occasionally it takes much longer!\nAnd here's the distribution for the total number of flips.", "from empiricaldist import Pmf\n\nflips1 = (lengths1-1) // 2\npmf1 = Pmf.from_seq(flips1) / 2\npmf1.bar(alpha=0.7, label='Right the first time')\n\nflips2 = (lengths2-1) // 2\npmf2 = Pmf.from_seq(flips2) / 2\npmf2.bar(alpha=0.7, label='Right the second time')\n\nplt.xlabel('How many times you have to flip')\nplt.ylabel('PMF')\nplt.title('Distribution of number of flips')\nplt.legend();\n\nlengths = np.append(lengths1, lengths2)\nflips = (lengths-1) // 2\nPmf.from_seq(flips).head(5)", "The probability of getting it right on the first try is only about 28%. That might seem low, because the chance of starting in the right orientation is 50%, but remember that we have a substantial chance of flipping even if we start in the right orientation (and in that case, we have to flip at least twice).\nThe most common outcome is that we have to flip once, about 40% of the time. And the probability of the notorious double flip is about 18%. \nFortunately, it is rare to flip three or more times.\nSummary\nWith that, I think we have solved the USB connector problem.\n\n\nFor given parameters lam and flip, we can find the threshold probability, r, that minimizes the average time to connect.\n\n\nGiven this optimal value, we can estimate the distribution of total time and the number of times we have to flip.\n\n\nSadly, all of this fun is gradually being spoiled by the encroachment of the USB-C connector, which is reversible.\nIf you like this article, you might also like the second edition of Think Bayes.\nCopyright 2021 Allen Downey\nCode: MIT License\nText: Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
jinntrance/MOOC
coursera/ml-classification/assignments/module-10-online-learning-assignment-blank.ipynb
cc0-1.0
[ "Training Logistic Regression via Stochastic Gradient Ascent\nThe goal of this notebook is to implement a logistic regression classifier using stochastic gradient ascent. You will:\n\nExtract features from Amazon product reviews.\nConvert an SFrame into a NumPy array.\nWrite a function to compute the derivative of log likelihood function with respect to a single coefficient.\nImplement stochastic gradient ascent.\nCompare convergence of stochastic gradient ascent with that of batch gradient ascent.\n\nFire up GraphLab Create\nMake sure you have the latest version of GraphLab Create. Upgrade by\npip install graphlab-create --upgrade\nSee this page for detailed instructions on upgrading.", "from __future__ import division\nimport graphlab", "Load and process review dataset\nFor this assignment, we will use the same subset of the Amazon product review dataset that we used in Module 3 assignment. The subset was chosen to contain similar numbers of positive and negative reviews, as the original dataset consisted of mostly positive reviews.", "products = graphlab.SFrame('amazon_baby_subset.gl/')", "Just like we did previously, we will work with a hand-curated list of important words extracted from the review data. We will also perform 2 simple data transformations:\n\nRemove punctuation using Python's built-in string manipulation functionality.\nCompute word counts (only for the important_words)\n\nRefer to Module 3 assignment for more details.", "import json\nwith open('important_words.json', 'r') as f: \n important_words = json.load(f)\nimportant_words = [str(s) for s in important_words]\n\n# Remote punctuation\ndef remove_punctuation(text):\n import string\n return text.translate(None, string.punctuation) \n\nproducts['review_clean'] = products['review'].apply(remove_punctuation)\n\n# Split out the words into individual columns\nfor word in important_words:\n products[word] = products['review_clean'].apply(lambda s : s.split().count(word))", "The SFrame products now contains one column for each of the 193 important_words.", "products", "Split data into training and validation sets\nWe will now split the data into a 90-10 split where 90% is in the training set and 10% is in the validation set. We use seed=1 so that everyone gets the same result.", "train_data, validation_data = products.random_split(.9, seed=1)\n\nprint 'Training set : %d data points' % len(train_data)\nprint 'Validation set: %d data points' % len(validation_data)", "Convert SFrame to NumPy array\nJust like in the earlier assignments, we provide you with a function that extracts columns from an SFrame and converts them into a NumPy array. Two arrays are returned: one representing features and another representing class labels. 
\nNote: The feature matrix includes an additional column 'intercept' filled with 1's to take account of the intercept term.", "import numpy as np\n\ndef get_numpy_data(data_sframe, features, label):\n data_sframe['intercept'] = 1\n features = ['intercept'] + features\n features_sframe = data_sframe[features]\n feature_matrix = features_sframe.to_numpy()\n label_sarray = data_sframe[label]\n label_array = label_sarray.to_numpy()\n return(feature_matrix, label_array)", "Note that we convert both the training and validation sets into NumPy arrays.\nWarning: This may take a few minutes.", "feature_matrix_train, sentiment_train = get_numpy_data(train_data, important_words, 'sentiment')\nfeature_matrix_valid, sentiment_valid = get_numpy_data(validation_data, important_words, 'sentiment') ", "Are you running this notebook on an Amazon EC2 t2.micro instance? (If you are using your own machine, please skip this section)\nIt has been reported that t2.micro instances do not provide sufficient power to complete the conversion in acceptable amount of time. For interest of time, please refrain from running get_numpy_data function. Instead, download the binary file containing the four NumPy arrays you'll need for the assignment. To load the arrays, run the following commands:\narrays = np.load('module-10-assignment-numpy-arrays.npz')\nfeature_matrix_train, sentiment_train = arrays['feature_matrix_train'], arrays['sentiment_train']\nfeature_matrix_valid, sentiment_valid = arrays['feature_matrix_valid'], arrays['sentiment_valid']\n Quiz question: In Module 3 assignment, there were 194 features (an intercept + one feature for each of the 193 important words). In this assignment, we will use stochastic gradient ascent to train the classifier using logistic regression. How does the changing the solver to stochastic gradient ascent affect the number of features?\nBuilding on logistic regression\nLet us now build on Module 3 assignment. Recall from lecture that the link function for logistic regression can be defined as:\n$$\nP(y_i = +1 | \\mathbf{x}_i,\\mathbf{w}) = \\frac{1}{1 + \\exp(-\\mathbf{w}^T h(\\mathbf{x}_i))},\n$$\nwhere the feature vector $h(\\mathbf{x}_i)$ is given by the word counts of important_words in the review $\\mathbf{x}_i$. \nWe will use the same code as in Module 3 assignment to make probability predictions, since this part is not affected by using stochastic gradient ascent as a solver. Only the way in which the coefficients are learned is affected by using stochastic gradient ascent as a solver.", "'''\nproduces probablistic estimate for P(y_i = +1 | x_i, w).\nestimate ranges between 0 and 1.\n'''\ndef predict_probability(feature_matrix, coefficients):\n # Take dot product of feature_matrix and coefficients \n score = np.dot(feature_matrix, coefficients)\n \n # Compute P(y_i = +1 | x_i, w) using the link function\n predictions = 1. 
/ (1.+np.exp(-score)) \n return predictions", "Derivative of log likelihood with respect to a single coefficient\nLet us now work on making minor changes to how the derivative computation is performed for logistic regression.\nRecall from the lectures and Module 3 assignment that for logistic regression, the derivative of log likelihood with respect to a single coefficient is as follows:\n$$\n\\frac{\\partial\\ell}{\\partial w_j} = \\sum_{i=1}^N h_j(\\mathbf{x}_i)\\left(\\mathbf{1}[y_i = +1] - P(y_i = +1 | \\mathbf{x}_i, \\mathbf{w})\\right)\n$$\nIn Module 3 assignment, we wrote a function to compute the derivative of log likelihood with respect to a single coefficient $w_j$. The function accepts the following two parameters:\n * errors vector containing $(\\mathbf{1}[y_i = +1] - P(y_i = +1 | \\mathbf{x}_i, \\mathbf{w}))$ for all $i$\n * feature vector containing $h_j(\\mathbf{x}_i)$ for all $i$\nComplete the following code block:", "def feature_derivative(errors, feature): \n \n # Compute the dot product of errors and feature\n ## YOUR CODE HERE\n derivative = np.dot(errors, feature)\n\n return derivative", "Note. We are not using regularization in this assignment, but, as discussed in the optional video, stochastic gradient can also be used for regularized logistic regression.\nTo verify the correctness of the gradient computation, we provide a function for computing average log likelihood (which we recall from the last assignment was a topic detailed in an advanced optional video, and used here for its numerical stability).\nTo track the performance of stochastic gradient ascent, we provide a function for computing average log likelihood. \n$$\\ell\\ell_A(\\mathbf{w}) = \\color{red}{\\frac{1}{N}} \\sum_{i=1}^N \\Big( (\\mathbf{1}[y_i = +1] - 1)\\mathbf{w}^T h(\\mathbf{x}_i) - \\ln\\left(1 + \\exp(-\\mathbf{w}^T h(\\mathbf{x}_i))\\right) \\Big) $$\nNote that we made one tiny modification to the log likelihood function (called compute_log_likelihood) in our earlier assignments. We added a $\\color{red}{1/N}$ term which averages the log likelihood accross all data points. The $\\color{red}{1/N}$ term makes it easier for us to compare stochastic gradient ascent with batch gradient ascent. We will use this function to generate plots that are similar to those you saw in the lecture.", "def compute_avg_log_likelihood(feature_matrix, sentiment, coefficients):\n \n indicator = (sentiment==+1)\n scores = np.dot(feature_matrix, coefficients)\n logexp = np.log(1. 
+ np.exp(-scores))\n \n # Simple check to prevent overflow\n mask = np.isinf(logexp)\n logexp[mask] = -scores[mask]\n \n lp = np.sum((indicator-1)*scores - logexp)/len(feature_matrix)\n \n return lp", "Quiz Question: Recall from the lecture and the earlier assignment, the log likelihood (without the averaging term) is given by \n$$\ell\ell(\mathbf{w}) = \sum_{i=1}^N \Big( (\mathbf{1}[y_i = +1] - 1)\mathbf{w}^T h(\mathbf{x}_i) - \ln\left(1 + \exp(-\mathbf{w}^T h(\mathbf{x}_i))\right) \Big) $$\nHow are the functions $\ell\ell(\mathbf{w})$ and $\ell\ell_A(\mathbf{w})$ related?\nModifying the derivative for stochastic gradient ascent\nRecall from the lecture that the gradient for a single data point $\color{red}{\mathbf{x}_i}$ can be computed using the following formula:\n$$\n\frac{\partial\ell_{\color{red}{i}}(\mathbf{w})}{\partial w_j} = h_j(\color{red}{\mathbf{x}_i})\left(\mathbf{1}[y_\color{red}{i} = +1] - P(y_\color{red}{i} = +1 | \color{red}{\mathbf{x}_i}, \mathbf{w})\right)\n$$\n Computing the gradient for a single data point\nDo we really need to re-write all our code to modify $\partial\ell(\mathbf{w})/\partial w_j$ to $\partial\ell_{\color{red}{i}}(\mathbf{w})/{\partial w_j}$? \nThankfully, no! Using NumPy, we access $\mathbf{x}_i$ in the training data using feature_matrix_train[i:i+1,:]\nand $y_i$ in the training data using sentiment_train[i:i+1]. We can compute $\partial\ell_{\color{red}{i}}(\mathbf{w})/\partial w_j$ by re-using all the code written in feature_derivative and predict_probability.\nWe compute $\partial\ell_{\color{red}{i}}(\mathbf{w})/\partial w_j$ using the following steps:\n* First, compute $P(y_i = +1 | \mathbf{x}_i, \mathbf{w})$ using the predict_probability function with feature_matrix_train[i:i+1,:] as the first parameter.\n* Next, compute $\mathbf{1}[y_i = +1]$ using sentiment_train[i:i+1].\n* Finally, call the feature_derivative function with feature_matrix_train[i:i+1, j] as one of the parameters. \nLet us follow these steps for j = 1 and i = 10:", "j = 1 # Feature number\ni = 10 # Data point number\ncoefficients = np.zeros(194) # A point w at which we are computing the gradient.\n\npredictions = predict_probability(feature_matrix_train[i:i+1,:], coefficients)\nindicator = (sentiment_train[i:i+1]==+1)\n\nerrors = indicator - predictions \ngradient_single_data_point = feature_derivative(errors, feature_matrix_train[i:i+1,j])\nprint \"Gradient single data point: %s\" % gradient_single_data_point\nprint \" --> Should print 0.0\"", "Quiz Question: The code block above computed $\partial\ell_{\color{red}{i}}(\mathbf{w})/{\partial w_j}$ for j = 1 and i = 10. Is $\partial\ell_{\color{red}{i}}(\mathbf{w})/{\partial w_j}$ a scalar or a 194-dimensional vector?\nModifying the derivative for using a batch of data points\nStochastic gradient estimates the ascent direction using 1 data point, while batch gradient uses $N$ data points to decide how to update the parameters. In an optional video, we discussed the details of a simple change that allows us to use a mini-batch of $B \leq N$ data points to estimate the ascent direction. This simple approach is faster than regular gradient but less noisy than stochastic gradient that uses only 1 data point. 
Although we encourage you to watch the optional video on the topic to better understand why mini-batches help stochastic gradient, in this assignment, we will simply use this technique, since the approach is very simple and will improve your results.\nGiven a mini-batch (or a set of data points) $\mathbf{x}_{i}, \mathbf{x}_{i+1} \ldots \mathbf{x}_{i+B}$, the gradient function for this mini-batch of data points is given by:\n$$\n\color{red}{\sum_{s = i}^{i+B}} \frac{\partial\ell_{s}}{\partial w_j} = \color{red}{\sum_{s = i}^{i + B}} h_j(\mathbf{x}_s)\left(\mathbf{1}[y_s = +1] - P(y_s = +1 | \mathbf{x}_s, \mathbf{w})\right)\n$$\n Computing the gradient for a \"mini-batch\" of data points\nUsing NumPy, we access the points $\mathbf{x}_i, \mathbf{x}_{i+1} \ldots \mathbf{x}_{i+B}$ in the training data using feature_matrix_train[i:i+B,:]\nand $y_i$ in the training data using sentiment_train[i:i+B]. \nWe can compute $\color{red}{\sum_{s = i}^{i+B}} \partial\ell_{s}/\partial w_j$ easily as follows:", "j = 1 # Feature number\ni = 10 # Data point start\nB = 10 # Mini-batch size\ncoefficients = np.zeros(194) # A point w at which we are computing the gradient.\n\npredictions = predict_probability(feature_matrix_train[i:i+B,:], coefficients)\nindicator = (sentiment_train[i:i+B]==+1)\n\nerrors = indicator - predictions \ngradient_mini_batch = feature_derivative(errors, feature_matrix_train[i:i+B,j])\nprint \"Gradient mini-batch data points: %s\" % gradient_mini_batch\nprint \" --> Should print 1.0\"", "Quiz Question: The code block above computed \n$\color{red}{\sum_{s = i}^{i+B}}\partial\ell_{s}(\mathbf{w})/{\partial w_j}$ \nfor j = 1, i = 10, and B = 10. Is this a scalar or a 194-dimensional vector?\n Quiz Question: For what value of B is the term\n$\color{red}{\sum_{s = 1}^{B}}\partial\ell_{s}(\mathbf{w})/\partial w_j$\nthe same as the full gradient\n$\partial\ell(\mathbf{w})/{\partial w_j}$?\nAveraging the gradient across a batch\nIt is a common practice to normalize the gradient update rule by the batch size B:\n$$\n\frac{\partial\ell_{\color{red}{A}}(\mathbf{w})}{\partial w_j} \approx \color{red}{\frac{1}{B}} {\sum_{s = i}^{i + B}} h_j(\mathbf{x}_s)\left(\mathbf{1}[y_s = +1] - P(y_s = +1 | \mathbf{x}_s, \mathbf{w})\right)\n$$\nIn other words, we update the coefficients using the average gradient over data points (instead of using a summation). By using the average gradient, we ensure that the magnitude of the gradient is approximately the same for all batch sizes. This way, we can more easily compare various batch sizes of stochastic gradient ascent (including a batch size of all the data points), and study the effect of batch size on the algorithm as well as the choice of step size.\nImplementing stochastic gradient ascent\nNow we are ready to implement our own logistic regression with stochastic gradient ascent. 
Complete the following function to fit a logistic regression model using gradient ascent:", "from math import sqrt\ndef logistic_regression_SG(feature_matrix, sentiment, initial_coefficients, step_size, batch_size, max_iter):\n log_likelihood_all = []\n \n # make sure it's a numpy array\n coefficients = np.array(initial_coefficients)\n # set seed=1 to produce consistent results\n np.random.seed(seed=1)\n # Shuffle the data before starting\n permutation = np.random.permutation(len(feature_matrix))\n feature_matrix = feature_matrix[permutation,:]\n sentiment = sentiment[permutation]\n \n i = 0 # index of current batch\n # Do a linear scan over data\n\n for itr in xrange(max_iter):\n # Predict P(y_i = +1|x_i,w) using your predict_probability() function\n # Make sure to slice the i-th row of feature_matrix with [i:i+batch_size,:]\n ### YOUR CODE HERE\n \n predictions = predict_probability(feature_matrix[i:i+batch_size, :], coefficients)\n \n if len(predictions) <= 0:\n break;\n \n # Compute indicator value for (y_i = +1)\n # Make sure to slice the i-th entry with [i:i+batch_size]\n ### YOUR CODE HERE\n indicator = (sentiment[i:i+batch_size] == +1)\n \n # Compute the errors as indicator - predictions\n errors = indicator - predictions\n\n for j in xrange(len(coefficients)): # loop over each coefficient\n # Recall that feature_matrix[:,j] is the feature column associated with coefficients[j]\n # Compute the derivative for coefficients[j] and save it to derivative.\n # Make sure to slice the i-th row of feature_matrix with [i:i+batch_size,j]\n ### YOUR CODE HERE\n derivative = feature_derivative(errors, feature_matrix[i:i+batch_size, j])\n \n # compute the product of the step size, the derivative, and the **normalization constant** (1./batch_size)\n ### YOUR CODE HERE\n coefficients[j] += step_size*derivative * 1.0 / batch_size\n \n # Checking whether log likelihood is increasing\n # Print the log likelihood over the *current batch*\n lp = compute_avg_log_likelihood(feature_matrix[i:i+batch_size,:], sentiment[i:i+batch_size],\n coefficients)\n log_likelihood_all.append(lp)\n if itr <= 15 or (itr <= 1000 and itr % 100 == 0) or (itr <= 10000 and itr % 1000 == 0) \\\n or itr % 10000 == 0 or itr == max_iter-1:\n data_size = len(feature_matrix)\n print 'Iteration %*d: Average log likelihood (of data points in batch [%0*d:%0*d]) = %.8f' % \\\n (int(np.ceil(np.log10(max_iter))), itr, \\\n int(np.ceil(np.log10(data_size))), i, \\\n int(np.ceil(np.log10(data_size))), i+batch_size, lp)\n \n # if we made a complete pass over data, shuffle and restart\n i += batch_size\n if i+batch_size > len(feature_matrix):\n permutation = np.random.permutation(len(feature_matrix))\n feature_matrix = feature_matrix[permutation,:]\n sentiment = sentiment[permutation]\n i = 0\n \n # We return the list of log likelihoods for plotting purposes.\n return coefficients, log_likelihood_all", "Note. In practice, the final set of coefficients is rarely used; it is better to use the average of the last K sets of coefficients instead, where K should be adjusted depending on how fast the log likelihood oscillates around the optimum.\nCheckpoint\nThe following cell tests your stochastic gradient ascent function using a toy dataset consisting of two data points. 
If the test does not pass, make sure you are normalizing the gradient update rule correctly.", "sample_feature_matrix = np.array([[1.,2.,-1.], [1.,0.,1.]])\nsample_sentiment = np.array([+1, -1])\n\ncoefficients, log_likelihood = logistic_regression_SG(sample_feature_matrix, sample_sentiment, np.zeros(3),\n step_size=1., batch_size=2, max_iter=2)\nprint '-------------------------------------------------------------------------------------'\nprint 'Coefficients learned :', coefficients\nprint 'Average log likelihood per-iteration :', log_likelihood\nif np.allclose(coefficients, np.array([-0.09755757, 0.68242552, -0.7799831]), atol=1e-3)\\\n and np.allclose(log_likelihood, np.array([-0.33774513108142956, -0.2345530939410341])):\n # pass if elements match within 1e-3\n print '-------------------------------------------------------------------------------------'\n print 'Test passed!'\nelse:\n print '-------------------------------------------------------------------------------------'\n print 'Test failed'", "Compare convergence behavior of stochastic gradient ascent\nFor the remainder of the assignment, we will compare stochastic gradient ascent against batch gradient ascent. For this, we need a reference implementation of batch gradient ascent. But do we need to implement this from scratch?\nQuiz Question: For what value of batch size B above is the stochastic gradient ascent function logistic_regression_SG act as a standard gradient ascent algorithm?\nRunning gradient ascent using the stochastic gradient ascent implementation\nInstead of implementing batch gradient ascent separately, we save time by re-using the stochastic gradient ascent function we just wrote &mdash; to perform gradient ascent, it suffices to set batch_size to the number of data points in the training data. Yes, we did answer above the quiz question for you, but that is an important point to remember in the future :)\nSmall Caveat. The batch gradient ascent implementation here is slightly different than the one in the earlier assignments, as we now normalize the gradient update rule.\nWe now run stochastic gradient ascent over the feature_matrix_train for 10 iterations using:\n* initial_coefficients = np.zeros(194)\n* step_size = 5e-1\n* batch_size = 1\n* max_iter = 10", "coefficients, log_likelihood = logistic_regression_SG(feature_matrix_train, sentiment_train,\n initial_coefficients=np.zeros(194),\n step_size=5e-1, batch_size=1, max_iter=10)", "Quiz Question. When you set batch_size = 1, as each iteration passes, how does the average log likelihood in the batch change?\n* Increases\n* Decreases\n* Fluctuates \nNow run batch gradient ascent over the feature_matrix_train for 200 iterations using:\n* initial_coefficients = np.zeros(194)\n* step_size = 5e-1\n* batch_size = len(feature_matrix_train)\n* max_iter = 200", "# YOUR CODE HERE\ncoefficients_batch, log_likelihood_batch = logistic_regression_SG(feature_matrix_train, sentiment_train,\n initial_coefficients=np.zeros(194),\n step_size=5e-1, batch_size=len(feature_matrix_train), max_iter=200)", "Quiz Question. 
When you set batch_size = len(train_data), as each iteration passes, how does the average log likelihood in the batch change?\n* Increases \n* Decreases\n* Fluctuates \nMake \"passes\" over the dataset\nTo make a fair comparison betweeen stochastic gradient ascent and batch gradient ascent, we measure the average log likelihood as a function of the number of passes (defined as follows):\n$$\n[\\text{# of passes}] = \\frac{[\\text{# of data points touched so far}]}{[\\text{size of dataset}]}\n$$\nQuiz Question Suppose that we run stochastic gradient ascent with a batch size of 100. How many gradient updates are performed at the end of two passes over a dataset consisting of 50000 data points?", "2*50000/100", "Log likelihood plots for stochastic gradient ascent\nWith the terminology in mind, let us run stochastic gradient ascent for 10 passes. We will use\n* step_size=1e-1\n* batch_size=100\n* initial_coefficients to all zeros.", "step_size = 1e-1\nbatch_size = 100\nnum_passes = 10\nnum_iterations = num_passes * int(len(feature_matrix_train)/batch_size)\n\ncoefficients_sgd, log_likelihood_sgd = logistic_regression_SG(feature_matrix_train, sentiment_train,\n initial_coefficients=np.zeros(194),\n step_size=1e-1, batch_size=100, max_iter=num_iterations)", "We provide you with a utility function to plot the average log likelihood as a function of the number of passes.", "import matplotlib.pyplot as plt\n%matplotlib inline\n\ndef make_plot(log_likelihood_all, len_data, batch_size, smoothing_window=1, label=''):\n plt.rcParams.update({'figure.figsize': (9,5)})\n log_likelihood_all_ma = np.convolve(np.array(log_likelihood_all), \\\n np.ones((smoothing_window,))/smoothing_window, mode='valid')\n plt.plot(np.array(range(smoothing_window-1, len(log_likelihood_all)))*float(batch_size)/len_data,\n log_likelihood_all_ma, linewidth=4.0, label=label)\n plt.rcParams.update({'font.size': 16})\n plt.tight_layout()\n plt.xlabel('# of passes over data')\n plt.ylabel('Average log likelihood per data point')\n plt.legend(loc='lower right', prop={'size':14})\n\nmake_plot(log_likelihood_sgd, len_data=len(feature_matrix_train), batch_size=100,\n label='stochastic gradient, step_size=1e-1')", "Smoothing the stochastic gradient ascent curve\nThe plotted line oscillates so much that it is hard to see whether the log likelihood is improving. In our plot, we apply a simple smoothing operation using the parameter smoothing_window. The smoothing is simply a moving average of log likelihood over the last smoothing_window \"iterations\" of stochastic gradient ascent.", "make_plot(log_likelihood_sgd, len_data=len(feature_matrix_train), batch_size=100,\n smoothing_window=30, label='stochastic gradient, step_size=1e-1')", "Checkpoint: The above plot should look smoother than the previous plot. Play around with smoothing_window. 
As you increase it, you should see a smoother plot.\nStochastic gradient ascent vs batch gradient ascent\nTo compare convergence rates for stochastic gradient ascent with batch gradient ascent, we call make_plot() multiple times in the same cell.\nWe are comparing:\n* stochastic gradient ascent: step_size = 0.1, batch_size=100\n* batch gradient ascent: step_size = 0.5, batch_size=len(feature_matrix_train)\nWrite code to run stochastic gradient ascent for 200 passes using:\n* step_size=1e-1\n* batch_size=100\n* initial_coefficients to all zeros.", "step_size = 1e-1\nbatch_size = 100\nnum_passes = 200\nnum_iterations = num_passes * int(len(feature_matrix_train)/batch_size)\n\n## YOUR CODE HERE\ncoefficients_sgd, log_likelihood_sgd = logistic_regression_SG(feature_matrix_train, sentiment_train,\n initial_coefficients=np.zeros(194),\n step_size=step_size, batch_size=batch_size, max_iter=num_iterations)", "We compare the convergence of stochastic gradient ascent and batch gradient ascent in the following cell. Note that we apply smoothing with smoothing_window=30.", "make_plot(log_likelihood_sgd, len_data=len(feature_matrix_train), batch_size=100,\n smoothing_window=30, label='stochastic, step_size=1e-1')\nmake_plot(log_likelihood_batch, len_data=len(feature_matrix_train), batch_size=len(feature_matrix_train),\n smoothing_window=1, label='batch, step_size=5e-1')", "Quiz Question: In the figure above, how many passes does batch gradient ascent need to achieve a similar log likelihood as stochastic gradient ascent? \n\nIt's always better\n10 passes\n20 passes\n150 passes or more\n\nExplore the effects of step sizes on stochastic gradient ascent\nIn previous sections, we chose step sizes for you. In practice, it helps to know how to choose good step sizes yourself.\nTo start, we explore a wide range of step sizes that are equally spaced in the log space. Run stochastic gradient ascent with step_size set to 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, and 1e2. Use the following set of parameters:\n* initial_coefficients=np.zeros(194)\n* batch_size=100\n* max_iter initialized so as to run 10 passes over the data.", "batch_size = 100\nnum_passes = 10\nnum_iterations = num_passes * int(len(feature_matrix_train)/batch_size)\n\ncoefficients_sgd = {}\nlog_likelihood_sgd = {}\nfor step_size in np.logspace(-4, 2, num=7):\n coefficients_sgd[step_size], log_likelihood_sgd[step_size] = logistic_regression_SG(feature_matrix_train, sentiment_train,\n initial_coefficients=np.zeros(194),\n step_size=step_size, batch_size=batch_size, max_iter=num_iterations)", "Plotting the log likelihood as a function of passes for each step size\nNow, we will plot the change in log likelihood using the make_plot for each of the following values of step_size:\n\nstep_size = 1e-4\nstep_size = 1e-3\nstep_size = 1e-2\nstep_size = 1e-1\nstep_size = 1e0\nstep_size = 1e1\nstep_size = 1e2\n\nFor consistency, we again apply smoothing_window=30.", "for step_size in np.logspace(-4, 2, num=7):\n make_plot(log_likelihood_sgd[step_size], len_data=len(train_data), batch_size=100,\n smoothing_window=30, label='step_size=%.1e'%step_size)", "Now, let us remove the step size step_size = 1e2 and plot the rest of the curves.", "for step_size in np.logspace(-4, 2, num=7)[0:6]:\n make_plot(log_likelihood_sgd[step_size], len_data=len(train_data), batch_size=100,\n smoothing_window=30, label='step_size=%.1e'%step_size)", "Quiz Question: Which of the following is the worst step size? Pick the step size that results in the lowest log likelihood in the end.\n1. 
1e-2\n2. 1e-1\n3. 1e0\n4. 1e1\n5. 1e2\nQuiz Question: Which of the following is the best step size? Pick the step size that results in the highest log likelihood in the end.\n1. 1e-4\n2. 1e-2\n3. 1e0\n4. 1e1\n5. 1e2" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
hanezu/cs231n-assignment
assignment2/BatchNormalization.ipynb
mit
[ "Batch Normalization\nOne way to make deep networks easier to train is to use more sophisticated optimization procedures such as SGD+momentum, RMSProp, or Adam. Another strategy is to change the architecture of the network to make it easier to train. One idea along these lines is batch normalization which was recently proposed by [3].\nThe idea is relatively straightforward. Machine learning methods tend to work better when their input data consists of uncorrelated features with zero mean and unit variance. When training a neural network, we can preprocess the data before feeding it to the network to explicitly decorrelate its features; this will ensure that the first layer of the network sees data that follows a nice distribution. However even if we preprocess the input data, the activations at deeper layers of the network will likely no longer be decorrelated and will no longer have zero mean or unit variance since they are output from earlier layers in the network. Even worse, during the training process the distribution of features at each layer of the network will shift as the weights of each layer are updated.\nThe authors of [3] hypothesize that the shifting distribution of features inside deep neural networks may make training deep networks more difficult. To overcome this problem, [3] proposes to insert batch normalization layers into the network. At training time, a batch normalization layer uses a minibatch of data to estimate the mean and standard deviation of each feature. These estimated means and standard deviations are then used to center and normalize the features of the minibatch. A running average of these means and standard deviations is kept during training, and at test time these running averages are used to center and normalize features.\nIt is possible that this normalization strategy could reduce the representational power of the network, since it may sometimes be optimal for certain layers to have features that are not zero-mean or unit variance. To this end, the batch normalization layer includes learnable shift and scale parameters for each feature dimension.\n[3] Sergey Ioffe and Christian Szegedy, \"Batch Normalization: Accelerating Deep Network Training by Reducing\nInternal Covariate Shift\", ICML 2015.", "# As usual, a bit of setup\n\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom cs231n.classifiers.fc_net import *\nfrom cs231n.data_utils import get_CIFAR10_data\nfrom cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array\nfrom cs231n.solver import Solver\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n# for auto-reloading external modules\n# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython\n%load_ext autoreload\n%autoreload 2\n\ndef rel_error(x, y):\n \"\"\" returns relative error \"\"\"\n return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))\n\n# Load the (preprocessed) CIFAR10 data.\n\ndata = get_CIFAR10_data()\nfor k, v in data.iteritems():\n print '%s: ' % k, v.shape", "Batch normalization: Forward\nIn the file cs231n/layers.py, implement the batch normalization forward pass in the function batchnorm_forward. 
Once you have done so, run the following to test your implementation.", "# Check the training-time forward pass by checking means and variances\n# of features both before and after batch normalization\n\n# Simulate the forward pass for a two-layer network\nN, D1, D2, D3 = 200, 50, 60, 3\nX = np.random.randn(N, D1)\nW1 = np.random.randn(D1, D2)\nW2 = np.random.randn(D2, D3)\na = np.maximum(0, X.dot(W1)).dot(W2)\n\nprint 'Before batch normalization:'\nprint ' means: ', a.mean(axis=0)\nprint ' stds: ', a.std(axis=0)\n\n# Means should be close to zero and stds close to one\nprint 'After batch normalization (gamma=1, beta=0)'\na_norm, _ = batchnorm_forward(a, np.ones(D3), np.zeros(D3), {'mode': 'train'})\nprint ' mean: ', a_norm.mean(axis=0)\nprint ' std: ', a_norm.std(axis=0)\n\n# Now means should be close to beta and stds close to gamma\ngamma = np.asarray([1.0, 2.0, 3.0])\nbeta = np.asarray([11.0, 12.0, 13.0])\na_norm, _ = batchnorm_forward(a, gamma, beta, {'mode': 'train'})\nprint 'After batch normalization (nontrivial gamma, beta)'\nprint ' means: ', a_norm.mean(axis=0)\nprint ' stds: ', a_norm.std(axis=0)\n\n# Check the test-time forward pass by running the training-time\n# forward pass many times to warm up the running averages, and then\n# checking the means and variances of activations after a test-time\n# forward pass.\n\nN, D1, D2, D3 = 200, 50, 60, 3\nW1 = np.random.randn(D1, D2)\nW2 = np.random.randn(D2, D3)\n\nbn_param = {'mode': 'train'}\ngamma = np.ones(D3)\nbeta = np.zeros(D3)\nfor t in xrange(50):\n X = np.random.randn(N, D1)\n a = np.maximum(0, X.dot(W1)).dot(W2)\n batchnorm_forward(a, gamma, beta, bn_param)\nbn_param['mode'] = 'test'\nX = np.random.randn(N, D1)\na = np.maximum(0, X.dot(W1)).dot(W2)\na_norm, _ = batchnorm_forward(a, gamma, beta, bn_param)\n\n# Means should be close to zero and stds close to one, but will be\n# noisier than training-time forward passes.\nprint 'After batch normalization (test-time):'\nprint ' means: ', a_norm.mean(axis=0)\nprint ' stds: ', a_norm.std(axis=0)", "Batch Normalization: backward\nNow implement the backward pass for batch normalization in the function batchnorm_backward.\nTo derive the backward pass you should write out the computation graph for batch normalization and backprop through each of the intermediate nodes. Some intermediates may have multiple outgoing branches; make sure to sum gradients across these branches in the backward pass.\nOnce you have finished, run the following to numerically check your backward pass.", "# Gradient check batchnorm backward pass\n\nN, D = 4, 5\nx = 5 * np.random.randn(N, D) + 12\ngamma = np.random.randn(D)\nbeta = np.random.randn(D)\ndout = np.random.randn(N, D)\n\nbn_param = {'mode': 'train'}\nfx = lambda x: batchnorm_forward(x, gamma, beta, bn_param)[0]\nfg = lambda a: batchnorm_forward(x, gamma, beta, bn_param)[0]\nfb = lambda b: batchnorm_forward(x, gamma, beta, bn_param)[0]\n\ndx_num = eval_numerical_gradient_array(fx, x, dout)\nda_num = eval_numerical_gradient_array(fg, gamma, dout)\ndb_num = eval_numerical_gradient_array(fb, beta, dout)\n\n_, cache = batchnorm_forward(x, gamma, beta, bn_param)\ndx, dgamma, dbeta = batchnorm_backward(dout, cache)\nprint 'dx error: ', rel_error(dx_num, dx)\nprint 'dgamma error: ', rel_error(da_num, dgamma)\nprint 'dbeta error: ', rel_error(db_num, dbeta)", "Batch Normalization: alternative backward\nIn class we talked about two different implementations for the sigmoid backward pass. 
One strategy is to write out a computation graph composed of simple operations and backprop through all intermediate values. Another strategy is to work out the derivatives on paper. For the sigmoid function, it turns out that you can derive a very simple formula for the backward pass by simplifying gradients on paper.\nSurprisingly, it turns out that you can also derive a simple expression for the batch normalization backward pass if you work out derivatives on paper and simplify. After doing so, implement the simplified batch normalization backward pass in the function batchnorm_backward_alt and compare the two implementations by running the following. Your two implementations should compute nearly identical results, but the alternative implementation should be a bit faster.\nNOTE: You can still complete the rest of the assignment if you don't figure this part out, so don't worry too much if you can't get it.", " N, D = 100, 500\nx = 5 * np.random.randn(N, D) + 12\ngamma = np.random.randn(D)\nbeta = np.random.randn(D)\ndout = np.random.randn(N, D)\n\nbn_param = {'mode': 'train'}\nout, cache = batchnorm_forward(x, gamma, beta, bn_param)\n\nt1 = time.time()\ndx1, dgamma1, dbeta1 = batchnorm_backward(dout, cache)\nt2 = time.time()\ndx2, dgamma2, dbeta2 = batchnorm_backward_alt(dout, cache)\nt3 = time.time()\n\nprint 'dx difference: ', rel_error(dx1, dx2)\nprint 'dgamma difference: ', rel_error(dgamma1, dgamma2)\nprint 'dbeta difference: ', rel_error(dbeta1, dbeta2)\nprint 'speedup: %.2fx' % ((t2 - t1) / (t3 - t2))", "Fully Connected Nets with Batch Normalization\nNow that you have a working implementation for batch normalization, go back to your FullyConnectedNet in the file cs2312n/classifiers/fc_net.py. Modify your implementation to add batch normalization.\nConcretely, when the flag use_batchnorm is True in the constructor, you should insert a batch normalization layer before each ReLU nonlinearity. The outputs from the last layer of the network should not be normalized. Once you are done, run the following to gradient-check your implementation.\nHINT: You might find it useful to define an additional helper layer similar to those in the file cs231n/layer_utils.py. 
If you decide to do so, do it in the file cs231n/classifiers/fc_net.py.", "N, D, H1, H2, C = 2, 15, 20, 30, 10\nX = np.random.randn(N, D)\ny = np.random.randint(C, size=(N,))\n\nfor reg in [0, 3.14]:\n print 'Running check with reg = ', reg\n model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,\n reg=reg, weight_scale=5e-2, dtype=np.float64,\n use_batchnorm=True)\n\n loss, grads = model.loss(X, y)\n print 'Initial loss: ', loss\n\n for name in sorted(grads):\n f = lambda _: model.loss(X, y)[0]\n grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)\n print '%s relative error: %.2e' % (name, rel_error(grad_num, grads[name]))\n if reg == 0: print", "Batchnorm for deep networks\nRun the following to train a six-layer network on a subset of 1000 training examples both with and without batch normalization.", "# Try training a very deep net with batchnorm\nhidden_dims = [100, 100, 100, 100, 100]\n\nnum_train = 1000\nsmall_data = {\n 'X_train': data['X_train'][:num_train],\n 'y_train': data['y_train'][:num_train],\n 'X_val': data['X_val'],\n 'y_val': data['y_val'],\n}\n\nweight_scale = 2e-2\nbn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)\nmodel = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)\n\nbn_solver = Solver(bn_model, small_data,\n num_epochs=10, batch_size=50,\n update_rule='adam',\n optim_config={\n 'learning_rate': 1e-3,\n },\n verbose=True, print_every=200)\nbn_solver.train()\n\nsolver = Solver(model, small_data,\n num_epochs=10, batch_size=50,\n update_rule='adam',\n optim_config={\n 'learning_rate': 1e-3,\n },\n verbose=True, print_every=200)\nsolver.train()", "Run the following to visualize the results from two networks trained above. You should find that using batch normalization helps the network to converge much faster.", "plt.subplot(3, 1, 1)\nplt.title('Training loss')\nplt.xlabel('Iteration')\n\nplt.subplot(3, 1, 2)\nplt.title('Training accuracy')\nplt.xlabel('Epoch')\n\nplt.subplot(3, 1, 3)\nplt.title('Validation accuracy')\nplt.xlabel('Epoch')\n\nplt.subplot(3, 1, 1)\nplt.plot(solver.loss_history, 'o', label='baseline')\nplt.plot(bn_solver.loss_history, 'o', label='batchnorm')\n\nplt.subplot(3, 1, 2)\nplt.plot(solver.train_acc_history, '-o', label='baseline')\nplt.plot(bn_solver.train_acc_history, '-o', label='batchnorm')\n\nplt.subplot(3, 1, 3)\nplt.plot(solver.val_acc_history, '-o', label='baseline')\nplt.plot(bn_solver.val_acc_history, '-o', label='batchnorm')\n \nfor i in [1, 2, 3]:\n plt.subplot(3, 1, i)\n plt.legend(loc='upper center', ncol=4)\nplt.gcf().set_size_inches(15, 15)\nplt.show()", "Batch normalization and initialization\nWe will now run a small experiment to study the interaction of batch normalization and weight initialization.\nThe first cell will train 8-layer networks both with and without batch normalization using different scales for weight initialization. 
The second layer will plot training accuracy, validation set accuracy, and training loss as a function of the weight initialization scale.", "# Try training a very deep net with batchnorm\nhidden_dims = [50, 50, 50, 50, 50, 50, 50]\n\nnum_train = 1000\nsmall_data = {\n 'X_train': data['X_train'][:num_train],\n 'y_train': data['y_train'][:num_train],\n 'X_val': data['X_val'],\n 'y_val': data['y_val'],\n}\n\nbn_solvers = {}\nsolvers = {}\nweight_scales = np.logspace(-4, 0, num=20)\nfor i, weight_scale in enumerate(weight_scales):\n print 'Running weight scale %d / %d' % (i + 1, len(weight_scales))\n bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)\n model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)\n\n bn_solver = Solver(bn_model, small_data,\n num_epochs=10, batch_size=50,\n update_rule='adam',\n optim_config={\n 'learning_rate': 1e-3,\n },\n verbose=False, print_every=200)\n bn_solver.train()\n bn_solvers[weight_scale] = bn_solver\n\n solver = Solver(model, small_data,\n num_epochs=10, batch_size=50,\n update_rule='adam',\n optim_config={\n 'learning_rate': 1e-3,\n },\n verbose=False, print_every=200)\n solver.train()\n solvers[weight_scale] = solver\n\n# Plot results of weight scale experiment\nbest_train_accs, bn_best_train_accs = [], []\nbest_val_accs, bn_best_val_accs = [], []\nfinal_train_loss, bn_final_train_loss = [], []\n\nfor ws in weight_scales:\n best_train_accs.append(max(solvers[ws].train_acc_history))\n bn_best_train_accs.append(max(bn_solvers[ws].train_acc_history))\n \n best_val_accs.append(max(solvers[ws].val_acc_history))\n bn_best_val_accs.append(max(bn_solvers[ws].val_acc_history))\n \n final_train_loss.append(np.mean(solvers[ws].loss_history[-100:]))\n bn_final_train_loss.append(np.mean(bn_solvers[ws].loss_history[-100:]))\n \nplt.subplot(3, 1, 1)\nplt.title('Best val accuracy vs weight initialization scale')\nplt.xlabel('Weight initialization scale')\nplt.ylabel('Best val accuracy')\nplt.semilogx(weight_scales, best_val_accs, '-o', label='baseline')\nplt.semilogx(weight_scales, bn_best_val_accs, '-o', label='batchnorm')\nplt.legend(ncol=2, loc='lower right')\n\nplt.subplot(3, 1, 2)\nplt.title('Best train accuracy vs weight initialization scale')\nplt.xlabel('Weight initialization scale')\nplt.ylabel('Best training accuracy')\nplt.semilogx(weight_scales, best_train_accs, '-o', label='baseline')\nplt.semilogx(weight_scales, bn_best_train_accs, '-o', label='batchnorm')\nplt.legend()\n\nplt.subplot(3, 1, 3)\nplt.title('Final training loss vs weight initialization scale')\nplt.xlabel('Weight initialization scale')\nplt.ylabel('Final training loss')\nplt.semilogx(weight_scales, final_train_loss, '-o', label='baseline')\nplt.semilogx(weight_scales, bn_final_train_loss, '-o', label='batchnorm')\nplt.legend()\n\nplt.gcf().set_size_inches(10, 15)\nplt.show()", "Question:\nDescribe the results of this experiment, and try to give a reason why the experiment gave the results that it did.\nAnswer:\nVal accuracy\nComparison: Base line and batch norm have similar performance during .05 ~ .1 weight init. But BN is significantly better outside this interval.\nFrom BN's graph: BN behaves well in [1e-3, 1e-1], and indeed reached highest at 1e-2, where Baseline is already not working.\nGenerally BN improves fitness towards weight init.\nTraining accuracy\nBaseline exceed batch norm on its highest point. It shows that BN are less prone to overfitting.\nLoss\nBN also has significantly less loss in the end. 
\nIn addition, the baseline's loss may run into zero probabilities when calculating the softmax loss.\n\nBN helps prevent the vanishing-gradient and exploding-gradient problems." ]
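To make the batch-normalization cells above concrete, here is a minimal NumPy sketch of a training-time forward pass and the simplified ("alternative") backward pass, plus a finite-difference check. It is one possible implementation written for illustration, not the assignment's reference solution in cs231n/layers.py; the `_sketch` function names and the toy shapes are invented for this example.

```python
import numpy as np

def batchnorm_forward_sketch(x, gamma, beta, eps=1e-5):
    # x: (N, D) minibatch. Normalize each feature with minibatch statistics,
    # then apply the learnable scale (gamma) and shift (beta).
    mu = x.mean(axis=0)
    var = x.var(axis=0)
    x_hat = (x - mu) / np.sqrt(var + eps)
    out = gamma * x_hat + beta
    cache = (x_hat, gamma, var, eps)
    return out, cache

def batchnorm_backward_sketch(dout, cache):
    # Closed-form ("alternative") backward pass obtained by simplifying the
    # per-node gradients on paper.
    x_hat, gamma, var, eps = cache
    dbeta = dout.sum(axis=0)
    dgamma = (dout * x_hat).sum(axis=0)
    dx_hat = dout * gamma
    dx = (dx_hat - dx_hat.mean(axis=0)
          - x_hat * (dx_hat * x_hat).mean(axis=0)) / np.sqrt(var + eps)
    return dx, dgamma, dbeta

# Quick numerical sanity check of dx against a central finite difference.
rng = np.random.default_rng(0)
x = rng.standard_normal((4, 5))
gamma, beta = rng.standard_normal(5), rng.standard_normal(5)
out, cache = batchnorm_forward_sketch(x, gamma, beta)
dout = rng.standard_normal(out.shape)
dx, dgamma, dbeta = batchnorm_backward_sketch(dout, cache)
h = 1e-6
dx_num = np.zeros_like(x)
for i in np.ndindex(*x.shape):
    xp, xm = x.copy(), x.copy()
    xp[i] += h; xm[i] -= h
    dx_num[i] = ((batchnorm_forward_sketch(xp, gamma, beta)[0] * dout).sum()
                 - (batchnorm_forward_sketch(xm, gamma, beta)[0] * dout).sum()) / (2 * h)
print("max |dx - dx_num| =", np.abs(dx - dx_num).max())   # should be ~1e-8 or smaller
```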
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
LucaFoschini/IGERTbootcamp2016
EverythingData.ipynb
mit
[ "Everything Data (IGERT Bootcamp, Day 3)\nInstructor: Luca Foschini (email: luca@evidation.com) (twitter: @calimagna)\nFormat: Lecture and hands-on\nGoals\n\nLearn how perform basic data manipulation with python\nSee all the things that python can do\nLearn about what makes your code run slow\nDo you really have big data? \n\nData Ingestion, Wrangling, ETL\n\n80% of Data Science is data wrangling. \nPython's library ecosystem is the first reason to use it!\nPandas: if you learn one thing today, learn this!\n\nEverything has a Python API\nIt's safe to say that every internet service has an API for Python:\nExamples: \n - Weather : https://github.com/csparpa/pyowm\n - Twitter: https://code.google.com/p/python-twitter/ \n - Fitbit: https://github.com/orcasgit/python-fitbit\nMany Domain Specific Libraries\n\nNatural Language Processing: http://www.nltk.org/ [Run the tutorial], see also : http://fbkarsdorp.github.io/python-course/\nGraphs: http://networkx.readthedocs.io/en/networkx-1.11/examples/drawing/ego_graph.html\nMachine Learning: http://scikit-learn.org/stable/auto_examples/manifold/plot_lle_digits.html [Run the example]\n\nExotic:\n\nDeep Learning: https://keras.io/\nSurvival analysis: https://github.com/CamDavidsonPilon/lifelines\nBayesian inference and MCMC: http://pymcmc.readthedocs.org/en/latest/\n\nEverything nicely integrated in notebooks, and can be easily turned into slides", "# Example 1:\n# do something fun with the weather API", "Data Wrangling with Python and Pandas (tutorial)\nIntroduction: http://pandas.pydata.org/pandas-docs/stable/10min.html\nTutorial on data wrangling:\nhttps://github.com/jvns/pandas-cookbook", "# Run some exploration on tutorial\n%matplotlib inline\nimport pandas as pd\nimport matplotlib\nmatplotlib.style.use('ggplot')\n#montreal weather\nweather_url = \"https://raw.githubusercontent.com/jvns/pandas-cookbook/master/data/weather_2012.csv\"\n\nweather_2012_final = pd.read_csv(weather_url, parse_dates='Date/Time', index_col='Date/Time')\nweather_2012_final['Temp (C)'].plot(figsize=(15, 6))\n#weather_2012_final.to_hdf()\n\nprint weather_2012_final[weather_2012_final['Weather'] == 'Cloudy']['Temp (C)'].median()\nprint weather_2012_final[weather_2012_final['Weather'] == 'Snow']['Temp (C)'].median()\nweather_2012_final.to_hdf('ciao.h5', compression='blocs')", "Why is my code slow?\n\nLook under the hood: Memory hiearchies.\nPython is magic, magic isn't free: how built-in types are implemented and efficiency consideration\nProfiling and monitoring\nIf everything else fails: go parallel. 
\n\nExample of vectorization and timing\nhttp://nbviewer.jupyter.org/github/rossant/ipython-minibook/blob/master/chapter3/301-vector-computations.ipynb", "# Run the example above\n\ndef closest(position, positions):\n x0, y0 = position\n dbest, ibest = None, None\n for i, (x, y) in enumerate(positions):\n d = (x - x0) ** 2 + (y - y0) ** 2\n if dbest is None or d < dbest:\n dbest, ibest = d, i\n return ibest\n\nimport random\npositions = [(random.random(), random.random()) for _ in xrange(10000000)]\n\n\n%timeit closest((.5, .5), positions)\n\npositions = np.random.rand(10000000,2)\n\nx, y = positions[:,0], positions[:,1]\n\ndistances = (x - .5) ** 2 + (y - .5) ** 2\n\n%timeit exec In[39]", "One benchmark a day\nGoldmine: https://github.com/rasbt/One-Python-benchmark-per-day/tree/master/\nTry: \n\n\n6 different ways for counting elements using a dictionary\n\n\nPython vs Cython vs Numba\n\n\nMemory, cores, I/O\n\nLatency: Register, Cache, RAM, Disk (SSD/HDD), network\nOut of core vs distributed\nEmbarrassingly parallel problems (shell/python parallel)", "from IPython.display import Image\nImage(url='http://i.imgur.com/k0t1e.png')", "Scale:\n\nParallel : doc and example\nMultiprocess : http://sebastianraschka.com/Articles/2014_multiprocessing_intro.html \nBig Data (Spark and BDAS) https://spark.apache.org/examples.html", "!pip install ipyparallel\n\n# Example: Run some parallel code\nfrom ipyparallel import Client\nclient = Client(profile='mycluster')\n%px print(\"Hello from the cluster engines!\")", "How to deal with big data?\n\nbe smart: (sampling/approximation algorithms, divide-and-conquer)\nbe rich: rent-a-cloud, Digital Ocean, Cloud9\n\nNetwork analysis with NetworkX\nIntro and examples here", "%matplotlib inline\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom IPython.display import Image\nn = 10\nm = 20\nrgraph1 = nx.gnm_random_graph(n,m)\nprint \"Nodes: \", rgraph1.nodes()\nprint \"Edges: \", rgraph1.edges()\n\nif nx.is_connected(rgraph1):\n print \"Graph is connected\"\nelse:\n print \"Graph is not connected\"\n\nprint \"Diameter of graph is \", nx.diameter(rgraph1)\nnx.draw(rgraph1)\nplt.draw()\n\nelarge=[(u,v) for (u,v) in rgraph1.edges() if u + v >= 9]\nesmall=[(u,v) for (u,v) in rgraph1.edges() if u + v < 9]\n\npos=nx.spring_layout(rgraph1) # positions for all nodes\n\n# nodes\nnx.draw_networkx_nodes(rgraph1,pos,node_size=700)\n\n# edges\nnx.draw_networkx_edges(rgraph1,pos,edgelist=elarge,\n width=6,edge_color='r')\nnx.draw_networkx_edges(rgraph1,pos,edgelist=esmall,\n width=6,alpha=0.5,edge_color='b',style='dashed')\n\n# labels\nnx.draw_networkx_labels(rgraph1,pos,font_size=20,)\n\nplt.axis('off')\nplt.savefig(\"data/weighted_graph.png\") # save as png\nplt.show() # display\n\nT = nx.dfs_tree(rgraph1,0)\nprint \"DFS Tree edges : \", T.edges()\n\nT = nx.bfs_tree(rgraph1, 0)\nprint \"BFS Tree edges : \", T.edges()", "Galleries and miniproject\n\nInteresting notebook gallery. Pick one!\n\nMiniproject\nExtend the analysis provided here:\nhttp://nbviewer.ipython.org/github/rossant/ipython-minibook/blob/master/chapter3/303-cities-data-explore.ipynb \n\nWhat is the city that has the most other cities in a 10-mile radius from it?\nHow many cities have no other city in 10 miles from them? Where are they mostly located?\nWhat is the distribution of the number of cities within a 10-mile radius from a city? What about varying the radius using interact() ?" ]
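The vectorization cells above time the loop version with a fragile `%timeit exec In[39]` trick and rely on Python 2 builtins such as `xrange`. A self-contained Python 3 sketch of the same loop-vs-NumPy comparison might look like the following; the helper names `closest_loop`/`closest_numpy` and the one-million-point size are chosen here only for illustration.

```python
import timeit
import numpy as np

def closest_loop(point, positions):
    # Pure-Python version: explicit loop over (x, y) pairs.
    x0, y0 = point
    best_i, best_d = None, None
    for i, (x, y) in enumerate(positions):
        d = (x - x0) ** 2 + (y - y0) ** 2
        if best_d is None or d < best_d:
            best_i, best_d = i, d
    return best_i

def closest_numpy(point, positions):
    # Vectorized version: one array expression instead of a Python loop.
    d = ((positions - np.asarray(point)) ** 2).sum(axis=1)
    return int(d.argmin())

rng = np.random.default_rng(0)
pts = rng.random((1_000_000, 2))
pts_list = pts.tolist()

assert closest_loop((0.5, 0.5), pts_list) == closest_numpy((0.5, 0.5), pts)
t_loop = timeit.timeit(lambda: closest_loop((0.5, 0.5), pts_list), number=1)
t_np = timeit.timeit(lambda: closest_numpy((0.5, 0.5), pts), number=1)
print(f"loop: {t_loop:.3f}s  numpy: {t_np:.3f}s  speedup: {t_loop / t_np:.0f}x")
```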
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
empet/Math
Imags/Animating a family-of-complex-functions.ipynb
bsd-3-clause
[ "Animation of a family of complex functions", "import plotly.graph_objects as go\nimport numpy as np\n\nPlotly version of the HSV colorscale, corresponding to S=1, V=1, where S is saturation and V is the value.\n\npl_hsv = [[0.0, 'rgb(0, 255, 255)'],\n [0.0833, 'rgb(0, 127, 255)'],\n [0.1667, 'rgb(0, 0, 255)'],\n [0.25, 'rgb(127, 0, 255)'],\n [0.3333, 'rgb(255, 0, 255)'],\n [0.4167, 'rgb(255, 0, 127)'],\n [0.5, 'rgb(255, 0, 0)'],\n [0.5833, 'rgb(255, 127, 0)'],\n [0.6667, 'rgb(254, 255, 0)'],\n [0.75, 'rgb(127, 255, 0)'],\n [0.8333, 'rgb(0, 255, 0)'],\n [0.9167, 'rgb(0, 255, 127)'],\n [1.0, 'rgb(0, 255, 255)']]\n\ndef evaluate_function(func, re=(-1,1), im=(-1,1), N=100):\n # func is the complex function to be ploted\n # re, im are the interval ends on the real and imaginary axes, defining the rectangular region in the complex plane\n # N gives the number of points in an interval of length 1\n l = re[1]-re[0]\n h = im[1]-im[0]\n resL = int(N*l) #horizontal resolution\n resH = int(N*h) #vertical resolution\n X = np.linspace(re[0], re[1], resL)\n Y = np.linspace(im[0], im[1], resH)\n x, y = np.meshgrid(X,Y)\n z = x+1j*y\n return X, Y, z", "For a particular parameter, the corresponding function of the given family is represented as heatmap of its argument, which illustrates \nits zeros and poles position.\nDefine tickvals and ticktext for the heatmap colorbar", "tickvals = [-np.pi, -2*np.pi/3, -np.pi/3, 0, np.pi/3, 2*np.pi/3, np.pi]\nticktext=['-\\u03c0', '-2\\u03c0/3', '-\\u03c0/3', '0', '\\u03c0/3', '2\\u03c0/3', '\\u03c0'] ", "Define a family of complex functions, depending on the parameter t. To animate the motion of function zeros and poles, as t varies,\nwe use functools.partial to get the\nfunction corresponding to a particular parameter:", "from functools import partial\ndef func(t, z):\n return z**4+np.exp(2*t*1j)/z**2*np.exp(1j*t)\nf0 = partial(func, 0) \n\nx, y, z = evaluate_function(f0, re=(-1.5, 1.5), im=(-1.5,1.5), N=50)\nw = f0(z)\nargument = np.angle(w)\n\nfig = go.Figure(go.Heatmap(x=x, y=y, z=argument, colorscale=pl_hsv, \n colorbar=dict(thickness=20, tickvals=tickvals, \n ticktext=ticktext, \n title='arg(f(z))')))\n\nframes = []\nt = np.linspace(0, 3, 45) #6, 85\nfor s in t:\n g = partial(func, s)\n w = g(z)\n argument = np.angle(w)\n frames.append(go.Frame(data=[go.Heatmap(z=argument)]))\nfig.update(frames=frames); \n\nfig.update_layout(width=500, height=475,\n updatemenus=[dict(type='buttons', \n y=1,\n x=1.45,\n active=0,\n buttons=[dict(label='Play',\n method='animate',\n args=[None, \n dict(frame=dict(duration=10, \n redraw=True),\n transition=dict(duration=0),\n fromcurrent=True,\n mode='immediate')])])]);", "" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
stephensekula/smu-honors-physics
pi_monte_carlo/Monte Carlo Lecture Code.ipynb
mit
[ "Monte Carlo Methods: Computation of Pi\nBasics: variables, values, printing", "Ntotal = 100\nprint(Ntotal)\n\nNin = 55.0\nprint(Ntotal/Nin)", "Random Numbers: Uniformly Distributed Random Numbers", "import random\nrandom.uniform(0.0,1.0)\n\nrandom.uniform(0.0,1.0)\n\nrandom.uniform(0.0,1.0)\n\nrandom.uniform(0.0,1.0)", "Generating \"dots\" - random x, random y, and r", "import math\nimport random\nNtotal = 100\nNin = 0\n\nx = random.uniform(0.0,1.0)\ny = random.uniform(0.0,1.0)\nr = math.sqrt(x**2 + y**2)\n\nprint(x)\nprint(y)\nprint(r)", "The range() sequence type\nIf you want to generate a series of numbers in a sequence, e.g. 1,2,3,4..., then you want the range() immutable sequence type (or its equivalent in some other Python library). See the example below. range() doesn't directly create a list of numbers; it is its own type in Python. To get a list from it, see below.", "list_of_numbers=list(range(1,5))\nprint(list_of_numbers)\n\n# Note that the list EXCLUDES the endpoint. If you want to get a list of 5 numbers, from 1-5, \n# then you need to extend the endpoint by 1:\n\nlist_of_numbers=list(range(1,6))\nprint(list_of_numbers)\n\n# or this\n\nlist_of_numbers=list(range(0,5))\nprint(list_of_numbers)\n", "Putting things together: \"looping\" 100 times and printing r each time\nLet's put things together now. We can create a variable that stores the number of iterations (\"loops\") of the calculation we want to do. Let's call that Ntotal. Let's then loop 100 times and each time make a random x, random y, and from that compute $r=\\sqrt{x^2+y^2}$.\nNote that Python uses indentation to indicate a related block of code acting as a \"subroutine\" - a program within the program.", "import math\nNtotal = 100\nNin = 0\nfor i in range(0,Ntotal):\n x = random.uniform(0.0,1.0)\n y = random.uniform(0.0,1.0)\n r = math.sqrt(x**2 + y**2)\n print(\"x=%f, y=%f, r=%f\" % (x,y,r))", "Monte Carlo - Accept/Reject\nNow that we can make random \"dots\" in x,y and compute the radius (relative to 0,0) of each dot, let's employ \"Accept/Reject\" to see if something is in/on the circle or outside the circle. All we have to do is, for each $r$, test whether $r \\le R$ or $r > R$. If the former, we have a dot in the circle - a hit! If the latter, then we have a dot out of the circle - a miss! We accept the hits and reject the misses. The total number of points define all the moves in the game, and the ratio of hits to the total will tell us about the area of the circle, and thus get us closer to $\\pi$.", "Ntotal = 100\nNin = 0\nR = 1.0\nfor i in range(0,Ntotal):\n x = random.uniform(0.0,1.0)\n y = random.uniform(0.0,1.0)\n r = math.sqrt(x**2 + y**2)\n\n if r <= R:\n Nin = Nin + 1\n # alternatively, Nin += 1 (auto-increment by 1)\n\nprint(\"Number of dots: %d\" % Ntotal)\nprint(\"Number of hits: %d\" % Nin)\nprint(\"Number of misses: %d\" % (Ntotal-Nin))\n \nmy_pi = 4.0*float(Nin)/float(Ntotal)\nprint(\"pi = %f\" % (my_pi))", "Precision in Monte Carlo Simulation\nThe number above is probably close to what you know as $\\pi$, but likely not very precise. After all, we only threw 100 dots. We can increase precision by increasing the number of dots. In the code block below, feel free to play with Ntotal, trying different values. Observe how the computed value of $\\pi$ changes. Would you say it's \"closing in\" on the value you know, or not? Try Ntotal at 1000, 5000, 10000, 50000, and 100000.\nIn the code block below, I have also added a computation of statistical error. The error should be a binomial error. 
Binomial errors occur when you have a bunch of things and you can classify them in two ways: as $A$ and $\\bar{A}$ (\"A\" and \"Not A\"). In our case, they are hits or \"not hits\" (misses). So binomial errors apply. A binomial error is computed below.", "Ntotal = 100\nNin = 0\nR = 1.0\nfor i in range(0,Ntotal):\n x = random.uniform(0.0,1.0)\n y = random.uniform(0.0,1.0)\n r = math.sqrt(x**2 + y**2)\n\n if r <= R:\n Nin = Nin + 1\n # alternatively, Nin += 1 (auto-increment by 1)\n\nmy_pi = 4.0*float(Nin)/float(Ntotal)\nmy_pi_uncertainty = my_pi * math.sqrt(1.0/float(Nin) + 1.0/float(Ntotal))\nprint(\"pi = %.6f +/- %.6f (percent error= %.2f%%)\" % (my_pi, my_pi_uncertainty, 100.0*my_pi_uncertainty/my_pi))" ]
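A vectorized NumPy variant of the accept/reject estimator above makes the "precision grows with N" point without a Python loop. Note that the uncertainty below uses the textbook binomial standard error 4*sqrt(p(1-p)/N), which is not identical to the `my_pi_uncertainty` formula in the cell above; the helper name `estimate_pi` and the seed are illustrative.

```python
import numpy as np

def estimate_pi(n_total, rng=None):
    # Vectorized dart throwing: n_total points uniform in the unit square.
    rng = rng or np.random.default_rng()
    x = rng.random(n_total)
    y = rng.random(n_total)
    hits = np.count_nonzero(x * x + y * y <= 1.0)
    p_hat = hits / n_total
    pi_hat = 4.0 * p_hat
    # Binomial standard error on the hit fraction, scaled by the factor of 4.
    err = 4.0 * np.sqrt(p_hat * (1.0 - p_hat) / n_total)
    return pi_hat, err

rng = np.random.default_rng(42)
for n in (100, 10_000, 1_000_000):
    pi_hat, err = estimate_pi(n, rng)
    print(f"N={n:>9,d}  pi ~ {pi_hat:.5f} +/- {err:.5f}")
```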
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
amueller/odscon-sf-2015
06 - Working With Text Data.ipynb
cc0-1.0
[ "%matplotlib notebook\nimport matplotlib.pyplot as plt\nimport numpy as np", "Text Classification of Movie Reviews\nUnpack data - this only works on linux and (maybe?) OS X. Unpack using 7zip on Windows.", "#! tar -xf data/aclImdb.tar.bz2 --directory data\n\nfrom sklearn.datasets import load_files\n\nreviews_train = load_files(\"data/aclImdb/train/\")\ntext_train, y_train = reviews_train.data, reviews_train.target\n\nprint(\"Number of documents in training data: %d\" % len(text_train))\nprint(np.bincount(y_train))\n\nreviews_test = load_files(\"data/aclImdb/test/\")\ntext_test, y_test = reviews_test.data, reviews_test.target\nprint(\"Number of documents in test data: %d\" % len(text_test))\nprint(np.bincount(y_test))\n\nfrom IPython.display import HTML\n\nprint(text_train[1])\n\nHTML(text_train[1].decode(\"utf-8\"))\n\nprint(y_train[1])\n\nfrom sklearn.feature_extraction.text import CountVectorizer\ncv = CountVectorizer()\ncv.fit(text_train)\n\nlen(cv.vocabulary_)\n\nprint(cv.get_feature_names()[:50])\nprint(cv.get_feature_names()[50000:50050])\n\nX_train = cv.transform(text_train)\nX_train\n\nprint(text_train[19726])\n\nX_train[19726].nonzero()[1]\n\nX_test = cv.transform(text_test)\n\nfrom sklearn.svm import LinearSVC\n\nsvm = LinearSVC()\nsvm.fit(X_train, y_train)\n\nsvm.score(X_train, y_train)\n\nsvm.score(X_test, y_test)\n\ndef visualize_coefficients(classifier, feature_names, n_top_features=25):\n # get coefficients with large absolute values \n coef = classifier.coef_.ravel()\n positive_coefficients = np.argsort(coef)[-n_top_features:]\n negative_coefficients = np.argsort(coef)[:n_top_features]\n interesting_coefficients = np.hstack([negative_coefficients, positive_coefficients])\n # plot them\n plt.figure(figsize=(15, 5))\n colors = [\"red\" if c < 0 else \"blue\" for c in coef[interesting_coefficients]]\n plt.bar(np.arange(2 * n_top_features), coef[interesting_coefficients], color=colors)\n feature_names = np.array(feature_names)\n plt.subplots_adjust(bottom=0.3)\n plt.xticks(np.arange(1, 1 + 2 * n_top_features), feature_names[interesting_coefficients], rotation=60, ha=\"right\");\n\n\nvisualize_coefficients(svm, cv.get_feature_names())\n\nsvm = LinearSVC(C=0.001)\nsvm.fit(X_train, y_train)\n\nvisualize_coefficients(svm, cv.get_feature_names())\n\nfrom sklearn.pipeline import make_pipeline\ntext_pipe = make_pipeline(CountVectorizer(), LinearSVC())\ntext_pipe.fit(text_train, y_train)\ntext_pipe.score(text_test, y_test)\n\nfrom sklearn.grid_search import GridSearchCV\nimport time\n\nstart = time.time()\n\nparam_grid = {'linearsvc__C': np.logspace(-5, 0, 6)}\ngrid = GridSearchCV(text_pipe, param_grid, cv=5)\ngrid.fit(text_train, y_train)\n\nprint(time.time() - start)\n\ngrid.best_score_\n\ndef plot_grid_1d(grid_search_cv, ax=None):\n if ax is None:\n ax = plt.gca()\n if len(grid_search_cv.param_grid.keys()) > 1:\n raise ValueError(\"More then one parameter found. 
Can't do 1d plot.\")\n \n score_means, score_stds = zip(*[(np.mean(score.cv_validation_scores), np.std(score.cv_validation_scores))\n for score in grid_search_cv.grid_scores_])\n score_means, score_stds = np.array(score_means), np.array(score_stds)\n parameters = next(grid_search_cv.param_grid.values().__iter__())\n artists = []\n artists.extend(ax.plot(score_means))\n artists.append(ax.fill_between(range(len(parameters)), score_means - score_stds,\n score_means + score_stds, alpha=0.2, color=\"b\"))\n ax.set_xticklabels(parameters)\n\nplot_grid_1d(grid)\n\ngrid.best_params_\n\nvisualize_coefficients(grid.best_estimator_.named_steps['linearsvc'],\n grid.best_estimator_.named_steps['countvectorizer'].get_feature_names())\n\ngrid.best_score_\n\ngrid.score(text_test, y_test)", "N-Grams", "text_pipe = make_pipeline(CountVectorizer(), LinearSVC())\n\nparam_grid = {'linearsvc__C': np.logspace(-3, 2, 6),\n \"countvectorizer__ngram_range\": [(1, 1), (1, 2)]}\n\ngrid = GridSearchCV(text_pipe, param_grid, cv=5)\n\ngrid.fit(text_train, y_train)\n\nscores = np.array([score.mean_validation_score for score in grid.grid_scores_]).reshape(3, -1)\nplt.matshow(scores)\nplt.ylabel(\"n-gram range\")\nplt.yticks(range(3), param_grid[\"countvectorizer__ngram_range\"])\nplt.xlabel(\"C\")\nplt.xticks(range(6), param_grid[\"linearsvc__C\"]);\nplt.colorbar()\n\ngrid.best_params_\n\nvisualize_coefficients(grid.best_estimator_.named_steps['linearsvc'],\n grid.best_estimator_.named_steps['countvectorizer'].get_feature_names())\n\ngrid.score(text_test, y_test)", "Look at SpaCy and NLTK" ]
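The grid search above imports from `sklearn.grid_search`, which was removed in later scikit-learn releases (and `grid_scores_` went with it). A sketch of the same CountVectorizer + LinearSVC search against the modern `sklearn.model_selection` API is below; it assumes the IMDB data has been unpacked under data/aclImdb/ as in the cells above, and the `cv_results_` reshape mirrors the 2 n-gram ranges by 6 C values of the grid.

```python
from sklearn.datasets import load_files
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import GridSearchCV   # modern home of GridSearchCV
import numpy as np

reviews_train = load_files("data/aclImdb/train/")    # same unpacked IMDB data as above
text_train, y_train = reviews_train.data, reviews_train.target

pipe = make_pipeline(CountVectorizer(), LinearSVC())
param_grid = {
    "linearsvc__C": np.logspace(-3, 2, 6),
    "countvectorizer__ngram_range": [(1, 1), (1, 2)],
}
grid = GridSearchCV(pipe, param_grid, cv=5, n_jobs=-1)
grid.fit(text_train, y_train)
print(grid.best_params_, grid.best_score_)

# Per-combination scores now live in cv_results_ rather than grid_scores_;
# rows below correspond to the n-gram ranges, columns to the C values.
scores = np.array(grid.cv_results_["mean_test_score"]).reshape(2, -1)
print(scores)
```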
[ "code", "markdown", "code", "markdown", "code", "markdown" ]
DS-100/sp17-materials
sp17/disc/disc03/disc03.ipynb
gpl-3.0
[ "Intro to Git\nAuthors: Henry Milner, Andrew Do. Some of the material in this notebook is inspired by lectures by Prof. George Necula in CS 169.\nWhy git?\nYour first reason for this class (any likely many classes and projects to come): It's the only way to interact with other developers, because everyone uses it.\n\"Everyone?\" Yes. Github, the biggest host for public git repositories, has 20 million repositories. There are probably many more private repositories. (You can create either.)\nBetter reasons:\n* Work without fear. If you make a change that breaks something (or just wasn't a good idea), you can always go back.\n* Work on multiple computers. Much simpler and less error-prone than emailing yourself files.\n* Collaborate with other developers. \n* Maintain multiple versions.\nHowever, git can be a little confusing. Many confusions happen because people don't understand the fundamentals you'll learn today. If you've got the basics, the impact of other confusions will be bounded, and you can probably figure out how to search for a solution.\nCloning an existing repository\nWe made a special repository for this section (it takes 5 seconds) here:\nhttps://github.com/DS-100/git-intro\n\nWe'll use a Jupyter notebook, but you can run any of these commands in a Bash shell. Note that cd is a magic command in Jupyter that doesn't have a ! in front of it. !cd only works for the line you write it on.\nWe'll check out the repo in the /tmp folder, which the OS will wipe when you reboot. Obviously, don't do that if you want to keep the repo.", "cd /tmp\n\n# Delete the repo if it happens to already exist:\n!rm -rf git-intro\n\n# Create the repo\n!git clone https://github.com/DS-100/git-intro git-intro\n\n!ls -lh | grep git-intro\n\ncd git-intro", "Looking at files in a repo\nA repository is just a directory. Let's poke around.", "# What files are in the repo?\n!ls -lh\n\n# What about hidden files?\n!ls -alh", "The special .git directory is where git stores all its magic. If you delete it (or this whole directory), the repository won't be a repository any more.", "# What's the current status, according to git?\n!git status\n\n# What's the history of the repo?\n!git log\n\n# What does README.md look like currently?\n!cat README.md", "Making changes: Our first commit\nSuppose we want to add a file. You could create a Jupyter notebook or download an image. For simplicity, we'll just add a text file.", "# We can use Python to compute the filename.\n# Then we can reference Python variables in\n# ! shell commands using {}, because Jupyter\n# is magic.\nimport datetime\nour_id = datetime.datetime.now().microsecond\nfilename = \"our_file_{:d}.txt\".format(our_id)\nfilename\n\n!echo \"The quick brown fox \\\njumped over the lzy dog.\" > \"{filename}\"\n!ls", "Creating the file only changed the local filesystem. We can go to the repository page on Github to verify that the file hasn't been added yet. You probably wouldn't want your changes to be published immediately to the world!", "!git add \"{filename}\"", "If you check again, our file still hasn't been published to the world. In git, you package together your new files and updates to old files, and then you create a new version called a \"commit.\"\nGit maintains a \"staging\" or \"index\" area for files that you've marked for committing with git add.", "!git status\n\n!git commit -m 'Added our new file, \"{filename}\"'\n\n!git status\n\n!git log", "Now our local repository has this new commit in it. 
Notice that the log shows the message we wrote when we made the commit. It is very tempting to write something like \"stuff\" here. But then it will be very hard to understand your history, and you'll lose some of the benefits of git.\nFor the same reason, try to make each commit a self-contained idea: You fixed a particular bug, added a particular feature, etc.\nOur commit hasn't been published to other repositories yet, including the one on Github. We can check again to verify that.\nTo publish a commit we've created locally to another repository, we use git push. Git remembers that we checked out from the Github repository, and by default it will push to that repository. Just to be sure, let's find the name git has given to that repository, and pass that explicitly to git push.", "!git remote -v\n\n!git help push\n\n!git push origin", "Now our commit is finally visible on Github. Even if we spill coffee on our laptop, our new state will be safely recorded in the remote repository.\nGoing back\nOops, we didn't want that file! In fact, if you look at the history, people have been adding a bunch of silly files. We don't want any of them.\nOnce a commit is created, git basically never forgets about it or its contents (unless you try really hard). When your local filesystem doesn't have any outstanding changes, it's easy to switch back to an older commit.\nWe have previously given the name first to the first commit in the repo, which had basically nothing in it. (We'll soon see how to assign names to commits.)", "!git help branch\n\n!git branch --list\n\n# Let's make a new name for the first commit, \"going-back\",\n# with our ID in there so we don't conflict with other\n# sections.\n!git branch going-back-{our_id} first\n\n!git branch --list\n\n!git checkout going-back-{our_id}\n\n!ls\n\n!git status\n\n!git log --graph --decorate first going-back-{our_id} master", "Note: we can always get back to the commit we made with:\ngit checkout master\n\nBranches and commits\nGit informs us that we've switched to the going-back \"branch,\" and in the local filesystem, neither the file we created nor any other files, other than README.md, are there any more. What do you think would happen if we made some changes and made a new commit now?\n\nA. The previous commits would be overwritten. The master branch would disappear.\nB. The previous commits would be overwritten. The master branch would now refer to our new commit.\nC. A new commit would be created. The master branch would still refer to our last commit. The first branch would refer to the new commit.\nD. A new commit would be created. The master branch would still refer to our last commit. The first branch would still refer to the first commit in the repository.\nE. Git would ask us what to do, because it's not clear what we intended.\nF. Something else?\n\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\nLet's find out.", "new_filename = \"our_second_file_{}.txt\".format(our_id)\nnew_filename\n\n!echo \"Text for our second file!\" > {new_filename}\n!ls\n\n!git add {new_filename}\n!git commit -m'Adding our second file!'\n\n!git status\n\n!git log --graph --decorate first going-back-{our_id} master", "How does committing work?\nEvery commit is a snapshot of some files. A commit can never be changed. It has a unique ID assigned by git, like 20f97c1.\nHumans can't work with IDs like that, so git lets us give names like master or first to commits, using git branch &lt;name&gt; &lt;commit ID&gt;. 
These names are called \"branches\" or \"refs\" or \"tags.\" They're just names. Often master is used for the most up-to-date commit in a repository, but not always.\nAt any point in time, your repository is pointing to a commit. Except in unusual cases, that commit will have a name. Git gives that name its own name: HEAD. Remember: HEAD is a special kind of name. It refers to other names rather than to a commit.\n<img src=\"before_commit.jpg\">\nWhen you commit:\n\nGit creates your new commit.\nTo keep track of its lineage, git records that your new commit is a \"child\" of the current commit. That's what the lines in that git log line are showing.\nGit updates whatever name HEAD points to (your \"current branch\"). Now that name refers to the new commit.\n\n<img src=\"after_commit.jpg\">\nCan you list all the pieces that make up the full state of your git repository?\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n\nAll the commits with their IDs.\nAll the pointers from commits to their parents (the previous commit they built on).\nAll your \"refs,\" each pointing to a commit.\nThe HEAD, which points to a ref.\nThe \"working directory,\" which is all the actual files you see.\nThe \"index\" or \"staging\" area, which is all the files you've added with git add but haven't committed yet. (You can find out what's staged with git status. The staging area is confusing, so use it sparingly. Usually you should stage things and then immediately create a commit.)\nA list of \"remotes,\" which are other repositories your repository knows about. Often this is just the repository you cloned.\nThe last-known state of the remotes' refs.\n[...there are more, but these are the main ones.]\n\nHow does pushing work?\nIn git, every repository is coequal. The repository we cloned from Github looks exactly like ours, except it might contain different commits and names.\nSuppose you want to publish your changes.", "!git push origin going-back-{our_id}", "Here origin is the name (according to git remote -v) of the repository you want to push to. If you omit a remote name, origin is also the default. Normally that's what you want.\ngoing-back-{our_id} (whatever the value of {our_id}) is a branch in your repository. If you omit a branch name here, your current branch (the branch HEAD refers to) is the default.\nWhat do you think git does?\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\nA few things happen:\n1. Git finds all the commits in going-back-{our_id}'s history - all of its ancestors.\n2. It sends all of those commits to origin, and they're added to that repository. (If origin already has a bunch of them, of course those don't need to be sent.)\n3. It updates the branch named going-back-{our_id} in origin to point to the same commit yours does.\nHowever, suppose someone else has updated going-back-{our_id} since you last got it?\n 456 (your going-back-{our_id})\n \\ 345 (origin's going-back-{our_id}, pushed by someone else)\n \\ /\n \\ /\n 234 (going-back-{our_id} when you last pulled it from origin)\n |\n 123\n\nHow do you think git handles that?\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\nThe answer may surprise you: git gives up and tells you you're not allowed to push. 
Instead, you have to pull the remote commits and merge them in your repository, then push after merging.\nerror: failed to push some refs to 'https://github.com/DS-100/git-intro.git'\nhint: Updates were rejected because the remote contains work that you do\nhint: not have locally. This is usually caused by another repository pushing\nhint: to the same ref. You may want to first integrate the remote changes\nhint: (e.g., 'git pull ...') before pushing again.\nhint: See the 'Note about fast-forwards' in 'git push --help' for details.\n\nWe'll go over merging next, but the end result after merging will look like this:\n 567 (your going-back-{our_id})\n | \\\n | \\\n | \\\n 456 \\\n \\ 345 (origin's going-back-{our_id}, pushed by someone else)\n \\ /\n \\ /\n 234 (going-back-{our_id} when you last pulled it from origin)\n |\n 123\n\nThen git push origin going-back-{our_id} would succeed, since there are now no conflicts. We're updating going-back-{our_id} to a commit that's a descendant of the current commit going-back-{our_id} names in origin.\nSo it remains to see how to accomplish a merge. We need to start with pulling updates from other repositories.\nHow does pulling work?\nSuppose someone else pushes a commit to the remote repository. We can simulate that with our own second repository:", "cd /tmp\n\n!git clone https://github.com/DS-100/git-intro git-intro-2\n\ncd /tmp/git-intro-2\n\n!git checkout going-back-{our_id}\n\nthird_filename = \"our_third_file_{}.txt\".format(our_id)\nthird_filename\n\n!echo \"Someone else added this third file!\" > {third_filename}\n!git add {third_filename}\n!git commit -m\"Adding a third file!\"\n!git push", "Now we go back to our original repo.", "cd /tmp/git-intro", "You might just want the update. Or maybe you want to push your own commit to the same branch, and your git push failed.\nGit has a command called pull that you could use. But it's complicated, and it's easier to break it down into two steps: fetching and merging.\nSince git commits are never destroyed, it's always safe to fetch commits from another repository. (Refs can be changed, so that's not true for refs. That's the source of the problem with our push before!)", "!git help fetch\n\n!git fetch origin\n\n!git log --graph --decorate going-back-{our_id} origin/going-back-{our_id}", "Now we need to update our ref to the newer commit. In this case, it's easy, because we didn't have any further commits. Git calls that a \"fast-forward\" merge.", "!git merge origin/going-back-{our_id} --ff-only\n\n!git log --graph --decorate", "As a shortcut, you can do fetch and fast-forward merge with a single command:\ngit pull origin/going-back-{our_id} --ff-only\n\nWhat if there's a nontrivial merge to do?\nIn this class, you have three repositories:\n\nThe class Github repository ds100, which contains blank copies of assignments\nThe repository that lives on your own computer, where you work on your assignments\nYour Github repository origin, where you submit your assignments\n\nds100 will be updated regularly with commits that add new assignments. You'll never push to ds100. But you will pull from it regularly to get the new assignments.\nWhen you pull from ds100, you don't want to just use the latest commit from that repo. 
Then you'd be starting from scratch, without all your work on previous assignments.\nInstead, you want to merge the ds100 updates so that you get the new assignments but don't clobber your own work.\nIn the git log, after a few assignments, this will look something like this:\n(ds100/master) (master on local repo)\n ... ---------------------------efg (merged with 345 to get hw2)\n | / |\n 345 (hw2) def (worked on lab2)\n | |\n | ---------------------------cde (merged with 234 to get lab2)\n | / |\n 234 (lab2) bcd (finishing touches on hw1)\n | |\n | ---------------------------abc (your work on hw1)\n | / ^ not a merge\n 123 (hw1)\n\nConsider the first merge only. The current commit is bcd, and you want to get lab 2. From what you know so far, how should you merge?\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\n<br>\nAnswer: Assuming we're on the master branch in our repo, and there are no uncommitted changes to the working files:\n!git fetch ds100\n!git merge ds100/master\n\nThat doesn't finish things for us, though. How will the merge work? How will git reconcile your changes to the hw1 files with the addition of lab2 files?\nGit tries to intelligently include all the changes introduced in the two merged branches since their last common ancestor. In this case, the changes are independent - one branch introduced new files in the lab2 directory, and the other edited files in the hw1 directory. So git will just do it.\nGit assumes that changes introduced in separate files, or in separate lines of the same file, can be applied together. If two branches change the same line of the same file, it will give up and ask you to reconcile the changes. You'll then need to edit the file and follow the instructions to mark it as fixed. We won't go over an example of that today.\nNote that sometimes git's assumption about independence is not true. For example, suppose you are working on a project and you create a new code file A that imports code from another file B. Your coworker deletes file B. Git will merrily apply both changes, but your code in file A won't work any more. So you need to apply human judgment when merging. If you write informative commit messages, it's much easier to find such problems.\nA shortcut to pull in this class\nThe instructions on the course website tell you to get changes from ds100 with this command:\ngit pull -s recursive -X ours --no-edit ds100 master\n\nWhat does this do? It's basically what we just went through, with some extra options that let you avoid dealing with merges:\n- git pull ds100 master: Pull from ds100, updating the master branch. Equivalent to git fetch ds100; git merge ds100/master as seen above.\n- -s recursive -X ours: If git finds that you and the ds100 repo have made changes to the same line in a file, always take your changes and delete the ds100 repo changes. It will do this instead of asking you to reconcile the changes.\n- --no-edit: Normally, git will ask you to create a commit message to describe the merge commit. This option generates a default message for you.\nMiscellaneous useful tips and commands\n\nThink before you run commands like git merge or git checkout that might update your current branch. If you have outstanding uncommitted changes, it can be complicated to keep them intact. 
Generally you should commit your changes before running such commands.\ngit diff: See all changes in your working directory versus the most recent commit.\ngit diff &lt;commit_or_ref&gt; &lt;other_commit_or_ref&gt;: See all the changes between two commits.\ngit add -u: Add to the index (in preparation for a commit) all files that have been changed.\ngit add -A: Add to the index (in preparation for a commit) all files.\ngit rm &lt;file&gt;: Delete a file. This change happens in the index, so it will show up in your next commit.\ngit mv &lt;file&gt; &lt;new_name&gt;: Rename a file. Again, this change happens in the index, so it will show up in your next commit.\ngit checkout -- &lt;file&gt;: Reset a file to its state in the current commit, eliminating changes in the working directory.\ngit rm --cached &lt;file&gt;: Unstage a staged change to a file. (Useful if you git add a file you didn't mean to add.) Doesn't delete the file.\ngit checkout -b &lt;new_branch_name&gt;: Create a new branch at the current commit and check it out, making HEAD point to it." ]
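The "commits, parents, refs, HEAD" bookkeeping described above can be mimicked with a few dictionaries. The toy model below is purely illustrative — real git stores content-addressed blobs, trees, and commit objects rather than whole file snapshots in a dict — but it reproduces the pointer behaviour the section walks through: committing moves whatever branch HEAD points to, and a branch is just a name for a commit.

```python
import hashlib

repo = {"commits": {}, "refs": {}, "HEAD": "master"}

def commit(message, snapshot):
    parent = repo["refs"].get(repo["HEAD"])               # current branch tip (None for the first commit)
    payload = f"{parent}{message}{sorted(snapshot.items())}"
    cid = hashlib.sha1(payload.encode()).hexdigest()[:7]  # short id, like git's abbreviated hashes
    repo["commits"][cid] = {"parent": parent, "message": message, "files": dict(snapshot)}
    repo["refs"][repo["HEAD"]] = cid                      # the branch HEAD points to moves forward
    return cid

def branch(name, cid):
    repo["refs"][name] = cid                              # a branch is just a name -> commit mapping

def checkout(name):
    repo["HEAD"] = name                                   # HEAD is a name that points to a ref

c1 = commit("Initial commit", {"README.md": "hello"})
branch("going-back", c1)
c2 = commit("Add a file", {"README.md": "hello", "our_file.txt": "fox"})
checkout("going-back")
c3 = commit("Different line of work", {"README.md": "hello", "second.txt": "!"})

print(repo["refs"])                                        # master -> c2, going-back -> c3: diverged
print(repo["commits"][c2]["parent"] == c1,                 # both new commits share c1 as parent,
      repo["commits"][c3]["parent"] == c1)                 # exactly the forked history drawn above
```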
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
mne-tools/mne-tools.github.io
stable/_downloads/7b89f7dac105a44e25d2fbdd898b911f/vector_mne_solution.ipynb
bsd-3-clause
[ "%matplotlib inline", "Plotting the full vector-valued MNE solution\nThe source space that is used for the inverse computation defines a set of\ndipoles, distributed across the cortex. When visualizing a source estimate, it\nis sometimes useful to show the dipole directions in addition to their\nestimated magnitude. This can be accomplished by computing a\n:class:mne.VectorSourceEstimate and plotting it with\n:meth:stc.plot &lt;mne.VectorSourceEstimate.plot&gt;, which uses\n:func:~mne.viz.plot_vector_source_estimates under the hood rather than\n:func:~mne.viz.plot_source_estimates.\nIt can also be instructive to visualize the actual dipole/activation locations\nin 3D space in a glass brain, as opposed to activations imposed on an inflated\nsurface (as typically done in :meth:mne.SourceEstimate.plot), as it allows\nyou to get a better sense of the underlying source geometry.", "# Author: Marijn van Vliet <w.m.vanvliet@gmail.com>\n#\n# License: BSD-3-Clause\n\nimport numpy as np\nimport mne\nfrom mne.datasets import sample\nfrom mne.minimum_norm import read_inverse_operator, apply_inverse\n\nprint(__doc__)\n\ndata_path = sample.data_path()\nsubjects_dir = data_path / 'subjects'\nsmoothing_steps = 7\n\n# Read evoked data\nmeg_path = data_path / 'MEG' / 'sample'\nfname_evoked = meg_path / 'sample_audvis-ave.fif'\nevoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))\n\n# Read inverse solution\nfname_inv = meg_path / 'sample_audvis-meg-oct-6-meg-inv.fif'\ninv = read_inverse_operator(fname_inv)\n\n# Apply inverse solution, set pick_ori='vector' to obtain a\n# :class:`mne.VectorSourceEstimate` object\nsnr = 3.0\nlambda2 = 1.0 / snr ** 2\nstc = apply_inverse(evoked, inv, lambda2, 'dSPM', pick_ori='vector')\n\n# Use peak getter to move visualization to the time point of the peak magnitude\n_, peak_time = stc.magnitude().get_peak(hemi='lh')", "Plot the source estimate:", "brain = stc.plot(\n initial_time=peak_time, hemi='lh', subjects_dir=subjects_dir,\n smoothing_steps=smoothing_steps)\n\n# You can save a brain movie with:\n# brain.save_movie(time_dilation=20, tmin=0.05, tmax=0.16, framerate=10,\n# interpolation='linear', time_viewer=True)", "Plot the activation in the direction of maximal power for this data:", "stc_max, directions = stc.project('pca', src=inv['src'])\n# These directions must by design be close to the normals because this\n# inverse was computed with loose=0.2\nprint('Absolute cosine similarity between source normals and directions: '\n f'{np.abs(np.sum(directions * inv[\"source_nn\"][2::3], axis=-1)).mean()}')\nbrain_max = stc_max.plot(\n initial_time=peak_time, hemi='lh', subjects_dir=subjects_dir,\n time_label='Max power', smoothing_steps=smoothing_steps)", "The normal is very similar:", "brain_normal = stc.project('normal', inv['src'])[0].plot(\n initial_time=peak_time, hemi='lh', subjects_dir=subjects_dir,\n time_label='Normal', smoothing_steps=smoothing_steps)", "You can also do this with a fixed-orientation inverse. It looks a lot like\nthe result above because the loose=0.2 orientation constraint keeps\nsources close to fixed orientation:", "fname_inv_fixed = (\n meg_path / 'sample_audvis-meg-oct-6-meg-fixed-inv.fif')\ninv_fixed = read_inverse_operator(fname_inv_fixed)\nstc_fixed = apply_inverse(\n evoked, inv_fixed, lambda2, 'dSPM', pick_ori='vector')\nbrain_fixed = stc_fixed.plot(\n initial_time=peak_time, hemi='lh', subjects_dir=subjects_dir,\n smoothing_steps=smoothing_steps)" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
Autodesk/molecular-design-toolkit
moldesign/_notebooks/Example 3. Simulating a crystal structure.ipynb
apache-2.0
[ "<span style=\"float:right\"><a href=\"http://moldesign.bionano.autodesk.com/\" target=\"_blank\" title=\"About\">About</a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href=\"https://github.com/autodesk/molecular-design-toolkit/issues\" target=\"_blank\" title=\"Issues\">Issues</a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href=\"http://bionano.autodesk.com/MolecularDesignToolkit/explore.html\" target=\"_blank\" title=\"Tutorials\">Tutorials</a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href=\"http://autodesk.github.io/molecular-design-toolkit/\" target=\"_blank\" title=\"Documentation\">Documentation</a></span>\n</span>\n\n<br>\n<center><h1>Example 3: Simulating a Holliday Junction PDB assembly </h1> </center>\n\nThis notebook takes a crystal structure from the PDB and prepares it for simulation.\n\nAuthor: Aaron Virshup, Autodesk Research\nCreated on: July 1, 2016\nTags: DNA, holliday junction, assembly, PDB, MD", "%matplotlib inline\nfrom matplotlib.pyplot import *\n\nimport moldesign as mdt\nfrom moldesign import units as u", "Contents\n\n\nA. View the crystal structure\nB. Build the biomolecular assembly\nC. Isolate the DNA\nD. Prep for simulation\nE. Dynamics - equilibration\nF. Dynamics - production\n\nA. View the crystal structure\nWe start by downloading the 1KBU crystal structure.\nIt will generate several warnings. Especially note that it contains biomolecular \"assembly\" information. This means that the file from PDB doesn't contain the complete structure, but we can generate the missing parts using symmetry operations.", "xtal = mdt.from_pdb('1kbu')\nxtal.draw()", "B. Build the biomolecular assembly\nAs you can read in the warning, 1KBU only has one biomolecular assembly, conveniently named '1'. This cell builds and views it:", "assembly = mdt.build_assembly(xtal, 1)\nassembly.draw()", "By evaulating the assembly object (it's a normal instance of the moldesign.Molecule class), we can get some information about it's content:", "assembly", "Because we're only interested in DNA, we'll create a new molecule using only the DNA residues, and then assign a forcefield to it.\nC. Isolate the DNA\nThis example will focus only on the DNA components of this structure, so we'll isolate the DNA atoms and create a new molecule from them.\nWe could do this with a list comprehension, e.g.\nmdt.Molecule([atom for atom in assembly.atoms if atom.residue.type == 'dna'])\nHere, however we'll use a shortcut for this - the molecule.get_atoms method, which allows you to run queries on the atoms:", "dna_atoms = assembly.get_atoms('dna')\ndna_only = mdt.Molecule(dna_atoms)\ndna_only.draw3d(display=True)\ndna_only", "D. Prep for simulation\nNext, we'll assign a forcefield and energy model, then minimize the structure.", "ff = mdt.forcefields.DefaultAmber()\ndna = ff.create_prepped_molecule(dna_only)\n\ndna.set_energy_model(mdt.models.OpenMMPotential, implicit_solvent='obc')\ndna.configure_methods()\n\nminimization = dna.minimize()\n\nminimization.draw()", "E. Dynamics - equilibration\nThe structure is ready. We'll associate an integrator with the molecule, then do a 2 step equilibration - first freezing the peptide backbone and running 300K dynamics, then unfreezing and continuing dyanmics.", "# Freeze the backbone:\nfor residue in dna.residues:\n for atom in residue.backbone:\n dna.constrain_atom(atom)\n\ndna.set_integrator(mdt.integrators.OpenMMLangevin,\n timestep=2.0*u.fs,\n frame_interval=1.0*u.ps,\n remove_rotation=True)\ndna.integrator.configure()", "And now we run it. 
This may take a while, depending on your hardware.", "equil1 = dna.run(20.0*u.ps)\n\nequil1.draw()", "Next, we'll remove the constraints and do full dynamics:", "dna.clear_constraints()\nequil2 = dna.run(20.0*u.ps)\n\nequil = equil1 + equil2\nequil.draw()\n\nplot(equil2.time, equil2.rmsd())\nxlabel('time / fs'); ylabel(u'rmsd / Å'); grid()", "NOTE: THIS IS NOT A SUFFICIENT EQUILIBRATION FOR PRODUCTION MOLECULAR DYNAMICS! \nIn practice, before going to \"production\", we would at least want to run dynamics until the RMSD and thermodynamic observables have converged. A variety of equilibration protocols are used in practice, including slow heating, reduced coupling, multiple constraints, etc.\nF. Dynamics - production\nAssuming that we're satisfied with our system's equilibration, we now gather data for \"production\". This will take a while.", "trajectory = dna.run(40.0*u.ps)\n\ntrajectory.draw()", "G. Save your results\nAny MDT object can be saved to disk. We recommend saving objects with the \"Pickle\" format to make sure that all the data is preserved.\nThis cell saves the final trajectory to disk as a compressed pickle file:", "trajectory.write('holliday_traj.P.gz')", "To load the saved object, use:", "traj = mdt.read('holliday_traj.P.gz')\n\ntraj.draw()" ]
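The equilibration check above plots `equil2.rmsd()`; as a reminder of what that number means, here is a stand-alone NumPy sketch of an RMSD after optimal rigid-body superposition (Kabsch algorithm). MDT's trajectory RMSD may differ in details such as atom selection, alignment, and units — this is only an illustration, and the function name `kabsch_rmsd` is made up.

```python
import numpy as np

def kabsch_rmsd(coords, ref):
    # RMSD between two conformations after optimal superposition.
    # coords, ref: (n_atoms, 3) arrays in the same atom order.
    p = coords - coords.mean(axis=0)
    q = ref - ref.mean(axis=0)
    u, _, vt = np.linalg.svd(p.T @ q)
    d = np.sign(np.linalg.det(vt.T @ u.T))          # guard against improper rotations
    rot = vt.T @ np.diag([1.0, 1.0, d]) @ u.T
    return float(np.sqrt(np.mean(np.sum((p @ rot.T - q) ** 2, axis=1))))

# Tiny demo: a rotated + translated copy has RMSD ~ 0, a perturbed copy does not.
rng = np.random.default_rng(1)
ref = rng.random((50, 3))
theta = 0.7
rz = np.array([[np.cos(theta), -np.sin(theta), 0.0],
               [np.sin(theta),  np.cos(theta), 0.0],
               [0.0, 0.0, 1.0]])
moved = ref @ rz.T + np.array([5.0, -2.0, 1.0])
print(kabsch_rmsd(moved, ref))                                           # ~ 1e-15
print(kabsch_rmsd(moved + 0.1 * rng.standard_normal(ref.shape), ref))    # small but nonzero
```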
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
jmhsi/justin_tinker
data_science/courses/temp/courses/dl1/lesson3-rossman.ipynb
apache-2.0
[ "Structured and time series data\nThis notebook contains an implementation of the third place result in the Rossman Kaggle competition as detailed in Guo/Berkhahn's Entity Embeddings of Categorical Variables.\nThe motivation behind exploring this architecture is it's relevance to real-world application. Most data used for decision making day-to-day in industry is structured and/or time-series data. Here we explore the end-to-end process of using neural networks with practical structured data problems.", "%matplotlib inline\n%reload_ext autoreload\n%autoreload 2\n\nfrom fastai.structured import *\nfrom fastai.column_data import *\nnp.set_printoptions(threshold=50, edgeitems=20)\n\nPATH='data/rossmann/'", "Create datasets\nIn addition to the provided data, we will be using external datasets put together by participants in the Kaggle competition. You can download all of them here.\nFor completeness, the implementation used to put them together is included below.", "def concat_csvs(dirname):\n path = f'{PATH}{dirname}'\n filenames=glob.glob(f\"{path}/*.csv\")\n\n wrote_header = False\n with open(f\"{path}.csv\",\"w\") as outputfile:\n for filename in filenames:\n name = filename.split(\".\")[0]\n with open(filename) as f:\n line = f.readline()\n if not wrote_header:\n wrote_header = True\n outputfile.write(\"file,\"+line)\n for line in f:\n outputfile.write(name + \",\" + line)\n outputfile.write(\"\\n\")\n\n# concat_csvs('googletrend')\n# concat_csvs('weather')", "Feature Space:\n* train: Training set provided by competition\n* store: List of stores\n* store_states: mapping of store to the German state they are in\n* List of German state names\n* googletrend: trend of certain google keywords over time, found by users to correlate well w/ given data\n* weather: weather\n* test: testing set", "table_names = ['train', 'store', 'store_states', 'state_names', \n 'googletrend', 'weather', 'test']", "We'll be using the popular data manipulation framework pandas. Among other things, pandas allows you to manipulate tables/data frames in python as one would in a database.\nWe're going to go ahead and load all of our csv's as dataframes into the list tables.", "tables = [pd.read_csv(f'{PATH}{fname}.csv', low_memory=False) for fname in table_names]\n\nfrom IPython.display import HTML", "We can use head() to get a quick look at the contents of each table:\n* train: Contains store information on a daily basis, tracks things like sales, customers, whether that day was a holdiay, etc.\n* store: general info about the store including competition, etc.\n* store_states: maps store to state it is in\n* state_names: Maps state abbreviations to names\n* googletrend: trend data for particular week/state\n* weather: weather conditions for each state\n* test: Same as training table, w/o sales and customers", "for t in tables: display(t.head())", "This is very representative of a typical industry dataset.\nThe following returns summarized aggregate information to each table accross each field.", "for t in tables: display(DataFrameSummary(t).summary())", "Data Cleaning / Feature Engineering\nAs a structured data problem, we necessarily have to go through all the cleaning and feature engineering, even though we're using a neural network.", "train, store, store_states, state_names, googletrend, weather, test = tables\n\nlen(train),len(test)", "We turn state Holidays to booleans, to make them more convenient for modeling. 
We can do calculations on pandas fields using notation very similar (often identical) to numpy.", "train.StateHoliday = train.StateHoliday!='0'\ntest.StateHoliday = test.StateHoliday!='0'", "join_df is a function for joining tables on specific fields. By default, we'll be doing a left outer join of right on the left argument using the given fields for each table.\nPandas does joins using the merge method. The suffixes argument describes the naming convention for duplicate fields. We've elected to leave the duplicate field names on the left untouched, and append a \"_y\" to those on the right.", "def join_df(left, right, left_on, right_on=None, suffix='_y'):\n if right_on is None: right_on = left_on\n return left.merge(right, how='left', left_on=left_on, right_on=right_on, \n suffixes=(\"\", suffix))", "Join weather/state names.", "weather = join_df(weather, state_names, \"file\", \"StateName\")", "In pandas you can add new columns to a dataframe by simply defining it. We'll do this for googletrends by extracting dates and state names from the given data and adding those columns.\nWe're also going to replace all instances of state name 'NI' to match the usage in the rest of the data: 'HB,NI'. This is a good opportunity to highlight pandas indexing. We can use .loc[rows, cols] to select a list of rows and a list of columns from the dataframe. In this case, we're selecting rows w/ statename 'NI' by using a boolean list googletrend.State=='NI' and selecting \"State\".", "googletrend['Date'] = googletrend.week.str.split(' - ', expand=True)[0]\ngoogletrend['State'] = googletrend.file.str.split('_', expand=True)[2]\ngoogletrend.loc[googletrend.State=='NI', \"State\"] = 'HB,NI'", "The following extracts particular date fields from a complete datetime for the purpose of constructing categoricals.\nYou should always consider this feature extraction step when working with date-time. Without expanding your date-time into these additional fields, you can't capture any trend/cyclical behavior as a function of time at any of these granularities. We'll add to every table with a date field.", "add_datepart(weather, \"Date\", drop=False)\nadd_datepart(googletrend, \"Date\", drop=False)\nadd_datepart(train, \"Date\", drop=False)\nadd_datepart(test, \"Date\", drop=False)", "The Google trends data has a special category for the whole of the US - we'll pull that out so we can use it explicitly.", "trend_de = googletrend[googletrend.file == 'Rossmann_DE']", "Now we can outer join all of our data into a single dataframe. Recall that in outer joins everytime a value in the joining field on the left table does not have a corresponding value on the right table, the corresponding row in the new table has Null values for all right table fields. One way to check that all records are consistent and complete is to check for Null values post-join, as we do here.\nAside: Why note just do an inner join?\nIf you are assuming that all records are complete and match on the field you desire, an inner join will do the same thing as an outer join. However, in the event you are wrong or a mistake is made, an outer join followed by a null-check will catch it. (Comparing before/after # of rows for inner join is equivalent, but requires keeping track of before/after row #'s. 
Outer join is easier.)", "store = join_df(store, store_states, \"Store\")\nlen(store[store.State.isnull()])\n\njoined = join_df(train, store, \"Store\")\njoined_test = join_df(test, store, \"Store\")\nlen(joined[joined.StoreType.isnull()]),len(joined_test[joined_test.StoreType.isnull()])\n\njoined = join_df(joined, googletrend, [\"State\",\"Year\", \"Week\"])\njoined_test = join_df(joined_test, googletrend, [\"State\",\"Year\", \"Week\"])\nlen(joined[joined.trend.isnull()]),len(joined_test[joined_test.trend.isnull()])\n\njoined = joined.merge(trend_de, 'left', [\"Year\", \"Week\"], suffixes=('', '_DE'))\njoined_test = joined_test.merge(trend_de, 'left', [\"Year\", \"Week\"], suffixes=('', '_DE'))\nlen(joined[joined.trend_DE.isnull()]),len(joined_test[joined_test.trend_DE.isnull()])\n\njoined = join_df(joined, weather, [\"State\",\"Date\"])\njoined_test = join_df(joined_test, weather, [\"State\",\"Date\"])\nlen(joined[joined.Mean_TemperatureC.isnull()]),len(joined_test[joined_test.Mean_TemperatureC.isnull()])\n\nfor df in (joined, joined_test):\n for c in df.columns:\n if c.endswith('_y'):\n if c in df.columns: df.drop(c, inplace=True, axis=1)", "Next we'll fill in missing values to avoid complications with NA's. NA (not available) is how Pandas indicates missing values; many models have problems when missing values are present, so it's always important to think about how to deal with them. In these cases, we are picking an arbitrary signal value that doesn't otherwise appear in the data.", "for df in (joined,joined_test):\n df['CompetitionOpenSinceYear'] = df.CompetitionOpenSinceYear.fillna(1900).astype(np.int32)\n df['CompetitionOpenSinceMonth'] = df.CompetitionOpenSinceMonth.fillna(1).astype(np.int32)\n df['Promo2SinceYear'] = df.Promo2SinceYear.fillna(1900).astype(np.int32)\n df['Promo2SinceWeek'] = df.Promo2SinceWeek.fillna(1).astype(np.int32)", "Next we'll extract features \"CompetitionOpenSince\" and \"CompetitionDaysOpen\". 
Note the use of apply() in mapping a function across dataframe values.", "for df in (joined,joined_test):\n df[\"CompetitionOpenSince\"] = pd.to_datetime(dict(year=df.CompetitionOpenSinceYear, \n month=df.CompetitionOpenSinceMonth, day=15))\n df[\"CompetitionDaysOpen\"] = df.Date.subtract(df.CompetitionOpenSince).dt.days", "We'll replace some erroneous / outlying data.", "for df in (joined,joined_test):\n df.loc[df.CompetitionDaysOpen<0, \"CompetitionDaysOpen\"] = 0\n df.loc[df.CompetitionOpenSinceYear<1990, \"CompetitionDaysOpen\"] = 0", "We add \"CompetitionMonthsOpen\" field, limiting the maximum to 2 years to limit number of unique categories.", "for df in (joined,joined_test):\n df[\"CompetitionMonthsOpen\"] = df[\"CompetitionDaysOpen\"]//30\n df.loc[df.CompetitionMonthsOpen>24, \"CompetitionMonthsOpen\"] = 24\njoined.CompetitionMonthsOpen.unique()", "Same process for Promo dates.", "for df in (joined,joined_test):\n df[\"Promo2Since\"] = pd.to_datetime(df.apply(lambda x: Week(\n x.Promo2SinceYear, x.Promo2SinceWeek).monday(), axis=1).astype(pd.datetime))\n df[\"Promo2Days\"] = df.Date.subtract(df[\"Promo2Since\"]).dt.days\n\nfor df in (joined,joined_test):\n df.loc[df.Promo2Days<0, \"Promo2Days\"] = 0\n df.loc[df.Promo2SinceYear<1990, \"Promo2Days\"] = 0\n df[\"Promo2Weeks\"] = df[\"Promo2Days\"]//7\n df.loc[df.Promo2Weeks<0, \"Promo2Weeks\"] = 0\n df.loc[df.Promo2Weeks>25, \"Promo2Weeks\"] = 25\n df.Promo2Weeks.unique()\n\njoined.to_feather(f'{PATH}joined')\njoined_test.to_feather(f'{PATH}joined_test')", "Durations\nIt is common when working with time series data to extract data that explains relationships across rows as opposed to columns, e.g.:\n* Running averages\n* Time until next event\n* Time since last event\nThis is often difficult to do with most table manipulation frameworks, since they are designed to work with relationships across columns. As such, we've created a class to handle this type of data.\nWe'll define a function get_elapsed for cumulative counting across a sorted dataframe. Given a particular field fld to monitor, this function will start tracking time since the last occurrence of that field. When the field is seen again, the counter is set to zero.\nUpon initialization, this will result in datetime na's until the field is encountered. This is reset every time a new store is seen. We'll see how to use this shortly.", "def get_elapsed(fld, pre):\n day1 = np.timedelta64(1, 'D')\n last_date = np.datetime64()\n last_store = 0\n res = []\n\n for s,v,d in zip(df.Store.values,df[fld].values, df.Date.values):\n if s != last_store:\n last_date = np.datetime64()\n last_store = s\n if v: last_date = d\n res.append(((d-last_date).astype('timedelta64[D]') / day1).astype(int))\n df[pre+fld] = res", "We'll be applying this to a subset of columns:", "columns = [\"Date\", \"Store\", \"Promo\", \"StateHoliday\", \"SchoolHoliday\"]\n\ndf = train[columns]\n\ndf = test[columns]", "Let's walk through an example.\nSay we're looking at School Holiday. 
We'll first sort by Store, then Date, and then call add_elapsed('SchoolHoliday', 'After'):\nThis will apply to each row with School Holiday:\n* A applied to every row of the dataframe in order of store and date\n* Will add to the dataframe the days since seeing a School Holiday\n* If we sort in the other direction, this will count the days until another holiday.", "fld = 'SchoolHoliday'\ndf = df.sort_values(['Store', 'Date'])\nget_elapsed(fld, 'After')\ndf = df.sort_values(['Store', 'Date'], ascending=[True, False])\nget_elapsed(fld, 'Before')", "We'll do this for two more fields.", "fld = 'StateHoliday'\ndf = df.sort_values(['Store', 'Date'])\nget_elapsed(fld, 'After')\ndf = df.sort_values(['Store', 'Date'], ascending=[True, False])\nget_elapsed(fld, 'Before')\n\nfld = 'Promo'\ndf = df.sort_values(['Store', 'Date'])\nget_elapsed(fld, 'After')\ndf = df.sort_values(['Store', 'Date'], ascending=[True, False])\nget_elapsed(fld, 'Before')", "We're going to set the active index to Date.", "df = df.set_index(\"Date\")", "Then set null values from elapsed field calculations to 0.", "columns = ['SchoolHoliday', 'StateHoliday', 'Promo']\n\nfor o in ['Before', 'After']:\n for p in columns:\n a = o+p\n df[a] = df[a].fillna(0)", "Next we'll demonstrate window functions in pandas to calculate rolling quantities.\nHere we're sorting by date (sort_index()) and counting the number of events of interest (sum()) defined in columns in the following week (rolling()), grouped by Store (groupby()). We do the same in the opposite direction.", "bwd = df[['Store']+columns].sort_index().groupby(\"Store\").rolling(7, min_periods=1).sum()\n\nfwd = df[['Store']+columns].sort_index(ascending=False\n ).groupby(\"Store\").rolling(7, min_periods=1).sum()", "Next we want to drop the Store indices grouped together in the window function.\nOften in pandas, there is an option to do this in place. This is time and memory efficient when working with large datasets.", "bwd.drop('Store',1,inplace=True)\nbwd.reset_index(inplace=True)\n\nfwd.drop('Store',1,inplace=True)\nfwd.reset_index(inplace=True)\n\ndf.reset_index(inplace=True)", "Now we'll merge these values onto the df.", "df = df.merge(bwd, 'left', ['Date', 'Store'], suffixes=['', '_bw'])\ndf = df.merge(fwd, 'left', ['Date', 'Store'], suffixes=['', '_fw'])\n\ndf.drop(columns,1,inplace=True)\n\ndf.head()", "It's usually a good idea to back up large tables of extracted / wrangled features before you join them onto another one, that way you can go back to it easily if you need to make changes to it.", "df.to_feather(f'{PATH}df')\n\ndf = pd.read_feather(f'{PATH}df')\n\ndf[\"Date\"] = pd.to_datetime(df.Date)\n\ndf.columns\n\njoined = join_df(joined, df, ['Store', 'Date'])\n\njoined_test = join_df(joined_test, df, ['Store', 'Date'])", "The authors also removed all instances where the store had zero sale / was closed. We speculate that this may have cost them a higher standing in the competition. One reason this may be the case is that a little exploratory data analysis reveals that there are often periods where stores are closed, typically for refurbishment. Before and after these periods, there are naturally spikes in sales that one might expect. 
By ommitting this data from their training, the authors gave up the ability to leverage information about these periods to predict this otherwise volatile behavior.", "joined = joined[joined.Sales!=0]", "We'll back this up as well.", "joined.reset_index(inplace=True)\njoined_test.reset_index(inplace=True)\n\njoined.to_feather(f'{PATH}joined')\njoined_test.to_feather(f'{PATH}joined_test')", "We now have our final set of engineered features.\nWhile these steps were explicitly outlined in the paper, these are all fairly typical feature engineering steps for dealing with time series data and are practical in any similar setting.\nCreate features", "joined = pd.read_feather(f'{PATH}joined')\njoined_test = pd.read_feather(f'{PATH}joined_test')\n\njoined.head().T.head(40)", "Now that we've engineered all our features, we need to convert to input compatible with a neural network.\nThis includes converting categorical variables into contiguous integers or one-hot encodings, normalizing continuous features to standard normal, etc...", "cat_vars = ['Store', 'DayOfWeek', 'Year', 'Month', 'Day', 'StateHoliday', 'CompetitionMonthsOpen',\n 'Promo2Weeks', 'StoreType', 'Assortment', 'PromoInterval', 'CompetitionOpenSinceYear', 'Promo2SinceYear',\n 'State', 'Week', 'Events', 'Promo_fw', 'Promo_bw', 'StateHoliday_fw', 'StateHoliday_bw',\n 'SchoolHoliday_fw', 'SchoolHoliday_bw']\n\ncontin_vars = ['CompetitionDistance', 'Max_TemperatureC', 'Mean_TemperatureC', 'Min_TemperatureC',\n 'Max_Humidity', 'Mean_Humidity', 'Min_Humidity', 'Max_Wind_SpeedKm_h', \n 'Mean_Wind_SpeedKm_h', 'CloudCover', 'trend', 'trend_DE',\n 'AfterStateHoliday', 'BeforeStateHoliday', 'Promo', 'SchoolHoliday']\n\nn = len(joined); n\n\ndep = 'Sales'\njoined = joined[cat_vars+contin_vars+[dep, 'Date']].copy()\n\njoined_test[dep] = 0\njoined_test = joined_test[cat_vars+contin_vars+[dep, 'Date', 'Id']].copy()\n\nfor v in cat_vars: joined[v] = joined[v].astype('category').cat.as_ordered()\n\napply_cats(joined_test, joined)\n\nfor v in contin_vars:\n joined[v] = joined[v].astype('float32')\n joined_test[v] = joined_test[v].astype('float32')", "We're going to run on a sample.", "idxs = get_cv_idxs(n, val_pct=150000/n)\njoined_samp = joined.iloc[idxs].set_index(\"Date\")\nsamp_size = len(joined_samp); samp_size", "To run on the full dataset, use this instead:", "samp_size = n\njoined_samp = joined.set_index(\"Date\")", "We can now process our data...", "joined_samp.head(2)\n\ndf, y, nas, mapper = proc_df(joined_samp, 'Sales', do_scale=True)\nyl = np.log(y)\n\njoined_test = joined_test.set_index(\"Date\")\n\ndf_test, _, nas, mapper = proc_df(joined_test, 'Sales', do_scale=True, skip_flds=['Id'],\n mapper=mapper, na_dict=nas)\n\ndf.head(2)", "In time series data, cross-validation is not random. Instead, our holdout data is generally the most recent data, as it would be in real application. 
This issue is discussed in detail in this post on our web site.\nOne approach is to take the last 25% of rows (sorted by date) as our validation set.", "train_ratio = 0.75\n# train_ratio = 0.9\ntrain_size = int(samp_size * train_ratio); train_size\nval_idx = list(range(train_size, len(df)))", "An even better option for picking a validation set is using the exact same length of time period as the test set uses - this is implemented here:", "val_idx = np.flatnonzero(\n (df.index<=datetime.datetime(2014,9,17)) & (df.index>=datetime.datetime(2014,8,1)))\n\nval_idx=[0]", "DL\nWe're ready to put together our models.\nRoot-mean-squared percent error is the metric Kaggle used for this competition.", "def inv_y(a): return np.exp(a)\n\ndef exp_rmspe(y_pred, targ):\n targ = inv_y(targ)\n pct_var = (targ - inv_y(y_pred))/targ\n return math.sqrt((pct_var**2).mean())\n\nmax_log_y = np.max(yl)\ny_range = (0, max_log_y*1.2)", "We can create a ModelData object directly from out data frame.", "md = ColumnarModelData.from_data_frame(PATH, val_idx, df, yl.astype(np.float32), cat_flds=cat_vars, bs=128,\n test_df=df_test)", "Some categorical variables have a lot more levels than others. Store, in particular, has over a thousand!", "cat_sz = [(c, len(joined_samp[c].cat.categories)+1) for c in cat_vars]\n\ncat_sz", "We use the cardinality of each variable (that is, its number of unique values) to decide how large to make its embeddings. Each level will be associated with a vector with length defined as below.", "emb_szs = [(c, min(50, (c+1)//2)) for _,c in cat_sz]\n\nemb_szs\n\nm = md.get_learner(emb_szs, len(df.columns)-len(cat_vars),\n 0.04, 1, [1000,500], [0.001,0.01], y_range=y_range)\nlr = 1e-3\n\nm.lr_find()\n\nm.sched.plot(100)", "Sample", "m = md.get_learner(emb_szs, len(df.columns)-len(cat_vars),\n 0.04, 1, [1000,500], [0.001,0.01], y_range=y_range)\nlr = 1e-3\n\nm.fit(lr, 3, metrics=[exp_rmspe])\n\nm.fit(lr, 5, metrics=[exp_rmspe], cycle_len=1)\n\nm.fit(lr, 2, metrics=[exp_rmspe], cycle_len=4)", "All", "m = md.get_learner(emb_szs, len(df.columns)-len(cat_vars),\n 0.04, 1, [1000,500], [0.001,0.01], y_range=y_range)\nlr = 1e-3\n\nm.fit(lr, 1, metrics=[exp_rmspe])\n\nm.fit(lr, 3, metrics=[exp_rmspe])\n\nm.fit(lr, 3, metrics=[exp_rmspe], cycle_len=1)", "Test", "m = md.get_learner(emb_szs, len(df.columns)-len(cat_vars),\n 0.04, 1, [1000,500], [0.001,0.01], y_range=y_range)\nlr = 1e-3\n\nm.fit(lr, 3, metrics=[exp_rmspe])\n\nm.fit(lr, 3, metrics=[exp_rmspe], cycle_len=1)\n\nm.save('val0')\n\nm.load('val0')\n\nx,y=m.predict_with_targs()\n\nexp_rmspe(x,y)\n\npred_test=m.predict(True)\n\npred_test = np.exp(pred_test)\n\njoined_test['Sales']=pred_test\n\ncsv_fn=f'{PATH}tmp/sub.csv'\n\njoined_test[['Id','Sales']].to_csv(csv_fn, index=False)\n\nFileLink(csv_fn)", "RF", "from sklearn.ensemble import RandomForestRegressor\n\n((val,trn), (y_val,y_trn)) = split_by_idx(val_idx, df.values, yl)\n\nm = RandomForestRegressor(n_estimators=40, max_features=0.99, min_samples_leaf=2,\n n_jobs=-1, oob_score=True)\nm.fit(trn, y_trn);\n\npreds = m.predict(val)\nm.score(trn, y_trn), m.score(val, y_val), m.oob_score_, exp_rmspe(preds, y_val)" ]
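The fastai call `md.get_learner(emb_szs, ...)` above builds one embedding layer per categorical variable from the `(cardinality, size)` pairs in `emb_szs`. A rough sketch of that entity-embedding idea in plain PyTorch, with made-up cardinalities and without claiming to mirror fastai's internals:

```python
import torch
import torch.nn as nn

# Hypothetical (cardinality, embedding size) pairs in the spirit of
# emb_szs = [(c, min(50, (c+1)//2)) for _, c in cat_sz]
emb_szs = [(1116, 50), (8, 4), (4, 2)]

# One lookup table per categorical variable: rows = levels, cols = embedding width.
embeddings = nn.ModuleList([nn.Embedding(c, s) for c, s in emb_szs])

# Two example rows of integer category codes, one column per variable.
cats = torch.tensor([[5, 3, 0], [10, 6, 2]])

# Look up each variable's vector and concatenate into the dense input of the network.
x = torch.cat([emb(cats[:, i]) for i, emb in enumerate(embeddings)], dim=1)
print(x.shape)  # torch.Size([2, 56]) -> 50 + 4 + 2 learned features
```

During training these lookup tables are updated by backpropagation, which is what gives each category level a learned vector representation rather than a one-hot code.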
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
valentina-s/GLM_PythonModules
notebooks/MLE_multipleNeuronsWeights.ipynb
bsd-2-clause
[ "This notebook presents how to perform maximum-likelihood parameter estimation for multiple neurons. The neurons depend on each other through a set of weights.", "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport random\nimport csv\n%matplotlib inline\n\nimport os\nimport sys\nsys.path.append(os.path.join(os.getcwd(),'..'))\nsys.path.append(os.path.join(os.getcwd(),'..','code'))\nsys.path.append(os.path.join(os.getcwd(),'..','data'))\n\nimport filters\nimport likelihood_functions as lk\nimport PoissonProcessClasses as PP\nimport auxiliary_functions as auxfun\n\nimport imp\nimp.reload(filters)\nimp.reload(lk)\nimp.reload(auxfun)\nimp.reload(PP)\n\n\n# Number of neurons\nnofCells = 2", "Reading input-output data:", "# creating the path to the data\ndata_path = os.path.join(os.getcwd(),'..','data')\n\n# reading stimulus\nStim = np.array(pd.read_csv(os.path.join(data_path,'Stim2.csv'),header = None))\n\n# reading location of spikes\n# (lengths of tsp sequences are not equal so reading them line by line)\ntsp_list = []\nwith open(os.path.join(data_path,'tsp2.csv')) as csvfile:\n tspreader = csv.reader(csvfile)\n for row in tspreader:\n tsp_list.append(row)", "Extracting a spike train from spike positions:", "dt = 0.01\ny_list = []\nfor tsp in tsp_list:\n tsp = np.array(tsp).astype(np.float)\n tsp_int = np.ceil((tsp - dt*0.001)/dt)\n tsp_int = np.reshape(tsp_int,(tsp_int.shape[0],1))\n tsp_int = tsp_int.astype(int)\n y_list.append(np.array([item in tsp_int for item in np.arange(Stim.shape[0]/dt)+1]).astype(int))", "Creating filters:", "# create a stimulus filter\nkpeaks = np.array([0,round(20/3)])\npars_k = {'neye':5,'n':5,'kpeaks':kpeaks,'b':3}\nK,K_orth,kt_domain = filters.createStimulusBasis(pars_k, nkt = 20) \n\n# create a post-spike filter\nhpeaks = np.array([0.1,2])\npars_h = {'n':5,'hpeaks':hpeaks,'b':.4}\nH,H_orth,ht_domain = filters.createPostSpikeBasis(pars_h,dt)\n\n# Interpolate Post Spike Filter\nMSP = auxfun.makeInterpMatrix(len(ht_domain),1)\nMSP[0,0] = 0\nH_orth = np.dot(MSP,H_orth)", "Conditional Intensity (spike rate):\n$$\\lambda_{\\beta}(i) = \\exp(K(\\beta_k)Stim + H(\\beta_h)y + \\sum_{j\\ne i}w_j I(\\beta_{I})*y_j) + \\mu$$\n$$\\lambda_{\\beta}(i) = \\exp(M_k\\beta_k + M_h \\beta_h + Y w + \\mu)$$\nCreating a matrix of covariates:", "M_k = lk.construct_M_k(Stim,K,dt)\n\nM_h_list = []\nfor tsp in tsp_list:\n tsp = np.array(tsp).astype(np.float)\n M_h_list.append(lk.construct_M_h(tsp,H_orth,dt,Stim))\n\n\n\n# creating a matrix of output covariates\nY = np.array(y_list).T", "<!---Simulating a neuron spike trains:-->", "# tsp_list = []\n# for i in range(nofCells):\n# tsp_list.append(auxfun.simSpikes(np.hstack((coeff_k,coeff_h)),M,dt))\n\nM_list = []\nfor i in range(len(M_h_list)):\n # exclude the i'th spike-train\n M_list.append(np.hstack((M_k,M_h_list[i],np.delete(Y,i,1),np.ones((M_k.shape[0],1)))))\n #M_list.append(np.hstack((M_k,M_h_list[i],np.ones((M_h.shape[0],1)))))", "Conditional intensity as a function of the covariates:\n$$ \\lambda_{\\beta} = \\exp(M\\beta) $$\nCreate a Poisson process model with this intensity:\nSetting initial parameters:", "coeff_k0 = np.array([ 0.061453,0.284916,0.860335,1.256983,0.910615,0.488660,-0.887091,0.097441,0.026607,-0.090147])\ncoeff_h0 = np.zeros((5,))\ncoeff_w0 = np.zeros((nofCells,))\nmu_0 = 0\n\npars0 = np.hstack((coeff_k0,coeff_h0,coeff_w0,mu_0))\npars0 = np.hstack((coeff_k0,coeff_h0,mu_0))\npars0 = np.zeros((17,))", "Fitting the likelihood:", "res_list = []\nfor i in range(len(y_list)):\n model = 
PP.PPModel(M_list[i].T,dt = dt/100)\n res_list.append(model.fit(y_list[i],start_coef = pars0,maxiter = 500, method = 'L-BFGS-B'))", "Specifying the true parameters:", "k_coeff = np.array([0.061453, 0.284916, 0.860335, 1.256983, 0.910615, 0.488660, -0.887091, 0.097441, 0.026607, -0.090147])\nh_coeff = np.array([-15.18,38.24,-67.58,-14.06,-3.36])\n\nfor i in range(len(res_list)):\n k_coeff_predicted = res_list[i].x[:10]\n h_coeff_predicted = res_list[i].x[10:15]\n print('Estimated dc for neuron '+str(i)+': '+str(res_list[i].x[-1]))\n fig,axs = plt.subplots(1,2,figsize = (10,5))\n fig.suptitle('Neuron%d'%(i+1))\n axs[0].plot(-kt_domain[::-1],np.dot(K,k_coeff_predicted),'r',label = 'predicted')\n axs[0].set_title('Stimulus Filter')\n axs[0].hold(True)\n axs[0].plot(-kt_domain[::-1],np.dot(K,k_coeff),'b',label = 'true')\n axs[0].plot(-kt_domain[::-1],np.dot(K,pars0[:10]),'g',label = 'initial')\n axs[0].set_xlabel('Time')\n axs[0].legend(loc = 'upper left')\n axs[1].set_title('Post-Spike Filter')\n axs[1].plot(ht_domain,np.dot(H_orth,h_coeff_predicted),'r',label = 'predicted')\n axs[1].plot(ht_domain,np.dot(H_orth,h_coeff),'b',label = 'true')\n axs[1].plot(ht_domain,np.dot(H_orth,coeff_h0[:H_orth.shape[1]]),'g',label = 'initial')\n axs[1].set_title('Post-Spike Filter')\n axs[1].set_xlabel('Time')\n axs[1].legend(loc = 'upper right')", "Extracting the weight matrix:", "W = np.array([np.hstack((res_list[i].x[-(nofCells):-nofCells+i],0,res_list[i].x[-nofCells+i:-1])) for i in range(len(res_list))])\nprint(W)", "Note: the stimulus and the post-spike estimates can be different for different neurons.\nNote: there might be some scale issue. Need to normalize these weights in some way." ]
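The actual optimization is handled by `PP.PPModel.fit` (in `PoissonProcessClasses` and `likelihood_functions`, not shown here). As a minimal sketch of the kind of objective such a fit minimizes, assuming the usual discretized Poisson-process likelihood for $\lambda = \exp(M\beta)$; the synthetic numbers below are made up:

```python
import numpy as np

def neg_log_likelihood(beta, M, y, dt):
    """M: (n_bins, n_covariates) design matrix; y: 0/1 spike indicator per bin."""
    lam = np.exp(M @ beta)                         # conditional intensity per bin
    return np.sum(lam * dt) - np.sum(y * np.log(lam * dt))

# Tiny synthetic check.
rng = np.random.default_rng(0)
M = np.column_stack([np.ones(100), rng.normal(size=100)])
beta_true = np.array([-3.0, 0.5])
dt = 0.01
y = rng.random(100) < np.exp(M @ beta_true) * dt   # Bernoulli approximation of each bin
print(neg_log_likelihood(beta_true, M, y, dt))
```

Passing an objective like this to `scipy.optimize.minimize` with the L-BFGS-B method used above would estimate the coefficient vector for each neuron.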
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
BjerknesClimateDataCentre/QuinCe
Documentation/Algorithms/SensorOffsets/Offset Experiments.ipynb
gpl-3.0
[ "Sensor Offsets\nThis notebook contains experiments used in determining the best way to work out sensor offsets in a dataset. See Github Issue #268.\nSetup\nWe use a peak detection algorithm described in this StackOverflow question.\nThe sample data here has had bad data removed (there were a lot of spikes). This implies that the offset detection alorithm should be run after the sensor QC stage.", "# Imports\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom math import floor\nimport sys\n\ndef thresholding_algo(y, lag, threshold, influence):\n signals = np.zeros(len(y))\n filteredY = np.array(y)\n avgFilter = [0]*len(y)\n stdFilter = [0]*len(y)\n avgFilter[lag - 1] = np.mean(y[0:lag])\n stdFilter[lag - 1] = np.std(y[0:lag])\n for i in range(lag, len(y)):\n if abs(y[i] - avgFilter[i-1]) > threshold * stdFilter[i-1]:\n if y[i] > avgFilter[i-1]:\n signals[i] = 1\n else:\n signals[i] = -1\n\n filteredY[i] = influence * y[i] + (1 - influence) * filteredY[i-1]\n avgFilter[i] = np.mean(filteredY[(i-lag+1):i+1])\n stdFilter[i] = np.std(filteredY[(i-lag+1):i+1])\n else:\n signals[i] = 0\n filteredY[i] = y[i]\n avgFilter[i] = np.mean(filteredY[(i-lag+1):i+1])\n stdFilter[i] = np.std(filteredY[(i-lag+1):i+1])\n\n return dict(signals = np.asarray(signals),\n avgFilter = np.asarray(avgFilter),\n stdFilter = np.asarray(stdFilter))\n", "Load data", "# Load TSG\ntsg = pd.read_csv('TSG.csv', names=['Time', 'SST'])\n\n# Load GO and make UNIX time colum\ngo = pd.read_csv('GO.tsv', sep='\\t')\n\ngo['UNIXTime'] = go.apply(\n lambda row:\n int(datetime.timestamp(\n datetime.strptime(f'{getattr(row, \"PC Date\")} {getattr(row, \"PC Time\")}', '%d/%m/%Y %H:%M:%S'))),\n axis=1)\n\nmin_time = min(tsg['Time'])\nif min(go['UNIXTime']) < min_time:\n min_time = min(go['UNIXTime'])\n \nmax_time = max(tsg['Time'])\nif max(go['UNIXTime']) > max_time:\n max_time = max(go['UNIXTime'])\n\nprint(f'Minimum time: {min_time}')\nprint(f'Maximum time: {max_time}')", "Set Time Range", "#time_start = 1547000000\n#time_end = 1552000000\n\ntime_start = 1543618976\ntime_end = 1558552742\n\ntsg_filtered = tsg.loc[(tsg['Time'] >= time_start) & (tsg['Time'] <= time_end)]\ngo_filtered = go.loc[(go['UNIXTime'] >= time_start) & (go['UNIXTime'] <= time_end)]\n\nprint(f'Select {len(tsg_filtered)}/{len(go_filtered)} values')", "SST from TSG and EqT from GO", "plt.figure(figsize=(16, 9))\nplt.scatter(tsg_filtered['Time'], tsg_filtered['SST'], label='SST', s=2)\nplt.scatter(go_filtered['UNIXTime'], go_filtered['equ temp'], label='EqT', s=2)\nplt.legend()", "Set up peak detector\nSet the parameters for the peak detection algorithm\n|Parameter |Meaning |\n|----------|-------------------------------------------------------------------------------|\n|lag |The number of values to use in the moving average. |\n|threshold |Signal threshold (in standard deviations from the moving average). |\n|influence |The influence of a signal as a fraction of the influence of normal data points.|\nBecause the two time series have different sampling rates, our lag is defined as a period of time, which is then converted to a number of measurements based on the mean spacing between measurements across each time series. 
For example, using our complete TSG dataset:\n\nTarget lag window: 1 day (86,400 seconds)\nNumber of measurements in TSG data set: 918,537\nTSG measurement range: 2018-11-30 23:02:56 to 2019-05-22 20:19:02 (14,937,366 seconds)\nSeconds per measurement: $\\frac{14937366}{918537} = 16.26$ measurements per second\nLag size = $floor\\left(\\frac{86400}{16.26}\\right) = 5313$ measurements\n\nNote that we assume that our measurements are regularly spaced. This is a good enough approximation for our needs.", "# Peak detector settings\nDEFAULT_LAG_WINDOW = 86400\nTHRESHOLD=2.5\nINFLUENCE=0.5\n\ndef calc_lag(times):\n value_count = len(times)\n time_range = times[len(times) - 1] - times[0]\n seconds_per_measurement = time_range / value_count\n \n lag_window = DEFAULT_LAG_WINDOW\n lag_size = floor(lag_window / seconds_per_measurement)\n while lag_size >= len(times):\n lag_window = floor(lag_window / 2)\n lag_size = floor(lag_window / seconds_per_measurement)\n \n return lag_size", "Run peak detector for SST and EqT", "sst_lag_size = calc_lag(list(tsg_filtered['Time']))\nprint(sst_lag_size)\nsst = list(tsg_filtered['SST'])\nsst_peaks = thresholding_algo(sst, sst_lag_size, THRESHOLD, INFLUENCE)\n\neqt_lag_size = calc_lag(list(go_filtered['UNIXTime']))\nprint(eqt_lag_size)\neqt = list(go_filtered['equ temp'])\neqt_peaks = thresholding_algo(eqt, eqt_lag_size, THRESHOLD, INFLUENCE)\n\nfig, (sst_ax, eqt_ax) = plt.subplots(2, figsize=(16, 10))\n\nsst_ax.scatter(tsg_filtered['Time'], tsg_filtered['SST'], label='SST', s=2)\nsst_ax2 = sst_ax.twinx()\nsst_ax2.set_ylim(-2,2)\nsst_ax2.plot(tsg_filtered['Time'], sst_peaks['signals'], color=\"orange\")\n\neqt_ax.scatter(go_filtered['UNIXTime'], go_filtered['equ temp'], label='EqT', s=2)\neqt_ax2 = eqt_ax.twinx()\neqt_ax2.set_ylim(-2,2)\neqt_ax2.plot(go_filtered['UNIXTime'], eqt_peaks['signals'], color=\"orange\")", "Extract peak positions\nFind the places in each time series where the peaks series goes from zero to one (or -1). We exclude periods where the source time series contains NaNs (not yet implemented).", "def get_peak_positions(times, peaks):\n peak_positions = list()\n \n last_value = peaks[0]\n \n for i in range(1, len(peaks)):\n if peaks[i] != 0:\n if last_value == 0:\n peak_positions.append((times[i], int(peaks[i])))\n \n last_value = peaks[i]\n \n return peak_positions\n\nsst_peak_positions = get_peak_positions(list(tsg_filtered['Time']), sst_peaks['signals'])\neqt_peak_positions = get_peak_positions(list(go_filtered['UNIXTime']), eqt_peaks['signals'])\n\nprint(f'SST peak count: {len(sst_peak_positions)}')\nprint(f'EqT peak count: {len(eqt_peak_positions)}')", "Calculate offsets\nOffsets are calculated using two lists of peak positions: the base_list and the adjust_list. For SST and EqT we adjust the EqT to match the SST. Since SST is physically before EqT in the instrument, all adjustments must be backwards in time. We apply a maximum distance threshold to adjustments to prevent accidental unrealistic adjustments if the peaks don't line up particularly well at certain times.\nWe loop through the adjust_list peak positions. For each of these we find the closest entry in the base_list that is:\n\nbefore the adjust_list peak\nin the same direction as the peak (up or down)\nwithin the maximum time limit\n\nIf there is a match we record the difference in times of the before_list and adjust_list peaks as the offset. 
If any two adjust values try to adjust to the same base value, we only accept the first one to ensure that regions of the adjust time series don't collapse into a black hole type situation.", "def make_offsets(base_list, adjust_list, max_difference=600):\n\n adjustments = list()\n \n # Keep track of where we are in the base list\n base_index = -1\n base_index_used = False\n \n # Loop through each peak in the adjust list\n for (pos, direction) in adjust_list:\n # Find the last base entry before the adjust entry\n while base_index < len(base_list) - 1 and base_list[base_index + 1][0] < pos:\n base_index += 1\n base_index_used = False\n \n # Check that the peak is within the limit and in the same direction\n if not base_index_used and base_list[base_index][1] == direction:\n\n offset = base_list[base_index][0] - pos\n if abs(offset) <= max_difference:\n adjustments.append((pos, offset))\n base_index_used = True\n \n return adjustments\n\neqt_offsets = make_offsets(sst_peak_positions, eqt_peak_positions, max_difference=600)\n\n# Extend the first and last offsets to the start and end of the full timeseries\neqt_offsets.insert(0, (list(go_filtered['UNIXTime'])[0], eqt_offsets[0][1]))\neqt_offsets.append((list(go_filtered['UNIXTime'])[len(go_filtered) - 1], eqt_offsets[len(eqt_offsets) - 1][1]))\n\n\noffset_times = [x[0] for x in eqt_offsets]\noffset_amounts = [x[1] * -1 for x in eqt_offsets]\n\nplt.figure(figsize=(16, 9))\nplt.plot(offset_times, offset_amounts)\n", "Further Adjustments\nWe shouldn't interpolate the offset across the large NaN gaps - we should treat each gap as its own start/end point. This is easily achieved by extrapolating the first/last offset of each section to the start/end of that section, as we do for the whole time series. We need to determine the definition of a long gap - maybe some function of the average time between offset adjustments?" ]
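One possible reading of the "Further Adjustments" note above, sketched with an assumed rule: treat any gap larger than some multiple of the median spacing between offset adjustments as a long gap, and split `eqt_offsets` into segments at those gaps so each segment gets its own start/end extrapolation. The factor of 5 is an arbitrary placeholder:

```python
import numpy as np

def split_on_long_gaps(offsets, factor=5):
    """offsets: time-sorted list of (time, offset) tuples, e.g. eqt_offsets."""
    times = np.array([t for t, _ in offsets])
    threshold = factor * np.median(np.diff(times))

    segments, current = [], [offsets[0]]
    for prev_t, point in zip(times[:-1], offsets[1:]):
        if point[0] - prev_t > threshold:
            segments.append(current)      # close the segment before a long gap
            current = []
        current.append(point)
    segments.append(current)
    return segments

segments = split_on_long_gaps(eqt_offsets)
print(len(segments))
```

Whether the median spacing is the right baseline (rather than, say, the mean spacing or a fixed number of seconds) is exactly the open question raised above.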
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
gschivley/ERCOT_power
Group classification/Group classification.ipynb
mit
[ "Notebook to classify power plants\nVariables include:\n- Fuel type\n- Historical ramp speeds\n- Historical efficiency\n- Location?", "%matplotlib inline\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport os\nimport sklearn as sk\n\nfrom cluster import Clusters\n\nimport os\nfilename = 'Cluster_Data_2.csv'\npath = '../Clean Data'\nfullpath = os.path.join(path, filename)\ncluster = Clusters(fullpath)\n\ncluster.make_clusters(n_clusters=range(4,26))\n\ncluster.evaluate_clusters()", "From the figures above, it is difficult to determine an optimal number of clusters. The silhouette score clearly shows that we need more than 5 clusters. 6 looks like a good number, but one of the clusters shows odd behavior when plotting generation change vs net demand change (looks like 2 solid regression lines in the figure).", "cluster_labels = cluster.export_cluster_id(k=16)\ncluster_labels[:5]\n\nlabeled_plants = cluster.label_and_export(k=16)\n\nlabeled_plants.to_clipboard()\n\nexport_df = fossil_with_ramp.loc[:,['year', 'plant_id', 'cluster_id_6']]\n\nexport_df.to_csv('Cluster labels.csv', index = False)", "PREVIOUS WORK BELOW\nCode above now does everything needed, below is kept for reference\nVariable definitions\nThese variables will be used to cluster the power plants.\n- year: year that the data is for\n- fuel type: primary fuel type used at the facility (SUB, LIG, NG, etc)\n- 1-hr ramp rate: maximum or 95 percentile rate of generation increase over 1 hour\n- 3-hr ramp rate: maximum or 95 percentile rate of generation increase over 3 hours\n- efficiency: might just change this to heat rate (MMBTU/MWh)\n- efficiency std: standard deviation in efficiency based on monthly values for the year\n- CF: capacity factor, which is the amount generated in a year divided by the theoretical maximum generation possible $MWh/(Capacity * 8760)$ (or hours per month for monthly values used in calculating the standard deviation)\n- CF std: standard deviation of CF based on monthly values for the year", "# cols = ['year', 'fuel type', '1-hr ramp rate', '3-hr ramp rate',\n# 'efficiency', 'efficiency std', 'CF', 'CF std']\n\n# #Add index of power plant IDs\n# df = pd.DataFrame(columns=cols)\n\nfilename = 'Cluster_Data.csv'\npath = '../Clean Data'\nfullpath = os.path.join(path, filename)\n\ncluster_df = pd.read_csv(fullpath)", "missing '1-hr ramp rate'", "cluster_df[cluster_df.plant_id==127]\n\ncluster_df[cluster_df.plant_id==3466]\n\ncluster_df[cluster_df.plant_id==3584]\n\ncluster_df['fuel_type'].unique()", "Filter out non-fossil plants", "fossil_codes = ['SUB', 'LIG', 'NG', 'DFO', 'PC']\nfossil_df = cluster_df.loc[cluster_df['fuel_type'].isin(fossil_codes)]\n\nfossil_df.describe()\n\n# Unique plants\nlen(fossil_df.dropna().loc[:,'plant_id'].unique())\n\nsns.distplot(fossil_df['capacity'].dropna())\n\nfossil_with_ramp = fossil_df.dropna()\nsns.distplot(fossil_with_ramp['capacity'])", "Start classifying only with all nan values dropped", "from sklearn.cluster import KMeans\nfrom sklearn.metrics import silhouette_samples, silhouette_score, calinski_harabaz_score\nfrom sklearn import preprocessing\n\nfossil_with_ramp.columns\n\nsns.pairplot(fossil_with_ramp, hue='fuel_type', vars=[u'capacity', u'capacity_factor', u'efficiency',\n u'ramp_rate'])", "The code below begins by scaling the X-vector data to mean 0 and a standard variance. 
It then loops through a wide range of k values for the number of clusters (3 to 14), calculating the labels for each plant, the Calinski Harabaz score, and the Silhouette score for each value of k.", "cluster_data = pd.DataFrame(index=range(3,15), columns=['n_clusters', 'score', 'silhouette'])\ncluster_labels = {}\n\nX = fossil_with_ramp[['capacity', 'capacity_factor', 'efficiency', 'ramp_rate']]\nX_scaled = preprocessing.StandardScaler().fit_transform(X)\n\nfor idx, n_clusters in enumerate(range(3,15)):\n cluster_data.loc[n_clusters, 'n_clusters'] = n_clusters\n clusterer = KMeans(n_clusters, random_state=42)\n\n\n # fit_clusters = clusterer.fit(X)\n# cluster_labels = clusterer.fit_predict(X)\n cluster_labels[n_clusters] = clusterer.fit_predict(X_scaled)\n \n # http://scikit-learn.org/stable/modules/clustering.html#calinski-harabaz-index\n # The score is higher when clusters are dense and well separated\n# score[idx] = metrics.calinski_harabaz_score(X, cluster_labels)\n cluster_data.loc[n_clusters, 'score'] = calinski_harabaz_score(X_scaled, cluster_labels[n_clusters])\n \n# silhouette[idx] = silhouette_score(X, cluster_labels)\n cluster_data.loc[n_clusters, 'silhouette'] = silhouette_score(X_scaled, cluster_labels[n_clusters])\n \n# print 'For ', n_clusters, ' clusters, the average silhouette score is :', silhouette[idx], \\\n# ' and the score is :', score[idx]", "The two score values are plotted below. They don't agree exactly on the optimal number of centers, but it looks like 6 or 8 is probably best.", "fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8,3))\ncluster_data.plot(y='score', ax=ax1)\n# ax1.plot(range(3,15), cluster_data['score'])\nax1.set_title('Calinski Harabaz score\\nHigher is better')\ncluster_data.plot(y='silhouette', ax=ax2)\n# ax2.plot(range(3,15), silhouette)\nax2.set_title('Silhouette score\\nLower is better')", "The integer appended to each of the cluster_id column labels is equal to the number of clusters.", "for n_clusters in cluster_labels.keys():\n fossil_with_ramp.loc[:,'cluster_id_{}'.format(n_clusters)] = cluster_labels[n_clusters]\n\nfossil_with_ramp.head()\n\nsns.pairplot(fossil_with_ramp, hue='cluster_id_6', vars=[u'capacity', u'capacity_factor', u'efficiency',\n u'ramp_rate'])", "Some basic information about each of the clusters\nI've created a bunch of tables and figures to better understand each of the clusters (k=6). The tables show mean, count, and variance. The first figure shows the number of power plants in each group (remember that each power plant is included for every year it operates and data is available). The second looks at the size (capacity). 
Finally, there is a figure with facets for mean Capacity, Capacity Factor, Efficiency, and Ramp Rate.", "drop_columns = ['cluster_id_{}'.format(i) for i in [3,4,5,7,8,9,10,11,12,13,14]]\ngrouped = fossil_with_ramp.drop(drop_columns, axis=1).groupby(['cluster_id_6', 'fuel_type'])\n\ngrouped.mean()\n\ngrouped.count()\n\ngrouped.std()\n\nsns.countplot('cluster_id_6', hue='fuel_type', data=fossil_with_ramp)\nplt.title('Count of plants in each cluster')\n\nsns.barplot('cluster_id_6', 'capacity', data=fossil_with_ramp, hue='fuel_type',\n estimator=sum)\nplt.title('Total Capacity of plants in each cluster')\n\n# data = fossil_with_ramp.drop(drop_columns, axis=1)\nfig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2,2, figsize=(10,8))\nsns.barplot('cluster_id_6', 'capacity', data=fossil_with_ramp, hue='fuel_type', ax=ax1)\nax1.set_title('Mean Capacity')\nsns.barplot('cluster_id_6', 'capacity_factor', data=fossil_with_ramp, hue='fuel_type', ax=ax2)\nax2.set_title('Mean Capacity Factor')\nsns.barplot('cluster_id_6', 'efficiency', data=fossil_with_ramp, hue='fuel_type', ax=ax3)\nax3.set_title('Mean Efficiency')\nsns.barplot('cluster_id_6', 'ramp_rate', data=fossil_with_ramp, hue='fuel_type', ax=ax4)\nax4.set_title('Mean Ramp Rate')\n# sns.barplot('Climate', 'HDD65', data=house, ax=ax2)\n\n# # Shrink the point sizes (scale), change the estimator from mean to median\n# sns.pointplot('Climate', 'HDD65', data=house, scale=0.7, estimator=np.median, ax=ax3)\n\n# # Adjust the bandwidth (smoothing)\n# sns.violinplot('Climate', 'HDD65', data=house, ax=ax4, bw=0.4)\n\nplt.tight_layout()", "What to do with these results\nOnce we verify that the input data are correct, we can export a df with columns year, plant_id, and cluster.\n|year|plant_id|cluster|\n|--|--|--|\nImport this data into the notebook Determine marginal units" ]
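A small follow-up sketch for reading a preferred k straight off the `cluster_data` table built earlier. For both criteria as computed here, larger values indicate better-defined clusters (the silhouette coefficient lies between -1 and 1, with higher being better), so `idxmax` is used for each; the columns were created with object dtype, hence the cast:

```python
best_by_ch = cluster_data['score'].astype(float).idxmax()
best_by_sil = cluster_data['silhouette'].astype(float).idxmax()
print(f'Calinski-Harabaz prefers k={best_by_ch}, silhouette prefers k={best_by_sil}')
```

This only automates the eyeballing done above; the choice of k=6 still rests on the cluster behavior seen in the pairplots.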
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
slimn/Data-Analyst
P1-Analyze the Stroop effect/Perceptual Phenomenon.ipynb
gpl-2.0
[ "Background Information\n1. What is our independent variable? What is our dependent variable?\nIndependent variable\nlist of words\n\ndependent variable\nreaction time to name the ink colors\n\n2. What is an appropriate set of hypotheses for this task? What kind of statistical test do you expect to perform? Justify your choices.\nHypotheses\nTime to name the ink colors of congruent words less than time to name the ink colors of incongruent words.\n\nNull hypothesis(H0) : Time to name the ink colors of congruent words and incongruent words are equaivalent.\n Alternative hypothesis(1) : Time to name the ink colors of congruent words less than incongruent words.\nStaistical test\ntwo-sample t-test.", "%matplotlib inline\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy.stats import ttest_ind\npath = '/Users/Slimn/Desktop/Work/Project/Udacity/NanoDegree/P1/stroopdata.csv'\ndf = pd.read_csv(path)\nprint df", "3. Report some descriptive statistics regarding this dataset. Include at least one measure of central tendency and at least one measure of variability.\nCongruent Incongruent\n\nMean 14.051125 22.01591667\nStandard Error 0.726550901 0.979195185\nMedian 14.3565 21.0175\nMode #N/A #N/A\nStandard Deviation 3.559357958 4.797057122\nSample Variance 12.66902907 23.01175704\nKurtosis -0.205224823 2.688900198\nSkewness 0.416899874 1.547590026\nRange 13.698 19.568\nMinimum 8.63 15.687\nMaximum 22.328 35.255\nSum 337.227 528.382\nCount 24 24\nConfidence Level(95.0%) 1.50298505 2.025619571", "summary = df.describe()\nsummary.transpose()", "4. Provide one or two visualizations that show the distribution of the sample data. Write one or two sentences noting what you observe about the plot or plots.", "#%matplotlib inline\n#plt.figure()\nax = df.boxplot()\nplt.title('Measure time takes to name the ink colors')\n#plt.figure()\n#plt.legend()", "From the plot, \nmean time for congruent word ~14.\nmean time for congruent word ~22.\nthe incongruent word have two sample outlier, this data set need to verify.\nparticipance taken short time to say for congruent word condition.\n5. Now, perform the statistical test and report your results. What is your confidence level and your critical statistic value? Do you reject the null hypothesis or fail to reject it? Come to a conclusion in terms of the experiment task. Did the results match up with your expectations?", "# Use scipy.stats.ttest_ind.\n#t_stat, p_val = ttest_ind(df['Congruent'], df['Incongruent'], equal_var=False)\nt_stat, p_val = ttest_ind(df['Congruent'], df['Incongruent'])\nprint \"t_stat \", t_stat\nprint \"P-value \", p_val\n", "Statistic Test\ncritical statistic value = 0.05\nconfidential level = 0.95\np_value < 0.05 \nreject null hypothesis \nCondclusion.\nAn independent-samples t-test was conducted to compare time to name ink color \nof congruent words and incongruent words conditions.\nThere was a significant difference in taken time for congruent words (Mean=14.0511, STD=3.5594) \nand incongruent words (Mean=22.01591, STD=4.7971) conditions; t(24)= -6.5323, p = 6.5102e-08\nThese results show that ink color really does have an effect to human brain befoe alphabet.\n6. Optional: What do you think is responsible for the effects observed? Can you think of an alternative or similar task that would result in a similar effect? 
Some research about the problem will be helpful for thinking about these two questions!\nThe human brain recognizes color before it processes the written word.\nA similar task is the way search engines are replacing our memory.\nResearch name: Google Effects on Memory: Cognitive Consequences of Having Information at Our Fingertips\nby Betsy Sparrow, Jenny Liu, Daniel M. Wegner\nReference\nhttps://en.wikipedia.org/wiki/Stroop_effect\nhttp://pandas.pydata.org/pandas-docs/version/0.17.0/api.html\nhttp://docs.scipy.org/doc/scipy/reference/stats.html\nhttp://www.randalolson.com/2012/08/06/statistical-analysis-made-easy-in-python/\nhttp://www.abc.net.au/science/articles/2011/07/15/3270222.htm\nhttp://www.sciencemag.org/content/333/6043/776" ]
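For comparison, the same 24 participants provide both the congruent and incongruent times, so a paired (dependent-samples) test is also a natural fit for this design. This is an added aside rather than part of the analysis above; `scipy.stats.ttest_rel` runs it directly:

```python
from scipy.stats import ttest_rel

t_stat_paired, p_val_paired = ttest_rel(df['Congruent'], df['Incongruent'])
print("paired t-statistic:", t_stat_paired)
print("paired p-value:", p_val_paired)
```

If the paired test also comes out significant, the conclusion of rejecting the null hypothesis is unchanged; the paired version simply accounts for each row being one person measured twice.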
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
AllenDowney/ThinkBayes2
examples/shower.ipynb
mit
[ "The Shower Problem\nThis notebook is one of the examples in the second edition of Think Bayes.\nClick here to run this notebook on Colab.\nRecently I posted an article about an optimal (maybe) strategy for plugging in a USB connector.\nI tweeted about the article and got a reply from Cameron Davidson-Pilon that pointed me to a related problem posted by Chris Said:\n\nThe Shower Problem\nHere’s the setup: You’re at your friend’s place and you need to take a shower. The shower knob is unlabeled. One direction is hot and the other direction is cold, and you don’t know which is which.\nYou turn it to the left. It’s cold. You wait.\nAt what point do you switch over to the right?\nLet’s make this more explicit.\n\n\nYour goal is to find a policy that minimizes the expected amount of time it takes to get hot water flowing out of the shower head. To simplify things, assume that the water coming out of the head is either hot or cold, and that the lukewarm transition time is effectively zero.\n\n\nYou know that the shower has a Time-To-Hot constant called $\\tau$. This value is defined as the time it takes for hot water to arrive, assuming you have turned the knob to the hot direction and keep it there.\n\n\nThe constant is a fixed property of the shower and is sampled once from a known distribution. You have certain knowledge of the distribution, but you don’t know $\\tau$.\n\n\nThe shower is memoryless, such that every time you turn the knob to the hot direction, it will take $\\tau$ seconds until the hot water arrives, regardless of your prior actions. Every time you turn it to the cold direction, only cold water will come out.\n\n\nI don’t know how to solve this problem. But as a starting point I realize it’s possible to keep track of the probability that the hot direction is to the left or to the right. In the animation above, the probability that the hot direction is to the right is just the unexplored white area under the right curve, divided by the total unexplored white area of both curves.\nBut how do you turn that into a policy for exploring the space? Does anybody know?\n\nChris provides a sample of 20,000 values for the time constant and the additional information that the distribution of $tau$ is Weibull with parameters $\\lambda=50$ and $k=1.5$.\nAlso, Chris keeps track of the leader board for best strategy. At the moment, the leader is Cameron Davidson-Pilon himself, with an average duration of 111.365 seconds.\nSo let's see if we can do any better. As in the previous problem, I'm going to start with the conjecture that the optimal strategy is to stay on the first side until the probability that you are on the right side drops below some threshold, then switch to the second side and stay until the probability drops below the same threshold.\nI am not confident that this strategy is actually optimal, but let's see if it does a decent job.\nSuppose we try on the first side for $t_1$ seconds and then try on the second side for $t_2$ seconds. At that point, we can use the survival function of the Weibull distribution to compute the log odds that the second side is correct:\n$$ LO = \\left( \\frac{t_1}{\\lambda} \\right)^k - \\left( \\frac{t_2}{\\lambda} \\right)^k$$\nNow suppose we want to stay on the current side until the log odds we're on the wrong side exceeds some threshold, x, and the time we've spent on the other side is other. 
\nThe following function computes how long we should stay.", "def wait_time(x, other, lam, k):\n term = (other / lam) ** k\n total = lam * (x + term) ** (1/k)\n return total", "Starting with $t_1=0$ and $t_2=0$, when $x=1$, the time we wait turns out to be $\\lambda$.", "right = 0\nleft = 0\nx = 1\nlam = 50\nk = 1.5\n\nright = wait_time(x, left, lam, k)\nright", "Now suppose we've waited 50 seconds on the left and we switch to the right. How long should we stay?", "left = wait_time(x, right, lam, k)\nleft", "And now if we've spent left second on the left and switch to the right, how long should we stay.", "right = wait_time(x, left, lam, k)\nright", "But all of that is based on the assumption that x=1. Let's see what happens to the average total time as we vary x.\nSimulation\nThe following function simulates the strategy I just outlined.\nIt takes as parameters:\n\ncorrect: A Boolean indicating if we're on the right side.\ntau: The actual time to get hot water.\nthis: The longest time we've tried on the current side.\nthat: The longest time we've tried on the other side..\nlam: The scale parameter for the distribution of time until success.\nk: The shape parameter for the distribution of time until success.\nx: The threshold for the posterior log odds.\ntrace: A list that indicates how much time we have spent, so far, trying and flipping.\n\nIt runs the simulation and returns a sequence of waiting times. The sum of this sequence is the total time it took to connect.", "def simulate(correct, tau, this, that, lam, k, x, trace):\n if len(trace) > 20:\n return 'Too many recursions'\n \n # print(correct, tau, this, that, lam, k, x, trace)\n \n # figure out the maximum time we should try before flipping\n wait = wait_time(x, that, lam, k)\n \n # if we're on the correct side, see if we succeed before time's up\n if correct and tau < wait:\n # if so, update and return the trace\n return trace + [tau]\n \n # if time expired, add the wait time to the trace \n # and make a recursive call to continue the simulation\n return simulate(not correct, tau, that, wait, lam, k, x, trace + [wait])", "Here are a few test runs with x=0.5 and various values of correct and tau.", "x = 0.5\nsimulate(True, 20, 0, 0, lam, k, x, [])\n\nsimulate(False, 20, 0, 0, lam, k, x, [])\n\nsimulate(True, 40, 0, 0, lam, k, x, [])\n\nsimulate(False, 40, 0, 0, lam, k, x, [])\n\nsimulate(True, 60, 0, 0, lam, k, x, [])\n\nsimulate(False, 60, 0, 0, lam, k, x, [])\n\nsimulate(True, 120, 0, 0, lam, k, x, [])\n\nsimulate(False, 120, 0, 0, lam, k, x, [])", "Let's get the test data from Chris.", "import os\n\nif not os.path.exists('shower_problem_tau_samples.csv'):\n !wget https://gist.github.com/csaid/a57c4ebaa1c7b0671cdc9692638ea4c4/raw/ad1709938834d7bc88b62ff0763733502eb6a329/shower_problem_tau_samples.csv\n\nimport pandas as pd\n\ntest_set = pd.read_csv('shower_problem_tau_samples.csv')\ntest_set.head()\n\ntest_set.describe()", "The following function uses the samples to run simulations and returns the average total time.", "import numpy as np\n\ndef run_simulations(samples, lam, k, x):\n res = []\n for i, (direction, tau) in samples.iterrows():\n # print(direction, tau)\n trace = simulate(direction, tau, 0, 0, lam, k, x, [])\n res.append((len(trace), sum(trace)))\n \n return np.transpose(res)", "I'll train the algorithm with a different sample from the same distribution.", "from scipy.stats import weibull_min\n\ntrain_set = test_set.copy()\ntrain_set['tau'] = weibull_min.rvs(k, scale=lam, size=20000)\n\ntrain_set.describe()", "Here's 
a test run with x=1.", "x = 1\nlengths, totals = run_simulations(train_set, lam, k, x)", "On average it takes about two tries and 116 seconds.", "lengths.mean()\n\ntotals.mean()", "Optimization\nNow let's see how the average duration varies as we sweep through a range of values for the threshold probability, r:", "xs = np.linspace(1, 3, 20)\nxs\n\nres = []\nfor x in xs:\n lengths, totals = run_simulations(train_set, lam, k, x)\n res.append((x, totals.mean()))\n\nfrom statsmodels.nonparametric.smoothers_lowess import lowess\n\ndef make_lowess(series):\n \"\"\"Use LOWESS to compute a smooth line.\n\n series: pd.Series\n\n returns: pd.Series\n \"\"\"\n endog = series.values\n exog = series.index.values\n\n smooth = lowess(endog, exog)\n index, data = np.transpose(smooth)\n\n return pd.Series(data, index=index)\n\ndef plot_series_lowess(series, color):\n \"\"\"Plots a series of data points and a smooth line.\n\n series: pd.Series\n color: string or tuple\n \"\"\"\n series.plot(lw=0, marker='o', color=color, alpha=0.5)\n smooth = make_lowess(series)\n smooth.plot(label='_', color=color)", "Here's what the results look like.", "import matplotlib.pyplot as plt\n\nrs, ts = np.transpose(res)\nseries = pd.Series(ts, rs)\n\nplot_series_lowess(series, 'C1')\n\nplt.xlabel(\"Threshold log odds where you flip (x)\")\nplt.ylabel(\"Average total duration (seconds)\");", "It looks like the optimal value of x is about 2.\nTesting\nNow let's run the simulations with the optimal value of x and see what the average total time is for the test data.", "lengths, totals = run_simulations(test_set, lam, k, 2)", "The average total time is about 102 seconds, so it looks like this strategy is better than the current leader (assuming I haven't made a mistake in the simulation).", "totals.mean()", "Here's the distribution of total time, represented as a CDF.", "try:\n import empiricaldist\nexcept ImportError:\n !pip install empiricaldist\n\nfrom empiricaldist import Cdf\n\nCdf.from_seq(totals).plot(lw=2)\n\nplt.xlabel('Total time to connect (seconds)')\nplt.ylabel('CDF')\nplt.title('Distribution of total time to connect');\n\nnp.percentile(totals, 90)", "And here's the distribution of flips.", "from empiricaldist import Pmf\n\npmf1 = Pmf.from_seq(lengths-1)\npmf1.bar(alpha=0.7)\n\nplt.xlabel('How many times you have to flip')\nplt.ylabel('PMF')\nplt.title('Distribution of number of flips');", "Fortunately, we don't have to flip more than once very often.\nIf you like this article, you might also like the second edition of Think Bayes.\nCopyright 2021 Allen Downey\nCode: MIT License\nText: Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)" ]
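As a short aside on where `wait_time` comes from: with the Weibull survival function $S(t) = \exp\left(-(t/\lambda)^k\right)$, staying on the current side for $t$ seconds while the other side has already been tried for $\mathit{other}$ seconds (both without hot water) gives log odds $(t/\lambda)^k - (\mathit{other}/\lambda)^k$ that the other side is actually the hot one, which is the expression quoted earlier. Setting that equal to the threshold $x$ and solving for $t$ gives

$$\left(\frac{t}{\lambda}\right)^k - \left(\frac{\mathit{other}}{\lambda}\right)^k = x
\quad\Longrightarrow\quad
t = \lambda\left(x + \left(\frac{\mathit{other}}{\lambda}\right)^k\right)^{1/k},$$

which is exactly the value `wait_time(x, other, lam, k)` returns.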
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
amcdawes/QMlabs
Lab 2 - Quantum States - Blank.ipynb
mit
[ "Quantum states\nUseful for working examples and problems with photon quantum states. You may notice some similarity to the Jones Calculus ;-)", "from numpy import sqrt, cos, sin\nfrom qutip import *", "These are the polarization states:", "H = Qobj([[1],[0]])\nV = Qobj([[0],[1]])\nP45 = Qobj([[1/sqrt(2)],[1/sqrt(2)]])\nM45 = Qobj([[1/sqrt(2)],[-1/sqrt(2)]])\nR = Qobj([[1/sqrt(2)],[-1j/sqrt(2)]])\nL = Qobj([[1/sqrt(2)],[1j/sqrt(2)]])", "Devices:\nHWP - Half-wave plate axis at $\\theta$ to the horizontal\nLP - Linear polarizer, axis at $\\theta$\nQWP - Quarter-wave plate, axis at $\\theta$\nNote, these are functions so you need to call them with a specific value of theta.", "def HWP(theta):\n return Qobj([[cos(2*theta),sin(2*theta)],[sin(2*theta),-cos(2*theta)]]).tidyup()\n\ndef LP(theta):\n return Qobj([[cos(theta)**2,cos(theta)*sin(theta)],[sin(theta)*cos(theta),sin(theta)**2]]).tidyup()\n\ndef QWP(theta):\n return Qobj([[cos(theta)**2 + 1j*sin(theta)**2,\n (1-1j)*sin(theta)*cos(theta)],\n [(1-1j)*sin(theta)*cos(theta),\n sin(theta)**2 + 1j*cos(theta)**2]]).tidyup()", "Example 1) Check that the $|H\\rangle$ state is normalized", "H.dag()*H\n\npsi = Qobj([[1+1j], [2-1j]])\npsi\n\npsi.dag()\n\npsi.dag().dag()", "1) Verify that the $|V\\rangle$ state is normalized\n2) Verify that the $|H\\rangle$ and $|V\\rangle$ states are orthogonal. Repeat for the other pairs of states.\n3) Find the horizontal component $c_H$ of the state $\\psi = \\frac{1}{\\sqrt{5}}|H\\rangle + \\frac{2}{\\sqrt{5}}|V\\rangle$\n4) Verify Eq. (3.18)" ]
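A quick usage example of the device functions defined above (it assumes numpy's `cos`, `sin`, and `pi` are available alongside `sqrt`): a half-wave plate with its axis at 22.5 degrees, i.e. $\theta = \pi/8$, rotates horizontal polarization into the +45 degree state.

```python
from numpy import pi

out = HWP(pi/8) * H   # half-wave plate at 22.5 degrees acting on |H>
print(out)            # should match P45, i.e. (|H> + |V>)/sqrt(2)
print(out == P45)
```

The same pattern (operator times ket) applies to `LP` and `QWP` when working the exercises.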
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
jphall663/GWU_data_mining
02_analytical_data_prep/src/py_part_2_feature_extraction.ipynb
apache-2.0
[ "License\n\nCopyright (C) 2017 J. Patrick Hall, jphall@gwu.edu\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nSimple feature extraction - Pandas and Scikit-Learn\nImports", "import pandas as pd # pandas for handling mixed data sets \nimport numpy as np # numpy for basic math and matrix operations\nimport matplotlib.pyplot as plt # pyplot for plotting\n\n# scikit-learn for machine learning and data preprocessing\nfrom sklearn.decomposition import PCA", "Perform basic feature extraction\nCreate a sample data set", "scratch_df = pd.DataFrame({'x1': [1, 2.5, 3, 4.5],\n 'x2': [1.5, 2, 3.5, 4]})\n\nscratch_df", "Compress x1 and x2 into a single principal component", "pca = PCA(n_components=1)\n\npca.fit(scratch_df)", "Principal components analysis finds vectors that represent that direction(s) of most variance in a data set. These are called eigenvectors.", "print('First eigenvector = ', pca.components_)", "Principal components are the projection of the data onto these eigenvectors. Principal components are usually centered around zero and each principal component is uncorrelated with all the others, i.e. principal components are orthogonal to one-another. Becuase prinicipal components represent the highest variance dimensions in the data and are not correlated with one another, they do an excellent job summarizing a data set with only a few dimensions (e.g. columns) and PCA is probably the most popular feature extraction technique.", "scratch_df['Centered_PC1'] = pca.transform(scratch_df[['x1', 'x2']])\nscratch_df['Non_centered_PC1'] = pca.transform(scratch_df[['x1', 'x2']] + pca.mean_)\nscratch_df['PC1_x1_back_projection'] = pd.Series(np.arange(1,8,2)) * pca.components_[0][0]\nscratch_df['PC1_x2_back_projection'] = pd.Series(np.arange(1,8,2)) * pca.components_[0][1]\nscratch_df\n\nx = plt.scatter(scratch_df.x1, scratch_df.x2, color='b')\npc, = plt.plot(scratch_df.PC1_x1_back_projection, scratch_df.PC1_x2_back_projection, color='r')\nplt.legend([x, pc], ['Observed data (x)', 'First principal component projection'], loc=4)\nplt.xlabel('x1')\nplt.ylabel('x2')\nplt.show()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
cfobel/colonists
colonists/notebooks/Colonists map data structures.ipynb
gpl-3.0
[ "%matplotlib inline\n\nimport numpy as np\nimport pandas as pd\n\nfrom colonists.hex_grid import HexGrid\nfrom colonists.rules import (get_hexes, assign_region_hex_indexes,\n shuffle_regions, mark_port_nodes, get_hex_roll_order,\n assign_collect_index, place_camp, get_empty_node_contents)\nfrom colonists.user_interface import plot_hexes", "Initial hex layout\n\nArrange land hexes in random order.\nArrange port hexes in random order, interleaved with sea hexes\n (one sea hex between each pair of port hexes).", "# ## Create hex grid ##\nhex_grid = HexGrid(8, 17, .165, 1.75)\n\nnp.random.seed(2)\n\n# ## Set up board on grid ##\n# - Assign region (land, port, sea) and terrain type (clay, sheep, ore, wheat, wood,\n# desert, clay port, sheep port, ore port, wheat port, wood port, 3:1 port, sea)\n# to each hex.\ndf_hexes = get_hexes(hex_grid.size)\n# - Shuffle hexes within regions.\nshuffle_regions(df_hexes, inplace=True)\n# - Mark nodes based on the corresponding trade rules to apply. If the nodes do\n# not correspond to any special trading privelege, assign a value of `NaN`.\ndf_nodes = mark_port_nodes(hex_grid.df_nodes, hex_grid.df_hex_paths,\n hex_grid.df_hex_links, df_hexes)\n# - Assign an ordered 0-based index to each hex within each region.\nassign_region_hex_indexes(df_hexes, inplace=True)\n\n# ### Assign collect index (i.e., dice number) to each *land* hex. ###\n#\n# - Select spiral direction and starting position for collect index assignment.\nclockwise = np.random.randint(2)\nshift = np.random.randint(0, 6)\nhex_roll_order = get_hex_roll_order(shift=shift, clockwise=clockwise)\n# - Assign collect index (i.e., dice number) to each *land* hex.\nassign_collect_index(df_hexes, hex_roll_order, inplace=True)\n\nimport matplotlib.pyplot as plt\n\nfig, axes = plt.subplots(1, 2, figsize=(10, 10))\n# Color each hex according to *region* type, label each hex by index.\nplot_hexes(df_nodes, df_hexes, hex_grid.df_hex_paths, colorby='region', axis=axes[0])\n# Color each hex according to *terrain* type, label each hex by index.\nplot_hexes(df_nodes, df_hexes, hex_grid.df_hex_paths, colorby='terrain', axis=axes[1])\n\nfig, axis = plt.subplots(figsize=(8, 10))\nplot_hexes(df_nodes, df_hexes, hex_grid.df_hex_paths, colorby='terrain',\n axis=axis, labelby='collect_index') \naxis.set_axis_off()\nselected = hex_grid.df_nodes.loc[[60]]\naxis.plot(selected.x.values, selected.y.values, marker='s',\n markersize=12, linestyle='none')\npass", "Place a camp [done]\nPlace a camp at a selected node, subject to the following conditions:\n\nSelected node cannot already be occupied by a camp or village.\nAny immediate neighbour (i.e., only a single edge away) of the selected\n node cannot already be occupied by a camp or village.", "selected = 60\ndf_node_contents = get_empty_node_contents(hex_grid.df_nodes)\ndf_result = place_camp(hex_grid.df_nodes, hex_grid.df_edges, df_hexes,\n hex_grid.df_hex_links, selected, df_node_contents)\ndf_node_contents.loc[selected]\ndf_result.loc[selected]" ]
[ "code", "markdown", "code", "markdown", "code" ]
rdhyee/diversity-census-calc
03_02_Displaying_Census_URLs.ipynb
apache-2.0
[ "%matplotlib inline\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pandas import DataFrame, Series, Index\nimport pandas as pd\n\n# check that CENSUS_KEY is defined\n\nimport settings\nassert settings.CENSUS_KEY is not None", "The census documentation has example URLs but needs your API key to work. In this notebook, we'll use the IPython notebook HTML display mechanism to help out.", "# http://api.census.gov/data/2010/sf1/geo.html\n\n\nfrom IPython.core.display import HTML\n\nHTML(\"<iframe src='http://api.census.gov/data/2010/sf1/geo.html' width='800px'/>\")\n\n%%HTML\n<b>hi there</b>\n\ntry:\n from urllib.parse import urlparse, urlencode, parse_qs, urlunparse\nexcept ImportError:\n from urlparse import urlparse, parse_qs, urlunparse\n from urllib import urlencode\n \nfrom IPython.core.display import HTML\n\ndef add_census_key(url, api_key=settings.CENSUS_KEY):\n \"\"\"Take an input example Census API call and a key parameter\"\"\"\n\n pr = urlparse(url)\n \n # we're going to modify the query, which is the 5th element in the tuple (index 4)\n pr1 = list(pr)\n \n # convert pr.query from string to dict\n # see http://stackoverflow.com/a/10233141/7782 for meaning of doseq\n pr_query = parse_qs(pr.query)\n pr_query[\"key\"]= api_key\n\n pr1[4] = urlencode(pr_query, doseq=True)\n \n return urlunparse(pr1)\n\n\ndef c_url (url, title=None, api_key=settings.CENSUS_KEY):\n url_with_key = add_census_key(url, api_key)\n if title is None:\n title = url\n return HTML(\"\"\"<a href=\"{url}\">{title}</a>\"\"\".format(url=url_with_key, title=title))\n\n#add_census_key(\"http://api.census.gov/data/2010/sf1?get=P0010001&for=county:*\")\nc_url(\"http://api.census.gov/data/2010/sf1?get=NAME,P0010001&for=state:*\")", "Scraping the examples", "import requests\nfrom lxml.html import parse, fromstring\n\nurl = \"http://api.census.gov/data/2010/sf1/geo.html\"\nr = requests.get(url).content\ndoc = fromstring(r)\n\nrows = doc.xpath(\"//table/tr\")\n\n# first row is the header\n\nheaders = [col.text for col in rows[0].findall('th')]\nheaders\n\n# next rows are the census URL examples\n\n\nrow = rows[1]\ncols = row.findall('td')\n\n# col[s0]: Summmary Level\n\nprint (cols[0].text)\n\n# cols[1]: Description\n\nprint (cols[1].text)\n\nfrom itertools import islice\nfrom lxml.html import parse\n\n# let's actually now decorate the urls\n\ndef decorated_parse_examples(examples, api_key=settings.CENSUS_KEY):\n for row in examples:\n new_row = row.copy()\n # need to change URLs\n \n example_urls_col = new_row[headers[2]]\n #urls_with_key = [add_census_key(url) for url in example_urls_col]\n \n new_row[headers[2]] = \"<br/>\".join(\n [\"\"\"<a href=\"{url_with_key}\">{url}</a>\"\"\".format(\n url=url, \n url_with_key=add_census_key(url)\n ) for url in example_urls_col\n ])\n \n yield new_row\n \ndef parse_urls_col(col):\n # http://stackoverflow.com/a/15074386/7782\n return [child for child in col.itertext()]\n\ndef parse_census_examples():\n\n url = \"http://api.census.gov/data/2010/sf1/geo.html\"\n doc = parse(url)\n\n rows = doc.xpath(\"//table/tr\")\n\n # first row is the header\n\n headers = [col.text for col in rows[0].findall('th')]\n\n for row in rows[1:]:\n cols = row.findall('td')\n yield ({headers[0]:cols[0].text, \n headers[1]:cols[1].text, \n headers[2]:parse_urls_col(cols[2])})\n\n#parsed_examples = list(islice(parse_census_examples(),None))\nparsed_examples = parse_census_examples()\n\n# let's redisplay the table with \n\nfrom IPython.display import HTML\nfrom jinja2 import 
Template\n\nURLS_TEMPLATE= \"\"\"\n <table>\n <tr>\n {% for header in headers %}\n <th>{{header}}</th>\n {% endfor %}\n </tr>\n {% for row in rows %}\n <tr>\n {% for header in headers %}\n <td>{{row[header]}}</td>\n {% endfor %}\n </tr>\n {% endfor %}\n </table>\"\"\"\n \ntemplate = Template(URLS_TEMPLATE)\nHTML(template.render(headers=headers, rows=decorated_parse_examples(parsed_examples))) " ]
[ "code", "markdown", "code", "markdown", "code" ]
biof-309-python/BIOF309-2016-Fall
Week_05/Week05 - 02 - Functions.ipynb
mit
[ "Functions\n\nDefinition:\nFunction: (1) In programming, a named section of a program that performs a specific task. In this sense, a function is a type of procedure or routine. Some programming languages make a distinction between a function, which returns a value, and a procedure, which performs some operation but does not return a value.\nMost programming languages come with a prewritten set of functions that are kept in a library. You can also write your own functions to perform specialized tasks.\n(2) The term function is also used synonymously with operation and command. For example, you execute the delete function to erase a word.\n\nFunctions are an essential ingredient of all programs, large and small, and serve as our primary medium to express computational processes in a programming language. Fundamentally, the qualities of good functions all reinforce the idea that functions are abstractions.\n\nEach function should have exactly one job. That job should be identifiable with a short name and characterizable in a single line of text. Functions that perform multiple jobs in sequence should be divided into multiple functions.\nDon't repeat yourself is a central tenet of software engineering. The so-called DRY principle states that multiple fragments of code should not describe redundant logic. Instead, that logic should be implemented once, given a name, and applied multiple times. If you find yourself copying and pasting a block of code, you have probably found an opportunity for functional abstraction.\nFunctions should be defined generally. Squaring is not in the Python Library precisely because it is a special case of the pow function, which raises numbers to arbitrary powers.\n\nSource\nIt may not be clear why it is worth the trouble to divide a program into functions. There are several reasons:\n\nCreating a new function gives you an opportunity to name a group of statements, which makes your program easier to read and debug.\nFunctions can make a program smaller by eliminating repetitive code. Later, if you make a change, you only have to make it in one place.\nDividing a long program into functions allows you to debug the parts one at a time and then assemble them into a working whole.\nWell-designed functions are often useful for many programs. Once you write and debug one, you can reuse it.\n\nThese guidelines improve the readability of code, reduce the number of errors, and often minimize the total amount of code written. Decomposing a complex task into concise functions is a skill that takes experience to master. Fortunately, Python provides several features to support your efforts.\nSource\nHow do we square a number? (Lets pretend we do not all have calculators, phones or computers to do this for us!)", "6 * 6", "The square is just a number multiplied by itself. In python the code that as:", "6**2", "We can also assign this calculation to a varaible:", "a = 6**2", "What happens if we need to square different numbers in our hypothetical program?\n\nCut and paste and change each time?", "a = 6**2\nb = 7**2\n\nprint(a)\nprint(b)", "What if I want to square a varaible x and set it equal to a? What if we want to square the variable y and set it equal to b?\nThis takes the code to a single level of abstraction", "x = 6\ny = 7\n\na = x**2\nb = y**2\n\nprint(a)\nprint(b)", "We see that we are executing the same code but with a different input, x or y. We have abstracted the code a little to introduce the varaibles x and y. 
\nHow do we convert this to a function?\nWhat is the pattern we see in the code above?\n\n\ninitialize a variable\n   x = 6\n\n\nsquare the variable and set the result equal to another variable\n   a = x**2\n\n\nprint the resulting variable\n   print(a)\n\n\nWe perform the same pattern again by replacing a with b and x with y.", "x = 6\na = x**2\nprint(a)", "How do we convert this to a function, exactly as it is?\nCreating a Function", "def square_x():\n    x = 6\n    a = x**2\n    print(a)", "def is the keyword for defining a function. Give it a name: square_x. Because it is a function, it also needs open and close () and a colon:\nWhat happens when we run this? \n.\n.\n.\nNothing!\nCalling a function means running it; just defining it is not enough.\nFunctions must be called to be run. Let's call the function:\nto call a function, simply write its name followed by open and close parentheses", "square_x()", "This function prints the number 36 only. You could call it 'print 36' if you want. We need to abstract further to see the power of this: because there is nothing in the middle of the parentheses (no parameters), this function always does the same thing. Let's change the function to take parameters.\nParameters\nParameters are variables that serve as input to a function. We can extract the x variable from our code to create a parameter x.", "def square(x):\n    # x = 6\n    a = x**2\n    print(a)", "The parameters are then fed into the function at the time of the call:", "square(6)", "We no longer define the x value within the function; we now do this outside of the function. We now have a function that will square any number given to it.\nReturning a Value From a Function\nIn our example we are printing the result of our function within the function. What if we wanted to use the result of our function? We can change the function to return a result, and then we can save the result and print it.", "x = 3 #set \"global\" variable equal to 3\n\ndef square(x): #still have single parameter, x; must define a function before calling it\n    a = x**2\n    return a #return is a keyword that allows us to return the result we computed earlier\n\nsquare(x)", "Now this function gets a huge boost in capability because its result can be assigned to a variable.\nWhat are the Requirements of a Function?\nWhat are the requirements of a function:\n- the def keyword\n- A function name (see PEP8 guidelines for function names here)\n- need open and close parentheses\n- A colon \":\" at the end of the def line\n- One or more lines of code in the body\nOptional components:\n- Zero or more parameters\n- Zero or more return variables\n    def function_name([parameters]):\n        body - some code which must be indented\n        [return object(s)]\n\nNote: to save functions, you would create a file like burkes_functions.py; this would be a plain text file.\nWhat if We Need Two or More Parameters?\nLet's abstract our function again and enable it to raise any number to any power.", "def square(x, y): #function with 2 parameters\n    a = x**y\n    return a\n\nsqr = square(6, 2) #must feed in both parameters when calling the function\nprint(sqr)", "Does the name square make sense any more?", "def power(x, y): #function with 2 parameters\n    a = x**y\n    return a\n\nsqr = power(6, 2) #must feed in both parameters when calling the function\nprint(sqr)\n\ndef power(x, y): #function with 2 parameters\n    return x**y\n\nsqr = power(6, 2) #must feed in both parameters when calling the function\nprint(sqr)", "Setting a Default Value to the Parameter", "def power(x, y=2): #removes the necessity of feeding in the second value when calling the function\n    a = x**y\n    return a\n\nsqr = 
power(6) #no need to give a value for y because it has a default, but a value can be passed to override the default\nprint(sqr)\n\nhelp(power)", "Describing Functions in Python\nTo define a function use the keyword def, as in:", "def my_function():\n    \"\"\"This function prints hello\"\"\" #three double quotes around the statement define a docstring\n    print(\"hello\")\n    return", "Note the use of the docstring, which will allow us to use the help function:\n-it can also extend to multiple lines, so be as descriptive as you can", "help(my_function) #help will show the entire description that is supplied in the docstring (for example, you could document defaults here)", "Help on function my_function in module main:\nmy_function()\n    This function prints hello\n\nReturning More Than One Result", "def power(x, y=2):\n    a = x**y\n    return a, x, y\n\nsqr, num, power = power(y=6, x=3) # note: this rebinds the name power to one of the returned values\nprint(sqr)\nprint(sqr, num, power)", "Pass by Reference vs Value (can SKIP over this - didn't discuss in class)\nArguments in Python are passed by object reference. If you mutate a mutable object (such as a list) inside a function, the change is visible back in the calling code; rebinding the parameter name to a new object is not. For example:", "#!/usr/bin/python\n\n# Function definition is here\ndef changeme( mylist ):\n   \"This mutates the passed list inside this function\"\n   mylist.append([1,2,3,4]);\n   print(\"Values inside the function: \", mylist)\n   return\n\n# Now you can call changeme function\nmylist = [10,20,30];\nchangeme( mylist );\nprint(\"Values outside the function: \", mylist)", "Calling Functions within Functions", "def power(x, y=2):\n    a = x**y\n    return a\n\nsqr = power(power(2)) #I've raised 2 to the second power to get 4, then fed 4 into power again to square it\nprint(sqr)", "Functions Checklist:\nSource\nThese may help you as you begin to work with functions:\n\nDid you start your function definition with def?\nDoes your function name have only letters, digits, and _ (underscore) characters?\nDid you put an open parenthesis ( right after the function name?\nDid you put your arguments after the parenthesis ( separated by commas?\nDid you make each argument unique (meaning no duplicated names)?\nDid you put a close parenthesis and a colon ): after the arguments?\nDid you indent all lines of code you want in the function four spaces? No more, no less.\nDid you \"end\" your function by going back to writing with no indent (dedenting we call it)?\n\nWhen you run (\"use\" or \"call\") a function, check these things:\n\nDid you call/use/run this function by typing its name?\nDid you put the ( character after the name to run it?\nDid you put the values you want into the parenthesis separated by commas?\nDid you end the function call with a ) character?\n\nPython Best Practices:\n\nAdvice on structuring a Python program overall" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
xpharry/Udacity-DLFoudation
tutorials/gan_mnist/Intro_to_GANs_Solution.ipynb
mit
[ "Generative Adversarial Network\nIn this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits!\nGANs were first reported on in 2014 from Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out:\n\nPix2Pix \nCycleGAN\nA whole list\n\nThe idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes fake data to pass to the discriminator. The discriminator also sees real data and predicts if the data it's received is real or fake. The generator is trained to fool the discriminator, it wants to output data that looks as close as possible to real data. And the discriminator is trained to figure out which data is real and which is fake. What ends up happening is that the generator learns to make data that is indistiguishable from real data to the discriminator.\n\nThe general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector the generator uses to contruct it's fake images. As the generator learns through training, it figures out how to map these random vectors to recognizable images that can foold the discriminator.\nThe output of the discriminator is a sigmoid function, where 0 indicates a fake image and 1 indicates an real image. If you're interested only in generating new images, you can throw out the discriminator after training. Now, let's see how we build this thing in TensorFlow.", "%matplotlib inline\n\nimport pickle as pkl\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST_data')", "Model Inputs\nFirst we need to create the inputs for our graph. We need two inputs, one for the discriminator and one for the generator. Here we'll call the discriminator input inputs_real and the generator input inputs_z. We'll assign them the appropriate sizes for each of the networks.", "def model_inputs(real_dim, z_dim):\n inputs_real = tf.placeholder(tf.float32, (None, real_dim), name='input_real') \n inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z')\n \n return inputs_real, inputs_z", "Generator network\n\nHere we'll build the generator network. To make this network a universal function approximator, we'll need at least one hidden layer. We should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values.\nVariable Scope\nHere we need to use tf.variable_scope for two reasons. Firstly, we're going to make sure all the variable names start with generator. Similarly, we'll prepend discriminator to the discriminator variables. This will help out later when we're training the separate networks.\nWe could just use tf.name_scope to set the names, but we also want to reuse these networks with different inputs. For the generator, we're going to train it, but also sample from it as we're training and after training. The discriminator will need to share variables between the fake and real input images. 
So, we can use the reuse keyword for tf.variable_scope to tell TensorFlow to reuse the variables instead of creating new ones if we build the graph again.\nTo use tf.variable_scope, you use a with statement:\npython\nwith tf.variable_scope('scope_name', reuse=False):\n    # code here\nHere's more from the TensorFlow documentation to get another look at using tf.variable_scope.\nLeaky ReLU\nTensorFlow doesn't provide an operation for leaky ReLUs, so we'll need to make one ourselves. For this you can take the outputs from a linear fully connected layer and pass them to tf.maximum. Typically, a parameter alpha sets the magnitude of the output for negative values. So, the output for negative input (x) values is alpha*x, and the output for positive x is x:\n$$\nf(x) = \\max(\\alpha x, x)\n$$\nTanh Output\nThe generator has been found to perform best with $\\tanh$ for the generator output. This means that we'll have to rescale the MNIST images to be between -1 and 1, instead of 0 and 1. Along with the $\\tanh$ output, we also need to return the logits for use in calculating the loss with tf.nn.sigmoid_cross_entropy_with_logits.", "def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):\n    with tf.variable_scope('generator', reuse=reuse):\n        # Hidden layer\n        h1 = tf.layers.dense(z, n_units, activation=None)\n        # Leaky ReLU\n        h1 = tf.maximum(alpha * h1, h1)\n        \n        # Logits and tanh output\n        logits = tf.layers.dense(h1, out_dim)\n        out = tf.tanh(logits)\n        \n        return out, logits", "Discriminator\nThe discriminator network is almost exactly the same as the generator network, except that we're using a sigmoid output layer.", "def discriminator(x, n_units=128, reuse=False, alpha=0.01):\n    with tf.variable_scope('discriminator', reuse=reuse):\n        # Hidden layer\n        h1 = tf.layers.dense(x, n_units, activation=None)\n        # Leaky ReLU\n        h1 = tf.maximum(alpha * h1, h1)\n        \n        logits = tf.layers.dense(h1, 1, activation=None)\n        out = tf.sigmoid(logits)\n        \n        return out, logits", "Hyperparameters", "# Size of input image to discriminator\ninput_size = 784\n# Size of latent vector to generator\nz_size = 100\n# Sizes of hidden layers in generator and discriminator\ng_hidden_size = 128\nd_hidden_size = 128\n# Leak factor for leaky ReLU\nalpha = 0.01\n# Smoothing \nsmooth = 0.1", "Build network\nNow we're building the network from the functions defined above.\nFirst we get our inputs, input_real, input_z from model_inputs using the sizes of the input and z.\nThen, we'll create the generator, generator(input_z, input_size). This builds the generator with the appropriate input and output sizes.\nThen the discriminators. We'll build two of them, one for real data and one for fake data. Since we want the weights to be the same for both real and fake data, we need to reuse the variables. For the fake data, we're getting it from the generator as g_model. So the real data discriminator is discriminator(input_real) while the fake discriminator is discriminator(g_model, reuse=True).", "tf.reset_default_graph()\n# Create our input placeholders\ninput_real, input_z = model_inputs(input_size, z_size)\n\n# Build the model\ng_model, g_logits = generator(input_z, input_size)\n# g_model is the generator output\n\nd_model_real, d_logits_real = discriminator(input_real)\nd_model_fake, d_logits_fake = discriminator(g_model, reuse=True)", "Discriminator and Generator Losses\nNow we need to calculate the losses, which is a little tricky. 
For the discriminator, the total loss is the sum of the losses for real and fake images, d_loss = d_loss_real + d_loss_fake. The losses will be sigmoid cross-entropies, which we can get with tf.nn.sigmoid_cross_entropy_with_logits. We'll also wrap that in tf.reduce_mean to get the mean for all the images in the batch. So the losses will look something like \npython\ntf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))\nFor the real image logits, we'll use d_logits_real which we got from the discriminator in the cell above. For the labels, we want them to be all ones, since these are all real images. To help the discriminator generalize better, the labels are reduced a bit from 1.0 to 0.9, for example, using the parameter smooth. This is known as label smoothing, typically used with classifiers to improve performance. In TensorFlow, it looks something like labels = tf.ones_like(tensor) * (1 - smooth)\nThe discriminator loss for the fake data is similar. The logits are d_logits_fake, which we got from passing the generator output to the discriminator. These fake logits are used with labels of all zeros. Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that.\nFinally, the generator losses are using d_logits_fake, the fake image logits. But, now the labels are all ones. The generator is trying to fool the discriminator, so it wants the discriminator to output ones for fake images.", "# Calculate losses\nd_loss_real = tf.reduce_mean(\n                  tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, \n                                                          labels=tf.ones_like(d_logits_real) * (1 - smooth)))\nd_loss_fake = tf.reduce_mean(\n                  tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, \n                                                          labels=tf.zeros_like(d_logits_real)))\nd_loss = d_loss_real + d_loss_fake\n\ng_loss = tf.reduce_mean(\n             tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,\n                                                     labels=tf.ones_like(d_logits_fake)))", "Optimizers\nWe want to update the generator and discriminator variables separately. So we need to get the variables for each part and build optimizers for the two parts. To get all the trainable variables, we use tf.trainable_variables(). This creates a list of all the variables we've defined in our graph.\nFor the generator optimizer, we only want the generator variables. Our past selves were nice and used a variable scope to start all of our generator variable names with generator. So, we just need to iterate through the list from tf.trainable_variables() and keep the variables that start with generator. Each variable object has an attribute name which holds the name of the variable as a string (var.name == 'weights_0' for instance). \nWe can do something similar with the discriminator. All the variables in the discriminator start with discriminator.\nThen, in the optimizer we pass the variable lists to var_list in the minimize method. This tells the optimizer to only update the listed variables. 
Something like tf.train.AdamOptimizer().minimize(loss, var_list=var_list) will only train the variables in var_list.", "# Optimizers\nlearning_rate = 0.002\n\n# Get the trainable_variables, split into G and D parts\nt_vars = tf.trainable_variables()\ng_vars = [var for var in t_vars if var.name.startswith('generator')]\nd_vars = [var for var in t_vars if var.name.startswith('discriminator')]\n\nd_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)\ng_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)", "Training", "!mkdir checkpoints\n\nbatch_size = 100\nepochs = 100\nsamples = []\nlosses = []\n# Only save generator variables\nsaver = tf.train.Saver(var_list=g_vars)\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for e in range(epochs):\n for ii in range(mnist.train.num_examples//batch_size):\n batch = mnist.train.next_batch(batch_size)\n \n # Get images, reshape and rescale to pass to D\n batch_images = batch[0].reshape((batch_size, 784))\n batch_images = batch_images*2 - 1\n \n # Sample random noise for G\n batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))\n \n # Run optimizers\n _ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})\n _ = sess.run(g_train_opt, feed_dict={input_z: batch_z})\n \n # At the end of each epoch, get the losses and print them out\n train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})\n train_loss_g = g_loss.eval({input_z: batch_z})\n \n print(\"Epoch {}/{}...\".format(e+1, epochs),\n \"Discriminator Loss: {:.4f}...\".format(train_loss_d),\n \"Generator Loss: {:.4f}\".format(train_loss_g)) \n # Save losses to view after training\n losses.append((train_loss_d, train_loss_g))\n \n # Sample from generator as we're training for viewing afterwards\n sample_z = np.random.uniform(-1, 1, size=(16, z_size))\n gen_samples, _ = sess.run(\n generator(input_z, input_size, reuse=True),\n feed_dict={input_z: sample_z})\n samples.append(gen_samples)\n saver.save(sess, './checkpoints/generator.ckpt')\n\n# Save training generator samples\nwith open('train_samples.pkl', 'wb') as f:\n pkl.dump(samples, f)", "Training loss\nHere we'll check out the training losses for the generator and discriminator.", "fig, ax = plt.subplots()\nlosses = np.array(losses)\nplt.plot(losses.T[0], label='Discriminator')\nplt.plot(losses.T[1], label='Generator')\nplt.title(\"Training Losses\")\nplt.legend()", "Generator samples from training\nHere we can view samples of images from the generator. First we'll look at images taken while training.", "def view_samples(epoch, samples):\n fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)\n for ax, img in zip(axes.flatten(), samples[epoch]):\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n im = ax.imshow(img.reshape((28,28)), cmap='Greys_r')\n \n return fig, axes\n\n# Load samples from generator taken while training\nwith open('train_samples.pkl', 'rb') as f:\n samples = pkl.load(f)", "These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 1, 7, 3, 2. Since this is just a sample, it isn't representative of the full range of images this generator can make.", "_ = view_samples(-1, samples)", "Below I'm showing the generated images as the network was training, every 10 epochs. 
With bonus optical illusion!", "rows, cols = 10, 6\nfig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)\n\nfor sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):\n    for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):\n        ax.imshow(img.reshape((28,28)), cmap='Greys_r')\n        ax.xaxis.set_visible(False)\n        ax.yaxis.set_visible(False)", "It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number-like structures appear out of the noise, like 1s and 9s.\nSampling from the generator\nWe can also get completely new images from the generator by using the checkpoint we saved after training. We just need to pass in a new latent vector $z$ and we'll get new samples!", "saver = tf.train.Saver(var_list=g_vars)\nwith tf.Session() as sess:\n    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))\n    sample_z = np.random.uniform(-1, 1, size=(16, z_size))\n    gen_samples, _ = sess.run(\n                   generator(input_z, input_size, reuse=True),\n                   feed_dict={input_z: sample_z})\n_ = view_samples(0, [gen_samples])",
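"As one more experiment (a sketch that goes beyond the original notebook), we can interpolate between two latent vectors and view the generator's output along the path; this is a common way to probe how smoothly the latent space is organized.", "# Sketch (not in the original notebook): linear interpolation between two\n# latent vectors, viewed with the same 4x4 grid helper\nz_start = np.random.uniform(-1, 1, size=(1, z_size))\nz_end = np.random.uniform(-1, 1, size=(1, z_size))\nalphas = np.linspace(0, 1, 16).reshape(-1, 1)\nz_path = (1 - alphas)*z_start + alphas*z_end\n\nsaver = tf.train.Saver(var_list=g_vars)\nwith tf.Session() as sess:\n    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))\n    interp_samples, _ = sess.run(\n                   generator(input_z, input_size, reuse=True),\n                   feed_dict={input_z: z_path})\n_ = view_samples(0, [interp_samples])" ]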
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
turbomanage/training-data-analyst
courses/machine_learning/deepdive/06_structured/4_preproc.ipynb
apache-2.0
[ "<h1> Preprocessing using Dataflow </h1>\n\nThis notebook illustrates:\n<ol>\n<li> Creating datasets for Machine Learning using Dataflow\n</ol>\n<p>\nWhile Pandas is fine for experimenting, for operationalization of your workflow, it is better to do preprocessing in Apache Beam. This will also help if you need to preprocess data in flight, since Apache Beam also allows for streaming.", "pip install --user apache-beam[gcp]", "Run the command again if you are getting oauth2client error.\nNote: You may ignore the following responses in the cell output above:\nERROR (in Red text) related to: witwidget-gpu, fairing\nWARNING (in Yellow text) related to: hdfscli, hdfscli-avro, pbr, fastavro, gen_client\n<b>Restart</b> the kernel before proceeding further (On the Notebook menu - <b>Kernel</b> - <b>Restart Kernel<b>).\nMake sure the Dataflow API is enabled by going to this link. Ensure that you've installed Beam by importing it and printing the version number.", "import apache_beam as beam\nprint(beam.__version__)", "You may receive a UserWarning about the Apache Beam SDK for Python 3 as not being yet fully supported. Don't worry about this.", "# change these to try this notebook out\nBUCKET = 'cloud-training-demos-ml'\nPROJECT = 'cloud-training-demos'\nREGION = 'us-central1'\n\nimport os\nos.environ['BUCKET'] = BUCKET\nos.environ['PROJECT'] = PROJECT\nos.environ['REGION'] = REGION\n\n%%bash\nif ! gsutil ls | grep -q gs://${BUCKET}/; then\n gsutil mb -l ${REGION} gs://${BUCKET}\nfi", "<h2> Save the query from earlier </h2>\n\nThe data is natality data (record of births in the US). My goal is to predict the baby's weight given a number of factors about the pregnancy and the baby's mother. Later, we will want to split the data into training and eval datasets. The hash of the year-month will be used for that.", "# Create SQL query using natality data after the year 2000\nquery = \"\"\"\nSELECT\n weight_pounds,\n is_male,\n mother_age,\n plurality,\n gestation_weeks,\n FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING))) AS hashmonth\nFROM\n publicdata.samples.natality\nWHERE year > 2000\n\"\"\"\n\n# Call BigQuery and examine in dataframe\nfrom google.cloud import bigquery\ndf = bigquery.Client().query(query + \" LIMIT 100\").to_dataframe()\ndf.head()", "<h2> Create ML dataset using Dataflow </h2>\nLet's use Cloud Dataflow to read in the BigQuery data, do some preprocessing, and write it out as CSV files.\nInstead of using Beam/Dataflow, I had three other options:\n\nUse Cloud Dataprep to visually author a Dataflow pipeline. Cloud Dataprep also allows me to explore the data, so we could have avoided much of the handcoding of Python/Seaborn calls above as well!\nRead from BigQuery directly using TensorFlow.\nUse the BigQuery console (http://bigquery.cloud.google.com) to run a Query and save the result as a CSV file. For larger datasets, you may have to select the option to \"allow large results\" and save the result into a CSV file on Google Cloud Storage. \n\n<p>\n\nHowever, in this case, I want to do some preprocessing, modifying data so that we can simulate what is known if no ultrasound has been performed. If I didn't need preprocessing, I could have used the web console. Also, I prefer to script it out rather than run queries on the user interface, so I am using Cloud Dataflow for the preprocessing.\n\nNote that after you launch this, the actual processing is happening on the cloud. Go to the GCP webconsole to the Dataflow section and monitor the running job. 
It took about 20 minutes for me.\n<p>\nIf you wish to continue without doing this step, you can copy my preprocessed output:\n<pre>\ngsutil -m cp -r gs://cloud-training-demos/babyweight/preproc gs://your-bucket/\n</pre>", "import datetime, os\n\ndef to_csv(rowdict):\n # Pull columns from BQ and create a line\n import hashlib\n import copy\n CSV_COLUMNS = 'weight_pounds,is_male,mother_age,plurality,gestation_weeks'.split(',')\n\n # Create synthetic data where we assume that no ultrasound has been performed\n # and so we don't know sex of the baby. Let's assume that we can tell the difference\n # between single and multiple, but that the errors rates in determining exact number\n # is difficult in the absence of an ultrasound.\n no_ultrasound = copy.deepcopy(rowdict)\n w_ultrasound = copy.deepcopy(rowdict)\n\n no_ultrasound['is_male'] = 'Unknown'\n if rowdict['plurality'] > 1:\n no_ultrasound['plurality'] = 'Multiple(2+)'\n else:\n no_ultrasound['plurality'] = 'Single(1)'\n\n # Change the plurality column to strings\n w_ultrasound['plurality'] = ['Single(1)', 'Twins(2)', 'Triplets(3)', 'Quadruplets(4)', 'Quintuplets(5)'][rowdict['plurality'] - 1]\n\n # Write out two rows for each input row, one with ultrasound and one without\n for result in [no_ultrasound, w_ultrasound]:\n data = ','.join([str(result[k]) if k in result else 'None' for k in CSV_COLUMNS])\n key = hashlib.sha224(data.encode('utf-8')).hexdigest() # hash the columns to form a key\n yield str('{},{}'.format(data, key))\n \ndef preprocess(in_test_mode):\n import shutil, os, subprocess\n job_name = 'preprocess-babyweight-features' + '-' + datetime.datetime.now().strftime('%y%m%d-%H%M%S')\n\n if in_test_mode:\n print('Launching local job ... hang on')\n OUTPUT_DIR = './preproc'\n shutil.rmtree(OUTPUT_DIR, ignore_errors=True)\n os.makedirs(OUTPUT_DIR)\n else:\n print('Launching Dataflow job {} ... 
hang on'.format(job_name))\n OUTPUT_DIR = 'gs://{0}/babyweight/preproc/'.format(BUCKET)\n try:\n subprocess.check_call('gsutil -m rm -r {}'.format(OUTPUT_DIR).split())\n except:\n pass\n\n options = {\n 'staging_location': os.path.join(OUTPUT_DIR, 'tmp', 'staging'),\n 'temp_location': os.path.join(OUTPUT_DIR, 'tmp'),\n 'job_name': job_name,\n 'project': PROJECT,\n 'region': REGION,\n 'teardown_policy': 'TEARDOWN_ALWAYS',\n 'no_save_main_session': True,\n 'num_workers': 4,\n 'max_num_workers': 5\n }\n opts = beam.pipeline.PipelineOptions(flags = [], **options)\n if in_test_mode:\n RUNNER = 'DirectRunner'\n else:\n RUNNER = 'DataflowRunner'\n p = beam.Pipeline(RUNNER, options = opts)\n query = \"\"\"\nSELECT\n weight_pounds,\n is_male,\n mother_age,\n plurality,\n gestation_weeks,\n FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING))) AS hashmonth\nFROM\n publicdata.samples.natality\nWHERE year > 2000\nAND weight_pounds > 0\nAND mother_age > 0\nAND plurality > 0\nAND gestation_weeks > 0\nAND month > 0\n \"\"\"\n\n if in_test_mode:\n query = query + ' LIMIT 100' \n\n for step in ['train', 'eval']:\n if step == 'train':\n selquery = 'SELECT * FROM ({}) WHERE ABS(MOD(hashmonth, 4)) < 3'.format(query)\n else:\n selquery = 'SELECT * FROM ({}) WHERE ABS(MOD(hashmonth, 4)) = 3'.format(query)\n\n (p \n | '{}_read'.format(step) >> beam.io.Read(beam.io.BigQuerySource(query = selquery, use_standard_sql = True))\n | '{}_csv'.format(step) >> beam.FlatMap(to_csv)\n | '{}_out'.format(step) >> beam.io.Write(beam.io.WriteToText(os.path.join(OUTPUT_DIR, '{}.csv'.format(step))))\n )\n\n job = p.run()\n if in_test_mode:\n job.wait_until_finish()\n print(\"Done!\")\n \npreprocess(in_test_mode = False)", "The above step will take 20+ minutes. Go to the GCP web console, navigate to the Dataflow section and <b>wait for the job to finish</b> before you run the following step.", "%%bash\ngsutil ls gs://${BUCKET}/babyweight/preproc/*-00000*", "Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
SciTools/cube_browser
doc/write_your_own/connections/filePickerSimple.ipynb
bsd-3-clause
[ "Creating a File Picker\nA widget which enables the user to select multiple files from the file system, returning a list.", "import glob\n\nimport IPython.display\nimport ipywidgets", "Create a text input widget to capture the filepath.", "path = ipywidgets.Text(\n description='String:',\n value='/tmp')\n\nIPython.display.display(path)", "Create a SelectMultiple widget, using the passed in path as a source and globing the results.", "options = glob.glob('{}/*'.format(path.value))\n\nfiles = ipywidgets.SelectMultiple(\n description='Dataset(s)',\n options=options)\n\nIPython.display.display(files)", "Print the selected tuple of values for clarity.", "print files.value" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
diego0020/va_course_2015
AstroML/notebooks/08_regression_example.ipynb
mit
[ "Regression Example\nYou'll need to modify the DATA_HOME variable to the location of the datasets.\nIn this tutorial we'll use the the colors of 400,000+ galaxies from the Sloan Digital Sky Survey. \nThe data was downloaded with the script fetch_data.py. This script also includes a python implementation of the SQL query used to construct this data. This template can be modified to download more features if desired.", "import os\nDATA_HOME = os.path.abspath('C:/temp/AstroML/data/sdss_photoz/')", "First we will load this data, shuffle it in preparation for later, and arrange the colors in an array of shape (n_samples, n_features):", "import numpy as np\ndata = np.load(os.path.join(DATA_HOME,'sdss_photoz.npy'))", "The data is in a record array, as in the classification example", "print(data.dtype.names)", "Now we'll set up our data matrix X and redshift z", "N = len(data)\nX = np.zeros((N, 4), dtype=np.float32)\nX[:, 0] = data['u'] - data['g']\nX[:, 1] = data['g'] - data['r']\nX[:, 2] = data['r'] - data['i']\nX[:, 3] = data['i'] - data['z']\nz = data['redshift']\nX = np.nan_to_num(X)", "Next we’ll split the data into two samples: a training sample and a test sample which we’ll use to evaluate our training:", "Ntrain = 3 * N / 4\nXtrain = X[:Ntrain]\nztrain = z[:Ntrain]\nXtest = X[Ntrain:]\nztest = z[Ntrain:]", "Now we’ll use the scikit-learn DecisionTreeRegressor method\nto train a model and predict redshifts for the test set based\non a 20-level decision tree:", "from sklearn.tree import DecisionTreeRegressor\nclf = DecisionTreeRegressor(max_depth=20)\nclf.fit(Xtrain, ztrain)\nzpred = clf.predict(Xtest)", "To judge the efficacy of prediction, we can compute the\nroot-mean-square (RMS) difference between the true and predicted values:", "rms = np.sqrt(np.mean((ztest - zpred) ** 2))\nprint rms", "Our RMS error is about 0.22. This is pretty good for such an unsophisticated\nlearning algorithm, but better algorithms can improve on this. The biggest\nissue here are the catastrophic errors, where the predicted redshift is\nextremely far from the prediction:", "print \"Number of test samples: \", len(ztest)\nprint \"Number of catastrophic errors:\", np.sum(abs(ztest - zpred) > 1)", "About 1.5% of objects have redshift estimates which are off by greater than 1.\nThis sort of error in redshift determination is very problematic for\nhigh-precision cosmological studies. This can be seen in a scatter plot of\nthe predicted redshift versus the true redshift for the test data:", "%matplotlib inline\nimport matplotlib.pyplot as pl\n\nax = pl.axes()\n\npl.scatter(ztest, zpred, c='k', lw=0, s=4)\naxis_lim = np.array([0, 2.5])\n\n# plot the true redshift\npl.plot(axis_lim, axis_lim, '--k')\n\n# plot +/- the rms\npl.plot(axis_lim, axis_lim + rms, '--r') \npl.plot(axis_lim, axis_lim - rms, '--r')\npl.xlim(axis_lim)\npl.ylim(axis_lim)\n\npl.title('Photo-z: Decision Tree Regression')\npl.xlabel(r'$\\mathrm{z_{true}}$', fontsize=14)\npl.ylabel(r'$\\mathrm{z_{phot}}$', fontsize=14)", "The true and predicted redshifts of 102,798 SDSS galaxies, using a simple decision tree regressor. Notice the presece of catastrophic outliers: those galaxies whose predicted redshifts are extremely far from the true value.\nLater, in Exercise #2, we will attempt to improve on this by optimizing the parameters of the decision tree.\nIn practice, the solutions to the photometric redshift problem can benefit from approaches that use physical intuition as well as machine learning tools. 
For example, some solutions involve the use of libraries of synthetic galaxy spectra which are known to be representative of the true galaxy distribution. This extra information can be used either directly, in a physically motivated analysis, or to generate a larger suite of artificial training instances for a pure machine learning approach.",
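"As a preview of the Exercise #2 idea (a sketch, not the official solution), we can sweep the max_depth parameter and watch how the test RMS error responds; trees that are too shallow underfit, while trees that are too deep overfit:", "# Sketch (not the official Exercise #2 solution): sweep the tree depth\n# and report the held-out RMS error for each setting\nfor depth in [5, 10, 15, 20]:\n    clf = DecisionTreeRegressor(max_depth=depth)\n    clf.fit(Xtrain, ztrain)\n    zpred = clf.predict(Xtest)\n    print depth, np.sqrt(np.mean((ztest - zpred) ** 2))" ]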
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
PyPSA/PyPSA
examples/notebooks/unit-commitment.ipynb
mit
[ "Unit commitment\nThis tutorial runs through examples of unit commitment for generators at a single bus. Examples of minimum part-load, minimum up time, minimum down time, start up costs, shut down costs and ramp rate restrictions are shown.\nTo enable unit commitment on a generator, set its attribute committable = True.", "import pypsa\nimport pandas as pd", "Minimum part load demonstration\nIn final hour load goes below part-load limit of coal gen (30%), forcing gas to commit.", "nu = pypsa.Network(snapshots=range(4))\n\nnu.add(\"Bus\", \"bus\")\n\nnu.add(\n \"Generator\",\n \"coal\",\n bus=\"bus\",\n committable=True,\n p_min_pu=0.3,\n marginal_cost=20,\n p_nom=10000,\n)\n\nnu.add(\n \"Generator\",\n \"gas\",\n bus=\"bus\",\n committable=True,\n marginal_cost=70,\n p_min_pu=0.1,\n p_nom=1000,\n)\n\nnu.add(\"Load\", \"load\", bus=\"bus\", p_set=[4000, 6000, 5000, 800])\n\nnu.lopf()\n\nnu.generators_t.status\n\nnu.generators_t.p", "Minimum up time demonstration\nGas has minimum up time, forcing it to be online longer", "nu = pypsa.Network(snapshots=range(4))\n\nnu.add(\"Bus\", \"bus\")\n\nnu.add(\n \"Generator\",\n \"coal\",\n bus=\"bus\",\n committable=True,\n p_min_pu=0.3,\n marginal_cost=20,\n p_nom=10000,\n)\n\nnu.add(\n \"Generator\",\n \"gas\",\n bus=\"bus\",\n committable=True,\n marginal_cost=70,\n p_min_pu=0.1,\n up_time_before=0,\n min_up_time=3,\n p_nom=1000,\n)\n\nnu.add(\"Load\", \"load\", bus=\"bus\", p_set=[4000, 800, 5000, 3000])\n\nnu.lopf()\n\nnu.generators_t.status\n\nnu.generators_t.p", "Minimum down time demonstration\nCoal has a minimum down time, forcing it to go off longer.", "nu = pypsa.Network(snapshots=range(4))\n\nnu.add(\"Bus\", \"bus\")\n\nnu.add(\n \"Generator\",\n \"coal\",\n bus=\"bus\",\n committable=True,\n p_min_pu=0.3,\n marginal_cost=20,\n min_down_time=2,\n down_time_before=1,\n p_nom=10000,\n)\n\nnu.add(\n \"Generator\",\n \"gas\",\n bus=\"bus\",\n committable=True,\n marginal_cost=70,\n p_min_pu=0.1,\n p_nom=4000,\n)\n\nnu.add(\"Load\", \"load\", bus=\"bus\", p_set=[3000, 800, 3000, 8000])\n\nnu.lopf()\n\nnu.objective\n\nnu.generators_t.status\n\nnu.generators_t.p", "Start up and shut down costs\nNow there are associated costs for shutting down, etc", "nu = pypsa.Network(snapshots=range(4))\n\nnu.add(\"Bus\", \"bus\")\n\nnu.add(\n \"Generator\",\n \"coal\",\n bus=\"bus\",\n committable=True,\n p_min_pu=0.3,\n marginal_cost=20,\n min_down_time=2,\n start_up_cost=5000,\n p_nom=10000,\n)\n\nnu.add(\n \"Generator\",\n \"gas\",\n bus=\"bus\",\n committable=True,\n marginal_cost=70,\n p_min_pu=0.1,\n shut_down_cost=25,\n p_nom=4000,\n)\n\nnu.add(\"Load\", \"load\", bus=\"bus\", p_set=[3000, 800, 3000, 8000])\n\nnu.lopf(nu.snapshots)\n\nnu.objective\n\nnu.generators_t.status\n\nnu.generators_t.p", "Ramp rate limits", "nu = pypsa.Network(snapshots=range(6))\n\nnu.add(\"Bus\", \"bus\")\n\nnu.add(\n \"Generator\",\n \"coal\",\n bus=\"bus\",\n marginal_cost=20,\n ramp_limit_up=0.1,\n ramp_limit_down=0.2,\n p_nom=10000,\n)\n\nnu.add(\"Generator\", \"gas\", bus=\"bus\", marginal_cost=70, p_nom=4000)\n\nnu.add(\"Load\", \"load\", bus=\"bus\", p_set=[4000, 7000, 7000, 7000, 7000, 3000])\n\nnu.lopf()\n\nnu.generators_t.p\n\nnu = pypsa.Network(snapshots=range(6))\n\nnu.add(\"Bus\", \"bus\")\n\nnu.add(\n \"Generator\",\n \"coal\",\n bus=\"bus\",\n marginal_cost=20,\n ramp_limit_up=0.1,\n ramp_limit_down=0.2,\n p_nom_extendable=True,\n capital_cost=1e2,\n)\n\nnu.add(\"Generator\", \"gas\", bus=\"bus\", marginal_cost=70, p_nom=4000)\n\nnu.add(\"Load\", \"load\", 
bus=\"bus\", p_set=[4000, 7000, 7000, 7000, 7000, 3000])\n\nnu.lopf(nu.snapshots)\n\nnu.generators.p_nom_opt\n\nnu.generators_t.p\n\nnu = pypsa.Network(snapshots=range(7))\n\nnu.add(\"Bus\", \"bus\")\n\n# Can get bad interactions if SU > RU and p_min_pu; similarly if SD > RD\nnu.add(\n \"Generator\",\n \"coal\",\n bus=\"bus\",\n marginal_cost=20,\n committable=True,\n p_min_pu=0.05,\n initial_status=0,\n ramp_limit_start_up=0.1,\n ramp_limit_up=0.2,\n ramp_limit_down=0.25,\n ramp_limit_shut_down=0.15,\n p_nom=10000.0,\n)\n\nnu.add(\"Generator\", \"gas\", bus=\"bus\", marginal_cost=70, p_nom=10000)\n\nnu.add(\"Load\", \"load\", bus=\"bus\", p_set=[0.0, 200.0, 7000, 7000, 7000, 2000, 0])\n\nnu.lopf()\n\nnu.generators_t.p\n\nnu.generators_t.status\n\nnu.generators.loc[\"coal\"]", "Rolling horizon example\nThis example solves sequentially in batches", "sets_of_snapshots = 6\np_set = [4000, 5000, 700, 800, 4000]\n\nnu = pypsa.Network(snapshots=range(len(p_set) * sets_of_snapshots))\n\nnu.add(\"Bus\", \"bus\")\n\nnu.add(\n \"Generator\",\n \"coal\",\n bus=\"bus\",\n committable=True,\n p_min_pu=0.3,\n marginal_cost=20,\n min_down_time=2,\n min_up_time=3,\n up_time_before=1,\n ramp_limit_up=1,\n ramp_limit_down=1,\n ramp_limit_start_up=1,\n ramp_limit_shut_down=1,\n shut_down_cost=150,\n start_up_cost=200,\n p_nom=10000,\n)\n\nnu.add(\n \"Generator\",\n \"gas\",\n bus=\"bus\",\n committable=True,\n marginal_cost=70,\n p_min_pu=0.1,\n up_time_before=2,\n min_up_time=3,\n shut_down_cost=20,\n start_up_cost=50,\n p_nom=1000,\n)\n\nnu.add(\"Load\", \"load\", bus=\"bus\", p_set=p_set * sets_of_snapshots)\n\noverlap = 2\nfor i in range(sets_of_snapshots):\n nu.lopf(nu.snapshots[i * len(p_set) : (i + 1) * len(p_set) + overlap], pyomo=False)\n\npd.concat(\n {\"Active\": nu.generators_t.status.astype(bool), \"Output\": nu.generators_t.p}, axis=1\n)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
muku42/bokeh
examples/interactions/interactive_bubble/gapminder.ipynb
bsd-3-clause
[ "from IPython.display import display, HTML\n\nimport pandas as pd\n\nfrom jinja2 import Template\n\nfrom bokeh.models import (\n ColumnDataSource, Plot, Circle, Range1d, \n LinearAxis, HoverTool, Text,\n SingleIntervalTicker, Slider, CustomJS\n)\nfrom bokeh.palettes import Spectral6\nfrom bokeh.plotting import vplot\nfrom bokeh.resources import Resources\nfrom bokeh.embed import file_html\n\nfrom data import process_data", "Setting up the data\nThe plot animates with the slider showing the data over time from 1964 to 2013. We can think of each year as a seperate static plot, and when the slider moves, we use the Callback to change the data source that is driving the plot.\nWe could use bokeh-server to drive this change, but as the data is not too big we can also pass all the datasets to the javascript at once and switch between them on the client side.\nThis means that we need to build one data source for each year that we have data for and are going to switch between using the slider. We build them and add them to a dictionary sources that holds them under a key that is the name of the year preficed with a _.", "fertility_df, life_expectancy_df, population_df_size, regions_df, years, regions = process_data()\n\nsources = {}\n\nregion_color = regions_df['region_color']\nregion_color.name = 'region_color'\n\nfor year in years:\n fertility = fertility_df[year]\n fertility.name = 'fertility'\n life = life_expectancy_df[year]\n life.name = 'life' \n population = population_df_size[year]\n population.name = 'population' \n new_df = pd.concat([fertility, life, population, region_color], axis=1)\n sources['_' + str(year)] = ColumnDataSource(new_df)", "sources looks like this\n```\n{'_1964': <bokeh.models.sources.ColumnDataSource at 0x7f7e7d165cc0>,\n '_1965': <bokeh.models.sources.ColumnDataSource at 0x7f7e7d165b00>,\n '_1966': <bokeh.models.sources.ColumnDataSource at 0x7f7e7d1656a0>,\n '_1967': <bokeh.models.sources.ColumnDataSource at 0x7f7e7d165ef0>,\n '_1968': <bokeh.models.sources.ColumnDataSource at 0x7f7e7e9dac18>,\n '_1969': <bokeh.models.sources.ColumnDataSource at 0x7f7e7e9da9b0>,\n '_1970': <bokeh.models.sources.ColumnDataSource at 0x7f7e7e9da668>,\n '_1971': <bokeh.models.sources.ColumnDataSource at 0x7f7e7e9da0f0>...\n```\nWe will pass this dictionary to the Callback. In doing so, we will find that in our javascript we have an object called, for example _1964 that refers to our ColumnDataSource. Note that we needed the prefixing _ as JS objects cannot begin with a number.\nFinally we construct a string that we can insert into our javascript code to define an object.\nThe string looks like this: {1962: _1962, 1963: _1963, ....}\nNote the keys of this object are integers and the values are the references to our ColumnDataSources from above. 
So that now, in our JS code, we have an object that's storing all of our ColumnDataSources and we can look them up.", "dictionary_of_sources = dict(zip([x for x in years], ['_%s' % x for x in years]))\njs_source_array = str(dictionary_of_sources).replace(\"'\", \"\")", "Build the plot", "# Set up the plot\nxdr = Range1d(1, 9)\nydr = Range1d(20, 100)\nplot = Plot(\n x_range=xdr,\n y_range=ydr,\n title=\"\",\n plot_width=800,\n plot_height=400,\n outline_line_color=None,\n toolbar_location=None, \n)\nAXIS_FORMATS = dict(\n minor_tick_in=None,\n minor_tick_out=None,\n major_tick_in=None,\n major_label_text_font_size=\"10pt\",\n major_label_text_font_style=\"normal\",\n axis_label_text_font_size=\"10pt\",\n\n axis_line_color='#AAAAAA',\n major_tick_line_color='#AAAAAA',\n major_label_text_color='#666666',\n\n major_tick_line_cap=\"round\",\n axis_line_cap=\"round\",\n axis_line_width=1,\n major_tick_line_width=1,\n)\n\nxaxis = LinearAxis(SingleIntervalTicker(interval=1), axis_label=\"Children per woman (total fertility)\", **AXIS_FORMATS)\nyaxis = LinearAxis(SingleIntervalTicker(interval=20), axis_label=\"Life expectancy at birth (years)\", **AXIS_FORMATS) \nplot.add_layout(xaxis, 'below')\nplot.add_layout(yaxis, 'left')", "Add the background year text\nWe add this first so it is below all the other glyphs", "# Add the year in background (add before circle)\ntext_source = ColumnDataSource({'year': ['%s' % years[0]]})\ntext = Text(x=2, y=35, text='year', text_font_size='150pt', text_color='#EEEEEE')\nplot.add_glyph(text_source, text)", "Add the bubbles and hover\nWe add the bubbles using the Circle glyph. We start from the first year of data and that is our source that drives the circles (the other sources will be used later).\nplot.add_glyph returns the renderer, and we pass this to the HoverTool so that hover only happens for the bubbles on the page and not other glyph elements.", "# Add the circle\nrenderer_source = sources['_%s' % years[0]]\ncircle_glyph = Circle(\n x='fertility', y='life', size='population',\n fill_color='region_color', fill_alpha=0.8, \n line_color='#7c7e71', line_width=0.5, line_alpha=0.5)\ncircle_renderer = plot.add_glyph(renderer_source, circle_glyph)\n\n# Add the hover (only against the circle and not other plot elements)\ntooltips = \"@index\"\nplot.add_tools(HoverTool(tooltips=tooltips, renderers=[circle_renderer]))", "Add the legend\nFinally we manually build the legend by adding circles and texts to the upper-right portion of the plot.", "text_x = 7\ntext_y = 95\nfor i, region in enumerate(regions):\n plot.add_glyph(Text(x=text_x, y=text_y, text=[region], text_font_size='10pt', text_color='#666666'))\n plot.add_glyph(Circle(x=text_x - 0.1, y=text_y + 2, fill_color=Spectral6[i], size=10, line_color=None, fill_alpha=0.8))\n text_y = text_y - 5 ", "Add the slider and callback\nLast, but not least, we add the slider widget and the JS callback code which changes the data of the renderer_source (powering the bubbles / circles) and the data of the text_source (powering background text). After we've set() the data we need to trigger() a change. slider, renderer_source, text_source are all available because we add them as args to Callback.\nIt is the combination of sources = %s % (js_source_array) in the JS and Callback(args=sources...) 
that provides the ability to look-up, by year, the JS version of our python-made ColumnDataSource.", "# Add the slider\ncode = \"\"\"\n var year = slider.get('value'),\n sources = %s,\n new_source_data = sources[year].get('data');\n renderer_source.set('data', new_source_data);\n text_source.set('data', {'year': [String(year)]});\n\"\"\" % js_source_array\n\ncallback = CustomJS(args=sources, code=code)\nslider = Slider(start=years[0], end=years[-1], value=1, step=1, title=\"Year\", callback=callback, name='testy')\ncallback.args[\"renderer_source\"] = renderer_source\ncallback.args[\"slider\"] = slider\ncallback.args[\"text_source\"] = text_source", "Embed in a template and render\nLast but not least, we use vplot to stick togethre the chart and the slider. And we embed that in a template we write using the script, div output from components.\nWe display it in IPython and save it as an html file.", "# Stick the plot and the slider together\nlayout = vplot(plot, slider)\n\n# Open our custom template\nwith open('gapminder_template.jinja', 'r') as f:\n template = Template(f.read())\n\n# Use inline resources\nresources = Resources(mode='inline') \ntemplate_variables = {\n 'bokeh_min_js': resources.js_raw[0]\n}\nhtml = file_html(layout, resources, \"Bokeh - Gapminder Bubble Plot\", template=template, template_variables=template_variables)\n\ndisplay(HTML(html))" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ozorich/phys202-2015-work
assignments/assignment10/ODEsEx01.ipynb
mit
[ "Ordinary Differential Equations Exercise 1\nImports", "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nfrom scipy.integrate import odeint\nfrom IPython.html.widgets import interact, fixed\n\n", "Euler's method\nEuler's method is the simplest numerical approach for solving a first order ODE numerically. Given the differential equation\n$$ \\frac{dy}{dx} = f(y(x), x) $$\nwith the initial condition:\n$$ y(x_0)=y_0 $$\nEuler's method performs updates using the equations:\n$$ y_{n+1} = y_n + h f(y_n,x_n) $$\n$$ h = x_{n+1} - x_n $$\nWrite a function solve_euler that implements the Euler method for a 1d ODE and follows the specification described in the docstring:", "def solve_euler(derivs, y0, x):\n \"\"\"Solve a 1d ODE using Euler's method.\n \n Parameters\n ----------\n derivs : function\n The derivative of the diff-eq with the signature deriv(y,x) where\n y and x are floats.\n y0 : float\n The initial condition y[0] = y(x[0]).\n x : np.ndarray, list, tuple\n The array of times at which of solve the diff-eq.\n \n Returns\n -------\n y : np.ndarray\n Array of solutions y[i] = y(x[i])\n \"\"\"\n # YOUR CODE HERE\n h=x[1]-x[0]\n y=np.zeros((len(x)))\n y[0]=y0\n for i in range(1,len(x)):\n n=i-1\n y[i]=y[i-1]+h*derivs(x[n],y[n])\n \n return y \n\n \n\nsolve_euler(lambda y, x: 1, 0, [0,1,2])\n\nassert np.allclose(solve_euler(lambda y, x: 1, 0, [0,1,2]), [0,1,2])", "The midpoint method is another numerical method for solving the above differential equation. In general it is more accurate than the Euler method. It uses the update equation:\n$$ y_{n+1} = y_n + h f\\left(y_n+\\frac{h}{2}f(y_n,x_n),x_n+\\frac{h}{2}\\right) $$\nWrite a function solve_midpoint that implements the midpoint method for a 1d ODE and follows the specification described in the docstring:", "def solve_midpoint(derivs, y0, x):\n \"\"\"Solve a 1d ODE using the Midpoint method.\n \n Parameters\n ----------\n derivs : function\n The derivative of the diff-eq with the signature deriv(y,x) where y\n and x are floats.\n y0 : float\n The initial condition y[0] = y(x[0]).\n x : np.ndarray, list, tuple\n The array of times at which of solve the diff-eq.\n \n Returns\n -------\n y : np.ndarray\n Array of solutions y[i] = y(x[i])\n \"\"\"\n h=x[1]-x[0]\n y=np.zeros((len(x)))\n y[0]=y0\n \n for i in range(1,len(x)):\n n=i-1\n y[i]=y[n]+h*derivs((y[n]+h/2*derivs(y[n],x[n])),x[n]+h/2)\n return y\n\nassert np.allclose(solve_midpoint(lambda y, x: 1, 0, [0,1,2]), [0,1,2])", "You are now going to solve the following differential equation:\n$$\n\\frac{dy}{dx} = x + 2y\n$$\nwhich has the analytical solution:\n$$\ny(x) = 0.25 e^{2x} - 0.5 x - 0.25\n$$\nFirst, write a solve_exact function that compute the exact solution and follows the specification described in the docstring:", "def solve_exact(x):\n \"\"\"compute the exact solution to dy/dx = x + 2y.\n \n Parameters\n ----------\n x : np.ndarray\n Array of x values to compute the solution at.\n \n Returns\n -------\n y : np.ndarray\n Array of solutions at y[i] = y(x[i]).\n \"\"\"\n y=np.zeros((len(x)))\n for i in range(1,len(x)):\n y[i]=.25*np.exp(2*x[i])-.5*x[i]-0.25\n return y\n \n\nassert np.allclose(solve_exact(np.array([0,1,2])),np.array([0., 1.09726402, 12.39953751]))", "In the following cell you are going to solve the above ODE using four different algorithms:\n\nEuler's method\nMidpoint method\nodeint\nExact\n\nHere are the details:\n\nGenerate an array of x values with $N=11$ points over the interval $[0,1]$ ($h=0.1$).\nDefine the derivs 
function for the above differential equation.\nUsing the solve_euler, solve_midpoint, odeint and solve_exact functions to compute\n the solutions using the 4 approaches.\n\nVisualize the solutions on a sigle figure with two subplots:\n\nPlot the $y(x)$ versus $x$ for each of the 4 approaches.\nPlot $\\left|y(x)-y_{exact}(x)\\right|$ versus $x$ for each of the 3 numerical approaches.\n\nYour visualization should have legends, labeled axes, titles and be customized for beauty and effectiveness.\nWhile your final plot will use $N=10$ points, first try making $N$ larger and smaller to see how that affects the errors of the different approaches.", "x=np.linspace(0,1,11)\ndef derivs(y,x): #is this right gotta check\n dy=x+2*y\n return dy\ny4=solve_exact(x)\ny1=solve_euler(derivs,0, x)\ny2=solve_midpoint(derivs,0,x)\ny3=odeint(derivs,0,x)\n\n\n\n\n\nplt.plot?\n\nplt.figure(figsize=(8,11))\n\nplt.subplot(2,1,1)\nplt.plot(x,y1,color='b',label='Euler')\nplt.title('Approximations')\nplt.xlabel('x')\nplt.ylabel('y(x)')\nplt.plot(x,y2,color='r',label='Midpoint')\nplt.plot(x,y3,color='g',label='odeint',linestyle='--', )\nplt.title('y(x) Calculations')\nplt.plot(x,y4,color='k', linestyle='',marker='o',label='Exact')\nplt.grid(False)\nplt.legend(loc=4)\n\nplt.subplot(2,1,2)\nplt.plot(x,abs(y1-y4),color='b',label='Euler Error',linestyle='', marker='o')\nplt.plot(x,abs(y2-y4),color='g',linestyle='',marker='^',label='Midpoint Error')\n# plt.plot(x,abs(y3-y4),color='k',linestyle='',marker='>',label='Odeint vs Exact') #makes weird graph dont understand\nplt.grid(False)\nplt.title('Method Error compared to Exact')\nplt.ylabel('Difference from Exact')\nplt.xlabel('x')\n\nplt.legend(loc=2)\n\nassert True # leave this for grading the plots" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
mne-tools/mne-tools.github.io
dev/_downloads/99798f35196ac117a93fa936a5c48a93/20_visualize_evoked.ipynb
bsd-3-clause
[ "%matplotlib inline", "Visualizing Evoked data\nThis tutorial shows the different visualization methods for\n:class:~mne.Evoked objects.\nAs usual we'll start by importing the modules we need:", "import numpy as np\nimport mne", "Instead of creating the :class:~mne.Evoked object from an\n:class:~mne.Epochs object, we'll load an existing :class:~mne.Evoked\nobject from disk. Remember, the :file:.fif format can store multiple\n:class:~mne.Evoked objects, so we'll end up with a list of\n:class:~mne.Evoked objects after loading. Recall also from the\ntut-section-load-evk section of the introductory Evoked tutorial\n&lt;tut-evoked-class&gt; that the sample :class:~mne.Evoked objects have not\nbeen baseline-corrected and have unapplied projectors, so we'll take care of\nthat when loading:", "root = mne.datasets.sample.data_path() / 'MEG' / 'sample'\nevk_file = root / 'sample_audvis-ave.fif'\nevokeds_list = mne.read_evokeds(evk_file, baseline=(None, 0), proj=True,\n verbose=False)\n\n# Show condition names and baseline intervals\nfor e in evokeds_list:\n print(f'Condition: {e.comment}, baseline: {e.baseline}')", "To make our life easier, let's convert that list of :class:~mne.Evoked\nobjects into a :class:dictionary &lt;dict&gt;. We'll use /-separated\ndictionary keys to encode the conditions (like is often done when epoching)\nbecause some of the plotting methods can take advantage of that style of\ncoding.", "conds = ('aud/left', 'aud/right', 'vis/left', 'vis/right')\nevks = dict(zip(conds, evokeds_list))\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ this is equivalent to:\n# {'aud/left': evokeds_list[0], 'aud/right': evokeds_list[1],\n# 'vis/left': evokeds_list[2], 'vis/right': evokeds_list[3]}", "Plotting signal traces\n.. sidebar:: Butterfly plots\nPlots of superimposed sensor timeseries are called \"butterfly plots\"\n because the positive- and negative-going traces can resemble butterfly\n wings.\nThe most basic plot of :class:~mne.Evoked objects is a butterfly plot of\neach channel type, generated by the evoked.plot() &lt;mne.Evoked.plot&gt;\nmethod. By default, channels marked as \"bad\" are suppressed, but you can\ncontrol this by passing an empty :class:list to the exclude parameter\n(default is exclude='bads'):", "evks['aud/left'].plot(exclude=[])", "Notice the completely flat EEG channel and the noisy gradiometer channel\nplotted in red color. Like many MNE-Python plotting functions,\nevoked.plot() &lt;mne.Evoked.plot&gt; has a picks parameter that can\nselect channels to plot by name, index, or type. In the next plot, we'll show\nonly magnetometer channels and also color-code the channel traces by their\nlocation by passing spatial_colors=True. Finally, we'll superimpose a\ntrace of the root mean square (RMS) of the signal across channels by\npassing gfp=True. 
This parameter is called gfp for historical\nreasons and behaves correctly for all supported channel types: for MEG data,\nit will plot the RMS; while for EEG, it would plot the\n:term:global field power &lt;GFP&gt; (an average-referenced RMS), hence its\nname:", "evks['aud/left'].plot(picks='mag', spatial_colors=True, gfp=True)", "Interesting time periods can be highlighted via the highlight parameter.", "time_ranges_of_interest = [\n (0.05, 0.14),\n (0.22, 0.27)\n]\nevks['aud/left'].plot(\n picks='mag', spatial_colors=True, gfp=True,\n highlight=time_ranges_of_interest\n)", "Plotting scalp topographies\nIn an interactive session, the butterfly plots seen above can be\nclick-dragged to select a time region, which will pop up a map of the average\nfield distribution over the scalp for the selected time span. You can also\ngenerate scalp topographies at specific times or time spans using the\n:meth:~mne.Evoked.plot_topomap method:", "times = np.linspace(0.05, 0.13, 5)\nevks['aud/left'].plot_topomap(ch_type='mag', times=times, colorbar=True)\n\nfig = evks['aud/left'].plot_topomap(ch_type='mag', times=times, average=0.1)", "It is also possible to pass different time durations to average over for each\ntime point. Passing a value of None will disable averaging for that\ntime point:", "averaging_durations = [0.01, 0.02, 0.03, None, None]\nfig = evks['aud/left'].plot_topomap(\n ch_type='mag', times=times, average=averaging_durations\n)", "Additional examples of plotting scalp topographies can be found in\nex-evoked-topomap.\nArrow maps\nScalp topographies at a given time point can be augmented with arrows to show\nthe estimated magnitude and direction of the magnetic field, using the\nfunction :func:mne.viz.plot_arrowmap:", "mags = evks['aud/left'].copy().pick_types(meg='mag')\nmne.viz.plot_arrowmap(mags.data[:, 175], mags.info, extrapolate='local')", "Joint plots\nJoint plots combine butterfly plots with scalp topographies, and provide an\nexcellent first-look at evoked data; by default, topographies will be\nautomatically placed based on peak finding. Here we plot the\nright-visual-field condition; if no picks are specified we get a separate\nfigure for each channel type:", "evks['vis/right'].plot_joint()", "Like :meth:~mne.Evoked.plot_topomap, you can specify the times at which\nyou want the scalp topographies calculated, and you can customize the plot in\nvarious other ways as well. See :meth:mne.Evoked.plot_joint for details.\nComparing Evoked objects\nTo compare :class:~mne.Evoked objects from different experimental\nconditions, the function :func:mne.viz.plot_compare_evokeds can take a\n:class:list or :class:dict of :class:~mne.Evoked objects and plot them\nall on the same axes. Like most MNE-Python visualization functions, it has a\npicks parameter for selecting channels, but by default will generate one\nfigure for each channel type, and combine information across channels of the\nsame type by calculating the :term:global field power. 
Information\nmay be combined across channels in other ways too; support for combining via\nmean, median, or standard deviation are built-in, and custom callable\nfunctions may also be used, as shown here:", "def custom_func(x):\n return x.max(axis=1)\n\n\nfor combine in ('mean', 'median', 'gfp', custom_func):\n mne.viz.plot_compare_evokeds(evks, picks='eeg', combine=combine)", "One nice feature of :func:~mne.viz.plot_compare_evokeds is that when\npassing evokeds in a dictionary, it allows specifying plot styles based on\n/-separated substrings of the dictionary keys (similar to epoch\nselection; see tut-section-subselect-epochs). Here, we specify colors\nfor \"aud\" and \"vis\" conditions, and linestyles for \"left\" and \"right\"\nconditions, and the traces and legend are styled accordingly.", "mne.viz.plot_compare_evokeds(evks, picks='MEG 1811', colors=dict(aud=0, vis=1),\n linestyles=dict(left='solid', right='dashed'))", "The legends generated by :func:~mne.viz.plot_compare_evokeds above used the\ndictionary keys provided by the evks variable. If instead you pass a\n:class:list or :class:tuple of :class:~mne.Evoked objects, the legend\nkeys will be generated automatically from the comment attribute of the\n:class:~mne.Evoked objects (or, as sequential integers if the comment\nattribute is empty or ambiguous). To illustrate this, we'll make a list of\nfive :class:~mne.Evoked objects: two with identical comments, two with\nempty comments (either an empty string or None), and one with a unique\nnon-empty comment:", "temp_list = list()\nfor idx, _comment in enumerate(('foo', 'foo', '', None, 'bar'), start=1):\n _evk = evokeds_list[0].copy()\n _evk.comment = _comment\n _evk.data *= idx # so we can tell the traces apart\n temp_list.append(_evk)\n\nmne.viz.plot_compare_evokeds(temp_list, picks='mag')", "Image plots\nLike :class:~mne.Epochs, :class:~mne.Evoked objects also have a\n:meth:~mne.Evoked.plot_image method, but unlike epochs.plot_image()\n&lt;mne.Epochs.plot_image&gt;, evoked.plot_image() &lt;mne.Evoked.plot_image&gt;\nshows one channel per row instead of one epoch per row. Again, a\npicks parameter is available, as well as several other customization\noptions; see :meth:~mne.Evoked.plot_image for details.", "evks['vis/right'].plot_image(picks='meg')", "Topographical subplots\nFor sensor-level analyses, it can be useful to plot the response at each\nsensor in a topographical layout. The :func:~mne.viz.plot_compare_evokeds\nfunction can do this if you pass axes='topo', but it can be quite slow\nif the number of sensors is too large, so here we'll plot only the EEG\nchannels:", "mne.viz.plot_compare_evokeds(evks, picks='eeg', colors=dict(aud=0, vis=1),\n linestyles=dict(left='solid', right='dashed'),\n axes='topo', styles=dict(aud=dict(linewidth=1),\n vis=dict(linewidth=1)))", "For a larger number of sensors, the method evoked.plot_topo()\n&lt;mne.Evoked.plot_topo&gt; and the function :func:mne.viz.plot_evoked_topo\ncan both be used. The :meth:~mne.Evoked.plot_topo method will plot only a\nsingle condition, while the :func:~mne.viz.plot_evoked_topo function can\nplot one or more conditions on the same axes, if passed a list of\n:class:~mne.Evoked objects. 
The legend entries will be automatically drawn\nfrom the :class:~mne.Evoked objects' comment attribute:", "mne.viz.plot_evoked_topo(evokeds_list)", "By default, :func:~mne.viz.plot_evoked_topo will plot all MEG sensors (if\npresent), so to get EEG sensors you would need to modify the evoked objects\nfirst (e.g., using mne.pick_types).\n<div class=\"alert alert-info\"><h4>Note</h4><p>In interactive sessions, both approaches to topographical plotting allow\n you to click one of the sensor subplots to open a larger version of the\n evoked plot at that sensor.</p></div>\n\n3D Field Maps\nThe scalp topographies above were all projected into two-dimensional overhead\nviews of the field, but it is also possible to plot field maps in 3D. This\nrequires a :term:trans file to transform locations between the coordinate\nsystems of the MEG device and the head surface (based on the MRI). You can\ncompute 3D field maps without a trans file, but it will only work for\ncalculating the field on the MEG helmet from the MEG sensors.", "subjects_dir = root.parent.parent / 'subjects'\ntrans_file = root / 'sample_audvis_raw-trans.fif'", "By default, MEG sensors will be used to estimate the field on the helmet\nsurface, while EEG sensors will be used to estimate the field on the scalp.\nOnce the maps are computed, you can plot them with evoked.plot_field()\n&lt;mne.Evoked.plot_field&gt;:", "maps = mne.make_field_map(evks['aud/left'], trans=str(trans_file),\n subject='sample', subjects_dir=subjects_dir)\nevks['aud/left'].plot_field(maps, time=0.1)", "You can also use MEG sensors to estimate the scalp field by passing\nmeg_surf='head'. By selecting each sensor type in turn, you can compare\nthe scalp field estimates from each.", "for ch_type in ('mag', 'grad', 'eeg'):\n evk = evks['aud/right'].copy().pick(ch_type)\n _map = mne.make_field_map(evk, trans=str(trans_file), subject='sample',\n subjects_dir=subjects_dir, meg_surf='head')\n fig = evk.plot_field(_map, time=0.1)\n mne.viz.set_3d_title(fig, ch_type, size=20)" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
TimOomis/ADS-DV
Assignments/DV-assignment-02.ipynb
mit
[ "ADS-DV\nPlotting scatter plots and histograms\nSummary\nThis assignment first shows you how to download csv data from an online source. Then we're exploring a dataset of all the cities in the world and compare cities in The Netherlands to the rest of the world.\nLoading data CSV and Pandas\nWhile reproducible research recommends having your data somewhere where you know it will not change, it may not be feasible to put large datafiles in your portfolio. We will work with a database of information about cities around the world:\nhttps://www.maxmind.com/en/free-world-cities-database\nWorking with data structures can be done in many ways in Python. There are the standard Python arrays, lists and tuples. You can also use the arrays in the numpy package which allow you to do heavy math operations efficiently. For data analysis Pandas is often used, because data can be put into so-called dataframes. Data-frames store data with column and row names and can easily be manipulated and plotted. You will learn more about Pandas in the Machine Learning workshops. A short intro can be found here:\nhttp://pandas.pydata.org/pandas-docs/stable/10min.html", "import urllib.request as urllib, zipfile, os\n\nurl = 'http://download.maxmind.com/download/worldcities/'\nfilename = 'worldcitiespop.txt.gz'\ndatafolder = 'data/'\n\ndownloaded = urllib.urlopen(url + filename)\nbuf = downloaded.read()\n\ntry:\n os.mkdir(datafolder)\nexcept FileExistsError:\n pass\n\nwith open(datafolder + filename, 'wb') as f:\n f.write(buf)\n\nimport pandas as pd\ncities = pd.read_csv(datafolder + filename, sep=',', low_memory=False, encoding = 'ISO-8859-1')", "Data manipulation\nWe can take a peek at the data by checking out the final rows of data. Do you see any potential problem with this dataset?", "cities.tail()\n#NAN VALUES: AKA NOT A NUMBER\n\n\ncities.sort_values(by='Population', ascending=False).head()", "By sorting the cities on population we immediately see the entries of a few of the largest cities in the world.\nAssignment A\nTo get an idea of where in the world the cities in the dataset are located, we want to make a scatter plot of the position of all the cities in the dataset.\nDon't worry about drawing country borders, just plot the locations of the cities.\nRemember to use all the basic plot elements you need to understand this plot.", "import numpy as np\nfrom matplotlib import pyplot as plt\n%matplotlib inline \n\ny = list(cities.Latitude)\nx = list(cities.Longitude)\n\nplt.scatter(x,y, 1, [0,0,0,1])", "Assignment B\nNow we want to plot the cities in The Netherlands only. Use a scatter plot again to plot the cities, but now vary the size of the marker and the color with the population of that city.\nUse a colorbar to show how the color of the marker relates to its population.\nUse sensible limits to your axes so that you show only mainland The Netherlands (and not the Dutch Antilles).", "dutch_cities = cities[ cities['Country'] =='nl' ]\nplt.figure(figsize=[7,7]);\n\n\ncm = plt.cm.get_cmap('YlOrRd')\ny = dutch_cities.Latitude\nx = dutch_cities.Longitude\npop = dutch_cities.Population\npopsize = pop / 450\n\nplt.xlim(3, 8)\nplt.ylim(50.70, 53.6)\n\nsc= plt.scatter(x,y,popsize,c=pop, cmap=cm, vmin=pop.min(), vmax=pop.max())\n\ncolobar = plt.colorbar(sc)\n\n", "Assignment C\nUsing assignment B, we could clearly see larger cities such as Amsterdam, Rotterdam and even Eindhoven. But we still do not really have a clear overview of how many big cities there are. 
To show a distribution we use a histogram plot.\nWhat happens if we do not call the .dropna() function?\nAdd proper basic plot elements to this plot and try to annotate which data point is Amsterdam and Eindhoven.", "Eind = [i for i, j in enumerate(dutch_cities.City) if j == 'eindhoven']\nAdam = [n for n, m in enumerate(dutch_cities.City) if m == 'amsterdam']\n\nPopEind = dutch_cities.iloc[Eind].Population/1000\nPopAdam = dutch_cities.iloc[Adam].Population/1000\n\nplt.figure();\nbars = plt.hist(np.asarray(dutch_cities.dropna().Population/1000), 100, normed=1);\n\nplt.annotate('Eindhoven', xy=(PopEind,0), xytext=(PopEind, 0.005),\n arrowprops=dict(facecolor='red', shrink = 0.01),\n )\nplt.annotate('Amsterdam', xy=(PopAdam,0), xytext=(PopAdam, 0.005),\n arrowprops=dict(facecolor='grey', shrink = 0.05),\n )\nplt.xlabel('Aantal inwoners in duizenden')\nplt.ylabel('Proportie steden met zoveel inwoners')\n", "Assignment D\nNow we want to compare how the distribution of Dutch cities compares to that of the entire world.\nUse subplots to show the dutch distribution (top plot) and the world distribution (bottom plot).", "plt.figure(figsize=[20, 8]);\nplt.subplot(2,1,1);\nplt.title(\"Dutch City Distribution\")\nplt.hist(np.asarray(dutch_cities.dropna().Population/1000), bins=np.arange(0, 200, 1), normed=1);\nplt.ylim(0.00, 0.10)\nplt.subplot(2,1,2);\nplt.title(\"Global City Distribution\")\nplt.hist(np.asarray(cities.dropna().Population/1000), bins=np.arange(0, 200, 1), normed=1);\n\n## add the subplot of the world cities below this Dutch one", "Assignment E\nWrite what conclusions you can deduce from the above plots?", "#It seems to me that there seem to be bigger cities in the Netherlands in general, although the Global distribution has the highest population in one city." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
peterwittek/ipython-notebooks
Simulating_POVMs.ipynb
gpl-3.0
[ "Introduction\nThis notebook is the computational appendix of arXiv:1609.06139. We demonstrate how to use some convenience functions to decide whether a qubit or qutrit POVM is simulable and how much noise is needed to make it simulable. We also give the details how to reproduce the numerical results shown in the paper. Furthermore, we show how to decompose a simulable POVM into a convex combination of projective measurements.\nTo improve readability of this notebook, we placed the supporting functions to a separate file; please download this in the same folder as the notebook if you would like to evaluate it. The following dependencies must also be available: the Python Interface for Conic Optimization Software Picos and its dependency cvxopt, at least one SDP solver (SDPA as an executable in the path or Mosek with its Python interface installed; cvxopt as a solver is not recommended), and a vertex enumerator (cdd with its Python interface or lrs/plrs as an executable in the path).\nFirst, we import everything we will need:", "from __future__ import print_function, division\nfrom fractions import Fraction\nimport numpy as np\nimport numpy.linalg\nimport random\nimport time\nfrom povm_tools import basis, check_ranks, complex_cross_product, dag, decomposePovmToProjective, \\\n enumerate_vertices, find_best_shrinking_factor, get_random_qutrit, \\\n get_visibility, Pauli, truncatedicosahedron", "Checking criticial visibility\nThe question whether a qubit or qutrit POVM is simulable by projective measurements boils down to an SDP feasibility problem. Few SDP solvers can handle feasibility problems, so from a computational point of view, it is always easier to phrase the question as an SDP optimization, which would return the critical visibility, below which the amount of depolarizing noise would allow simulability. Recall Eq. (8) from the paper that defines the noisy POVM that we obtain subjecting a POVM $\\mathbf{M}$ to a depolarizing channel $\\Phi_t$:\n$\\left[\\Phi_t\\left(\\mathbf{M}\\right)\\right]_i := t M_i + (1-t)\\frac{\\mathrm{tr}(M_i)}{d} \\mathbb{1}$.\nIf this visibility $t\\in[0,1]$ is one, the POVM $\\mathbf{M}$ is simulable.\nQubit example\nAs an example, we study the tetrahedron measurement (see Appendix B in arXiv:quant-ph/0702021):", "def dp(v):\n result = np.eye(2, dtype=np.complex128)\n for i in range(3):\n result += v[i]*Pauli[i]\n return result\n\nb = [np.array([ 1, 1, 1])/np.sqrt(3),\n np.array([-1, -1, 1])/np.sqrt(3),\n np.array([-1, 1, -1])/np.sqrt(3),\n np.array([ 1, -1, -1])/np.sqrt(3)]\nM = [dp(bj)/4 for bj in b]", "Next we check with an SDP whether it is simulable by projective measurements. A four outcome qubit POVM $\\mathbf{M} \\in\\mathcal{P}(2,4)$ is simulable if and only if \n$M_{1}=N_{12}^{+}+N_{13}^{+}+N_{14}^{+},$\n$M_{2}=N_{12}^{-}+N_{23}^{+}+N_{24}^{+},$\n$M_{3}=N_{13}^{-}+N_{23}^{-}+N_{34}^{+},$\n$M_{4}=N_{14}^{-}+N_{24}^{-}+N_{34}^{-},$\nwhere Hermitian operators $N_{ij}^{\\pm}$ satisfy $N_{ij}^{\\pm}\\geq0$ and $N_{ij}^{+}+N_{ij}^{-}=p_{ij}\\mathbb{1}$, where $i<j$ , $i,j=1,2,3,4$ and $p_{ij}\\geq0$ as well as $\\sum_{i<j}p_{ij}=1$, that is, the $p_{ij}$ values form a probability vector. 
This forms an SDP feasibility problem, which we can rephrase as an optimization problem by adding depolarizing noise to the left-hand side of the above equations and maximizing the visibility $t$:\n$\\max_{t\\in[0,1]} t$\nsuch that\n$t\\,M_{1}+(1-t)\\,\\mathrm{tr}(M_{1})\\frac{\\mathbb{1}}{2}=N_{12}^{+}+N_{13}^{+}+N_{14}^{+},$\n$t\\,M_{2}+(1-t)\\,\\mathrm{tr}(M_{2})\\frac{\\mathbb{1}}{2}=N_{12}^{-}+N_{23}^{+}+N_{24}^{+},$\n$t\\,M_{3}+(1-t)\\,\\mathrm{tr}(M_{3})\\frac{\\mathbb{1}}{2}=N_{13}^{-}+N_{23}^{-}+N_{34}^{+},$\n$t\\,M_{4}+(1-t)\\,\\mathrm{tr}(M_{4})\\frac{\\mathbb{1}}{2}=N_{14}^{-}+N_{24}^{-}+N_{34}^{-}$.\nIf it is, the critical visibility is one, we have a simulable measurement. We solve this SDP with the function get_visibility for the tetrahedron measurement, indicating that it is not simulable:", "get_visibility(M)", "Qutrit example\nWe take the qutrit POVM from Section 4.1 in arXiv:quant-ph/0310013:", "psi0 = np.array([[1/np.sqrt(2)], [1/np.sqrt(2)], [0]])\nomega = np.exp(2*np.pi*1j/3)\nD = [[omega**(j*k/2) * sum(np.power(omega, j*m) * np.kron(basis((k+m) % 3), basis(m).T)\n for m in range(3)) for k in range(1, 4)] for j in range(1, 4)]\npsi = [[D[j][k].dot(psi0) for k in range(3)] for j in range(3)]\nM = [np.kron(psi[k][j], psi[k][j].conj().T)/3 for k in range(3) for j in range(3)]", "The SDP to be solved is more intricate than the qubit case. Using the notations of Lemma 14, let $\\mathbf{M}\\in\\mathcal{P}(d,n)$ be $n$-outcome measurement on $d$ dimensional Hilbert space. Let $m\\leq n$. The critical visibility $t_{m}(\\mathbf{M})$ can be computed via the following SPD programme\n$\\max_{t\\in[0,1]} t$\nSuch that\n$t\\mathbf{M}+(1-t)\\left(\\mathrm{tr}(M_1)\\frac{\\mathbb{1}}{d},\\ldots,\\mathrm{tr}(M_n)\\frac{\\mathbb{1}}{d}\\right)=\\mathbf{M}=\\sum_{X\\in[n]2} \\mathbf{N}_X + \\sum{Y\\in[n]_3} \\mathbf{N}_Y$,\n$\\left{\\mathbf{N}X\\right}{X\\in[n]3}\\ ,\\ \\left{p_X\\right}{X\\in[n]2}\\ , \\left{\\mathbf{N}_Y\\right}{Y\\in[n]3}\\ ,\\ \\left{p_Y\\right}{X\\in[n]_3}$,\n$\\mathbf{M}=\\sum_{X\\in[n]2} \\mathbf{N}_X + \\sum{Y\\in[n]_3} \\mathbf{N}_Y$, \n$[\\mathbf{N}_X]_i \\geq 0\\ ,\\ [\\mathbf{N}_Y]_i \\geq 0\\,\\ \\ i=1,\\ldots,n$, \n$[\\mathbf{N}_X]_i = 0$ for $i\\notin{X}, [\\mathbf{N}_Y]_i = 0$ for $i\\notin{Y}$,\n$\\mathrm{tr}\\left([\\mathbf{N}_Y]_i\\right) = p_Y$ for $i\\in{Y}$,\n$\\sum_{i=1}^n [\\mathbf{N}X]_i= p_X \\mathbb{1}\\ , \\sum{i=1}^n[\\mathbf{N}_Y]_i= p_Y \\mathbb{1}$\n$p_X \\geq 0\\ ,\\ p_Y\\geq 0\\ ,\\ \\sum_{X\\in[n]2} p_X+\\sum{Y\\in[n]_3} p_Y=1$.\nSolving this SDP for the qutrit POVM above, we see that the visibility is far from one:", "get_visibility(M, solver=None, proj=True)", "We can also look only for the visibility needed to decompose the POVM into general 3-outcome POVMs.", "get_visibility(M, solver=None, proj=False)", "Next we look at a projective-simulable POVM:", "psi = [get_random_qutrit()]\npsi.append(complex_cross_product(psi[0], np.array([[0], [0], [1]])))\npsi.append(complex_cross_product(psi[0], psi[1]))\nphi = [get_random_qutrit()]\nphi.append(complex_cross_product(phi[0], np.array([[0], [0], [1]])))\nphi.append(complex_cross_product(phi[0], phi[1]))\nM = [0.5*np.kron(psi[0], psi[0].conj().T),\n 0.5*np.kron(psi[1], psi[1].conj().T),\n 0.5*np.kron(psi[2], psi[2].conj().T) + 0.5*np.kron(phi[0], phi[0].conj().T),\n 0.5*np.kron(phi[1], phi[1].conj().T),\n 0.5*np.kron(phi[2], phi[2].conj().T),\n np.zeros((3, 3), dtype=np.float64),\n np.zeros((3, 3), dtype=np.float64),\n np.zeros((3, 3), dtype=np.float64),\n np.zeros((3, 3), 
dtype=np.float64)]", "The result is very close to one:", "get_visibility(M)", "External polytopes approximating $\\mathcal{P}(2,4)$\nHere we repeat some of the theory we presented in Appendix D. We would like to find an external polytope that tightly approximates $\\mathcal{P}(2,4)$ and then check how much we have to \"shrink\" it until it fits inside the set of simulable POVMs. Since the operators ${\\mathbb{1}, \\sigma_x, \\sigma_y, \\sigma_z}$ form is a basis for the real space of Hermitian matrices $\\mathrm{Herm}(\\mathbb{C}^2)$, we can write any matrix in this set as $M = \\alpha \\mathbb{1} + x \\sigma_x + y \\sigma_y + z \\sigma_z$ for some $\\alpha, x, y, z$ real numbers. We relax the positivity condition of the measurement effects by requiring $\\mathrm{tr}(M|\\psi_j\\rangle\\langle\\psi_j|)\\geq0$ for some collection of pure states ${|\\psi_j\\rangle\\langle\\psi_j|}_{j=1}^N$, which in turn can always be expressed as $|\\psi_j\\rangle\\langle\\psi_j|=(1/2)(\\mathbb{1}-\\vec{v}_j \\cdot \\vec{\\sigma})$ for a vector $\\vec{v}_j$ from a unit sphere in $\\mathbb{R}^3$. Thus, with the measurement effects also expressed in the same basis, we can write the relaxed positivity conditions as\n$(x,y,z)\\cdot v_j \\leq \\alpha,\\ i=1,\\ldots,N$,\nwhere \"$\\cdot$\" denotes the standard inner product in $\\mathbb{R}^3$. We describe the approximating polytope as \n$\\begin{eqnarray}\n&\\alpha_i \\geq 0, \\ i=1,...,4, \\sum_i{\\alpha_i} = 1\\\n&\\sum_i{x_i} = \\sum_i{y_i} = \\sum_i{z_i} = 0.\n\\end{eqnarray}$\nThis yields sixteen real parameters, which would be expensive to treat computationally. We can, however, exploit certain properties that reduce the number of parameters. First of all, since the effects add up to the identity, we can drop the last four parameters. Then, due to invariance properties and the characteristics of extremal POVMs, one parameter is sufficient for the first effect, and three are enough for the second. In total, we are left with eight parameters.\nWe would like to define inequalities defining the facets of the polytope, run a vertex enumeration algorithm, and refine the polytope further. From a computational perspective, a critical point is enumerating the vertices given the inequalities. For this, two major implementations are available that use fundamentally different algorithms:\n\ncdd and its Python interface. \nlrs and its parallel variant plrs. We developed a simple Python wrapper for this implementation.\n\nUsing cdd results in fewer vertices, but lrs and plrs run at least a magnitude faster. The function enumerate_vertices abstracts away the implementation, and the user can choose between cdd, lrs, and plrs. Note that format of inequalities is $b+Ax\\geq 0$, where $b$ is the constant vector and $A$ is the coefficient matrix. Thus a line in our parametrization is of the form $[b, \\alpha_1, \\alpha_2, \\alpha_3, \\alpha_4, \\alpha_5, \\alpha_6, \\alpha_7, \\alpha_8]$, corresponding to an inequality $b +\\alpha_1 x_1 + \\alpha_2 x_2 + \\alpha_3 x_3 + \\alpha_4 x_4 + \\alpha_5 x_5 + \\alpha_6 x_6 + \\alpha_7 x_7 + \\alpha_8 x_8 \\geq 0$.\nSince $M_2$ lies in the plane, we consider an approximation to the circle, rather than the sphere. Furthermore, we can always assume that this vector lies in the $y$-positive semi-plane, and take a polygon approximating the semi-circle from the outside, defined by the points of tangency to the semi-circle. 
In order to obtain a reliable polytope, given only by rational coordinates, we set these points to be the image of a net of 100 points of the interval $[-1,1]$ via the stereographic projection. By dealing only with rational points, we ensure that we can go back and forth from inequalities to vertices recovering the same initial set.", "n = 25\n\n# crit is an approximation of 1/(1+sqrt2), the point whose 2D\n# stereographic projection is (1/sqrt2, 1/sqrt2)\ncrit = Fraction(4142, 10000)\n\n# for the interval [crit, 1] the projection from the pole P = (-1, 0)\n# approximates \"well\" the circle\nnn = Fraction(1 - crit, n)\n\n# u discretizes the quarter of circle where x, y \\geq 0\nu = []\nfor r in range(1, n + 1):\n # P = (0, -1), x \\in [crit, 1]\n u.append([Fraction(2*(crit + r*nn), (crit + r*nn)**2 + 1),\n Fraction(2, (crit + r*nn)**2 + 1) - 1])\n # P = (-1, 0), y \\in [crit, 1]\n u.append([Fraction(2, (crit + r*nn)**2 + 1) - 1,\n Fraction(2*(crit + r*nn), (crit + r*nn)**2 + 1)])\nu = np.array(u)\n\n# u1 discretizes the quarter of circle where x \\leq 0, y \\geq 0\nu1 = np.column_stack((-u[:, 0], u[:, 1]))\nu = np.row_stack((u, u1))\n\n# W1 encodes the polyhedron given by the tangency points in u\nW1 = np.zeros((u.shape[0] + 1, 9), dtype=fractions.Fraction)\nfor i in range(u.shape[0]):\n W1[i, 2:5] = np.array([1, -u[i, 0], -u[i, 1]])\n# This constraint is to get only the half polygon with positive y2\nW1[u.shape[0], 4] = 1", "Next, we would like to constrain the third effect $M_3$, and we start this approximation by defining a polytope approximating the unit sphere in $\\mathbb{R}^3$. Similiarly to the previous case, the approximation is defined by the points of tangency to the sphere, provided by the stereographic projection of a set of rational points contained in $[-1, 1]\\times[-1, 1]$.", "m1 = 2\nm2 = 1\n# crit is the same as above\nmm1 = Fraction(1, m1)\nmm2 = Fraction(crit, m2)\n\n# v1 discretizes the positive octant of the sphere\nv1 = []\n\n# P = (0, 0, -1), x, y \\in [0, 1]\nfor rx in range(1, m1 + 1):\n for ry in range(1, m1 + 1):\n v1.append([Fraction(2*(rx*mm1), (rx*mm1)**2 + (ry*mm1)**2 + 1),\n Fraction(2*(ry*mm1), (rx*mm1)**2 + (ry*mm1)**2 + 1),\n 1 - Fraction(2, (rx*mm1)**2 + (ry*mm1)**2 + 1)])\n\n# a second round to improve the approximation around the pole\n# P = (0, 0, -1), x, y \\in [0, crit]\nfor rx in range(1, m2 + 1):\n for ry in range(1, m2 + 1):\n v1.append([Fraction(2*(rx*mm2), (rx*mm2)**2 + (ry*mm2)**2 + 1),\n Fraction(2*(ry*mm2), (rx*mm2)**2 + (ry*mm2)**2 + 1),\n 1 - Fraction(2, (rx*mm2)**2 + (ry*mm2)**2 + 1)])\n\nv1 = np.array(v1)\n\n# we now reflect the positive octant to construct the whole sphere\nv1a = np.column_stack((-v1[:, 0], v1[:, 1], v1[:, 2]))\nv1 = np.row_stack((v1, v1a))\nv1b = np.column_stack((v1[:, 0], -v1[:, 1], v1[:, 2]))\nv1 = np.row_stack((v1, v1b))\nv1c = np.column_stack((v1[:, 0], v1[:, 1], -v1[:, 2]))\nv1 = np.row_stack((v1, v1c))\n\n# the following discretizes the quarters of equators where x, y, z > 0,\n# corresponding to the case where rx, ry = 0 above, around the origin\nyz = []\nxz = []\nxy = []\nfor r in range(1, m1+1):\n # P = [0, 0, -1], x = 0, y \\in [0, 1]\n yz.append([0,\n Fraction(2*(r*m1), (r*m1)**2 + 1),\n 1 - Fraction(2, (r*m1)**2 + 1)])\n # P = [0, 0,-1], y = 0, x \\in [0, 1]\n xz.append([Fraction(2*(r*m1), (r*m1)**2 + 1),\n 0,\n 1 - Fraction(2, (r*m1)**2 + 1)])\n # P = [0, -1, 0], z = 0, x \\in [0, 1]\n xy.append([Fraction(2*(r*m1), (r*m1)**2 + 1),\n 1 - Fraction(2, (r*m1)**2 + 1),\n 0])\n\nyz = np.array(yz)\nxz = 
np.array(xz)\nxy = np.array(xy)\n\nyz1 = np.column_stack((yz[:, 0], -yz[:, 1], yz[:, 2]))\nyz2 = np.column_stack((yz[:, 0], yz[:, 1], -yz[:, 2]))\nyz3 = np.column_stack((yz[:, 0], -yz[:, 1], -yz[:, 2]))\nyz = np.row_stack((yz, yz1, yz2, yz3))\n\nxz1 = np.column_stack((-xz[:, 0], xz[:, 1], xz[:, 2]))\nxz2 = np.column_stack((xz[:, 0], xz[:, 1], -xz[:, 2]))\nxz3 = np.column_stack((-xz[:, 0], xz[:, 1], -xz[:, 2]))\nxz = np.row_stack((xz, xz1, xz2, xz3))\n\nxy1 = np.column_stack((-xy[:, 0], xy[:, 1], xy[:, 2]))\nxy2 = np.column_stack((xy[:, 0], -xy[:, 1], xy[:, 2]))\nxy3 = np.column_stack((-xy[:, 0], -xy[:, 1], xy[:, 2]))\nxy = np.row_stack((xy, xy1, xy2, xy3))\n\nv2 = np.row_stack((yz, xz, xy))\n\nv = np.row_stack((v1, v2))", "The following constraints ensure that the third operator $M_3$ of the measurement is a quasi-effect, with the approximation given by $v$:", "W2 = np.zeros((v.shape[0], 9), dtype=fractions.Fraction)\nfor i in range(v.shape[0]):\n W2[i, 5:] = np.array([1, -v[i, 0], -v[i, 1], -v[i, 2]])", "The next set of constraints ensures the same condition for the last effect, which we express by $\\mathbb{1} - M_1 - M_2 - M_3$:", "W3 = np.zeros((v.shape[0], 9))\nfor i in range(v.shape[0]):\n W3[i] = [1, -1+v[i, 0], -1, v[i, 0], v[i, 1], -1,\n v[i, 0], v[i, 1], v[i, 2]]", "We need that $\\alpha_0, \\alpha_1, \\alpha_2 \\geq 0$:", "W4 = np.array([[0, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0]])", "We also require that $\\alpha_0+\\alpha_1+\\alpha_2 \\leq 1$, which corresponds to expressing the previous constraint for the last effect:", "W5 = np.array([[1, -1, -1, 0, 0, -1, 0, 0, 0]])", "Finally, we need that $\\alpha_0 \\geq \\alpha_1 \\geq \\alpha_2 \\geq 1-\\alpha_0-\\alpha_1-\\alpha_2$, a condition that we can impose without lost of generality due to relabeling. Once we have the last constraints, we stack the vectors in a single array.", "W6 = np.array([[0, 1, -1, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, -1, 0, 0, 0],\n [-1, 1, 1, 0, 0, 2, 0, 0, 0]])\nhull = np.row_stack((W1, W2, W3, W4, W5, W6))", "We enumerate the vertices, which is a time-consuming operation:", "time0 = time.time()\next = enumerate_vertices(hull, method=\"plrs\", verbose=1)\nprint(\"Vertex enumeration in %d seconds\" % (time.time()-time0))", "As the last step, we iterate the SDP optimization described in Section \"Checking criticial visibility\" over all vertices to get the best shrinking factor. This takes several hours to complete. Parallel computations do not work from a notebook, but they do when the script is executed in a console. For this reason, here we disable parallel computations.", "time0 = time.time()\nalphas = find_best_shrinking_factor(ext, 2, solver=\"mosek\", parallel=False)\nprint(\"\\n Found in %d seconds\" % (time.time()-time0))", "External polytopes approximating $\\mathcal{P}_{\\mathrm{cov}}(3,9)$\nFollowing the same reasoning applied to approximate $\\mathcal{P}(2,4)$ we now approximate the set of qutrit covariant POVMs $\\mathcal{P}{\\mathrm{cov}}(3,9)$ regarding the discrete Heinsenberg group. 
This task is relatively simple, since we need only to consider a quasi-positive seed $M$ and derive the effects from it by conjugating by the unitaries $D{ij}$, which rotate the space by symmetric directions.", "w = np.cos(2*np.pi/3) + 1j*np.sin(2*np.pi/3)\nx = np.matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\nD = [[], [], []]\nfor j in range(3):\n for k in range(3):\n D[j].append(np.matrix((w**(j*k/2))*(\n sum(w**(j*m)*x[:, np.mod(k + m, 3)]*dag(x[:, m]) for m in\n range(3)))))", "We use the the approximation to the set of positive semi-definite operators given by $tr(M\\Psi_i)\\geq0$, for some finite set of rank-one projectors ${\\Psi_i}$. We generate random projectors and rotate them by the unitaries $D_{ij}$ above in order to obtain a more regular distribution in the space of projectors.", "# Discretization of the set of PSD matrices with 9*N elements\nN = 2\ndisc = []\nfor _ in range(N):\n psi = np.matrix(qutip.Qobj.full(qutip.rand_ket(3)))\n for j in range(3):\n for k in range(3):\n disc.append(D[j][k]*(psi*dag(psi))*dag(D[j][k]))", "We now translate each of the trace constraints above by writing each $M = (m_{ab})$ as a 8-dimensional real vector $(m_{00}, re(m_{01}), im(m_{01}),..., re(m_{12}), im(m_{12}))$, where $m_{22} = 1/3 -m_{00} -m_{11}$ since its trace is fixed.", "hull = []\nfor i in range(9*N):\n # each row of hull ensures tr(M*disc[i])>= 0\n hull.append([np.real(disc[i][2, 2])/3,\n np.real(disc[i][0, 0]) - np.real(disc[i][2, 2]),\n 2*np.real(disc[i][1, 0]), -2*np.imag(disc[i][1, 0]),\n 2*np.real(disc[i][2, 0]), -2*np.imag(disc[i][2, 0]),\n np.real(disc[i][1, 1]) - np.real(disc[i][2, 2]),\n 2*np.real(disc[i][2, 1]), -2*np.imag(disc[i][2, 1])])", "We then construct the polytope generated by these inequalities.", "cov_ext = enumerate_vertices(np.array(hull), method=\"plrs\")", "To have an idea on how good the approximation is, we can translate each vertice obtained into a quasi-POVM and check how negative its eigenvalues are.", "# Converting vectors into covariant POVMs\npovms = []\nfor i in range(cov_ext.shape[0]):\n eff = np.matrix([[cov_ext[i, 1],\n cov_ext[i, 2] + cov_ext[i, 3]*1j,\n cov_ext[i, 4] + cov_ext[i, 5]*1j],\n [cov_ext[i, 2] - cov_ext[i, 3]*1j,\n cov_ext[i, 6],\n cov_ext[i, 7] + cov_ext[i, 8]*1j],\n [cov_ext[i, 4] - cov_ext[i, 5]*1j,\n cov_ext[i, 7] - cov_ext[i, 8]*1j,\n 1/3 - cov_ext[i, 1] - cov_ext[i, 6]]])\n M = []\n for j in range(3):\n for k in range(3):\n M.append(D[j][k]*eff*dag(D[j][k]))\n povms.append(M)\n\n# Finding the least eigenvalues\nA = np.zeros((cov_ext.shape[0]))\nfor i in range(cov_ext.shape[0]):\n A[i] = min(numpy.linalg.eigvalsh(povms[i][0]))\na = min(A)", "We then optimise over the extremal points of the polytope.", "alphas = find_best_shrinking_factor(cov_ext, 3, parallel=True)", "Decomposition of qutrit three-outcome, trace-one POVMS into projective measurements\nWe implemented the constructive strategy to find a projective decomposition for trace-one qutrit measurements $\\mathbf{M}\\in\\mathcal{P}_1(3,3)$ we described in Appendix D.\nFirst we define a function to generate a random trace-1 POVM. This is the only step that requires an additional dependency compared to the ones we loaded in the beginning of the notebook. 
The dependency is QuTiP.", "from qutip import rand_unitary\n\ndef get_random_trace_one_povm(dim=3):\n U = rand_unitary(dim)\n M = [U[:, i]*dag(U[:, i]) for i in range(dim)]\n for _ in range(dim-1):\n U = rand_unitary(dim)\n r = random.random()\n for i in range(dim):\n M[i] = r*M[i] + (1-r)*U[:, i]*dag(U[:, i])\n return M", "Then we decompose a random POVM following the cascade of \"rank reductions\" described in Appendix D, and check the ranks:", "M = get_random_trace_one_povm()\nprint(\"Rank of POVM: \", check_ranks(M))\ncoefficients, projective_measurements = decomposePovmToProjective(M)", "As a sanity check, we look at the ranks of the effects of the individual projective measurements. We must point out that the numerical calculations occasionally fail, and we set the tolerance in rank calculations high.", "print(\"Ranks of projective measurements: \")\nfor measurement in projective_measurements:\n print(check_ranks(measurement, tolerance=0.01))", "We show that the projective measurements indeed return the POVM:", "N = coefficients[0]*projective_measurements[0] + \\\n coefficients[1]*(coefficients[2]*projective_measurements[1] + \n coefficients[3]*(coefficients[4]*(coefficients[6]*projective_measurements[2] + \n coefficients[7]*projective_measurements[3]) + \n coefficients[5]*(coefficients[8]*projective_measurements[4] +\n coefficients[9]*projective_measurements[5])))\nnot np.any(M - N > 10e-10)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
science-of-imagination/nengo-buffer
Project/trained_mental_scaling_testing.ipynb
gpl-3.0
[ "Testing the trained weight matrices (not in an ensemble)", "import nengo\nimport numpy as np\nimport cPickle\nimport matplotlib.pyplot as plt\nfrom matplotlib import pylab\nimport matplotlib.animation as animation", "Load the weight matrices from the training", "#Weight matrices generated by the neural network after training\n\n#Maps the label vectors to the neuron activity of the ensemble\nlabel_weights = cPickle.load(open(\"label_weights1000.p\", \"rb\"))\n#Maps the activity of the neurons to the visual representation of the image\nactivity_to_img_weights = cPickle.load(open(\"activity_to_img_weights_scale1000.p\", \"rb\"))\n#Maps the activity of the neurons of an image with the activity of the neurons of an image scaled\nscale_up_weights = cPickle.load(open(\"scale_up_weights1000.p\", \"rb\"))\nscale_down_weights = cPickle.load(open(\"scale_down_weights1000.p\", \"rb\"))\n\n#Create the pointers for the numbers\ntemp = np.diag([1]*10)\n\nZERO = temp[0]\nONE = temp[1]\nTWO = temp[2]\nTHREE= temp[3]\nFOUR = temp[4]\nFIVE = temp[5]\nSIX = temp[6]\nSEVEN =temp[7]\nEIGHT= temp[8]\nNINE = temp[9]\n\nlabels =[ZERO,ONE,TWO,THREE,FOUR,FIVE,SIX,SEVEN,EIGHT,NINE]\n\n#Visualize the one hot representation\nprint(ZERO)\nprint(ONE)", "Visualize the digit from one hot representation through the activity weight matrix to the image representation\n- Image is average digit from mnist dataset", "#Change this to imagine different digits\nimagine = ZERO\n#Can also imagine combitnations of numbers (ZERO + ONE)\n\n#Label to activity\ntest_activity = np.dot(imagine,label_weights)\n#Image decoded \ntest_output_img = np.dot(test_activity, activity_to_img_weights)\n\nplt.imshow(test_output_img.reshape(28,28),cmap='gray')\nplt.show()", "Visualize the rotation of the image using the weight matrix from activity to activity\n- does not use the weight matrix used on the recurrent connection", "#Change this to visualize different digits\nimagine = ZERO \n\n#How long the animation should go for\nframes=5\n\n#Make a list of the activation of rotated images and add first frame\nrot_seq = []\nrot_seq.append(np.dot(imagine,label_weights)) #Map the label vector to the activity vector\ntest_output_img = np.dot(rot_seq[0], activity_to_img_weights) #Map the activity to the visual representation\n\n#add the rest of the frames, using the previous frame to calculate the current frame\nfor i in range(1,frames):\n rot_seq.append(np.dot(rot_seq[i-1],scale_down_weights)) #add the activity of the current image to the list\n test_output_img = np.dot(rot_seq[i], activity_to_img_weights) #map the new activity to the visual image\nfor i in range(1,frames*2):\n rot_seq.append(np.dot(rot_seq[frames+i-2],scale_up_weights)) #add the activity of the current image to the list\n test_output_img = np.dot(rot_seq[i], activity_to_img_weights) #map the new activity to the visual image \n\n#Animation of rotation\nfig = plt.figure()\n\ndef updatefig(i):\n image_vector = np.dot(rot_seq[i], activity_to_img_weights) #map the activity to the image representation\n im = pylab.imshow(np.reshape(image_vector,(28,28), 'F').T, cmap=plt.get_cmap('Greys_r'),animated=True)\n \n return im,\n\nani = animation.FuncAnimation(fig, updatefig, interval=100, blit=True)\nplt.show()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
debsankha/network_course_python
talks/01-pythonbasics-builtins.ipynb
gpl-2.0
[ "Table of Contents\n\n1. Introduction to Interactive Network Analysis and Visualization with Python\n1.1 What is Python?\n1.1.1 How to use Python\n\n\n1.2 A short intro to jupyter\n1.2.1 Markdown is cool\n1.2.1.1 This is a heading\n\n\n1.2.2 Use the TAB\n1.2.3 Write your own documentations\n1.2.4 Inline plots & interactive widgets\n\n\n\n\n2. Short tutorial on python\n2.1 datatypes and operators\n2.1.1 integer, float, strings\n2.1.2 Assignments\n2.1.3 Conversion of datatypes:\n2.1.4 Arithmetic and operators\n\n\n2.2 Conditions and control statements (if, while)\n2.2.1 Comparison operators:\n2.2.2 is operator\n2.2.3 If ... else ...\n\n\n2.3 While loops:\n2.3.1 Warning: Make sure that the condition gets True after a finite number of steps!\n\n\n2.4 Sequences & for-loops\n2.4.1 Sequences\n2.4.2 Specialities of lists\n2.4.2.1 In contrast to tuples and strings, lists are mutable. Items can be replaced, removed or added.\n2.4.2.2 Warning: assignment to lists do not create a copy\n\n\n2.4.3 Specialities of strings:\n2.4.3.1 Strings have some nice methods\n\n\n2.4.4 For-loops\n2.4.4.1 Works on strings, tuples:\n2.4.4.2 Did I tell you about string formatting?\n\n\n2.4.5 List comprehensions: A short way to create a sequence.\n2.4.6 What are tuples for, you ask?\n2.4.7 dictionaries:\n\n\n2.5 functions:\n2.6 Using modules\n\n\n\n1. Introduction to Interactive Network Analysis and Visualization with Python\na GGNB short method course offered by Debsankha Manik and Jana Lassar at the MPI for Dynamics and Self-Organization in September 2014\n1.1 What is Python?\n\na general-purpose, high-level programming language.\nfree and open source\neasy to learn and easy to read\nportable to any OS\nsaves your time rather than CPU time\nused by many scientists \n -> large set of modules for data processing/analysis/visualization/...\n\n<img\nsrc=\"http://image.slidesharecdn.com/scientific-python-151121091532-lva1-app6892/95/scientific-python-28-638.jpg?cb=1448101074\" />\n1.1.1 How to use Python\n\ninteractive interpreter (python, ipython)\nscripts (with any text editor, there is no standard IDE)\njupyter (aka ipython notebook)\n\n1.2 A short intro to jupyter\n1.2.1 Markdown is cool\nMarkdown is a lightweight markup language.\n1.2.1.1 This is a heading\nThis is a list:\n\napples\noranges\npears\n\nA numbered list:\n\napples\noranges\npears\n\nLaTeX equations \n\\begin{align}\nf(x \\; | \\; \\mu, \\sigma) = \\frac{1}{\\sigma\\sqrt{2\\pi} } \\; e^{ -\\frac{(x-\\mu)^2}{2\\sigma^2} }\n\\end{align}\nAnd this is a link where you find more about markdown syntax.\n1.2.2 Use the TAB", "# print( # Tab now should display the docstring\n\n# Also woks:\nprint??", "1.2.3 Write your own documentations", "def isprime(n): \n \"\"\"Determine if a given number is a prime number\"\"\" \n for x in range(2, n-1): \n if n % x == 0: #if n modulo x equals 0\n return False #then n is not a prime number\n return True \n\nisprime??", "1.2.4 Inline plots & interactive widgets", "%matplotlib inline\nimport pylab as pl\nimport numpy as np\nfrom IPython.html.widgets import interact\n\ndef plot(frequency):\n x = np.arange(1000)\n pl.plot(x, np.sin(2*np.pi*frequency*x/1000))\ninteract(plot, frequency=(0,20,1))", "2. 
Short tutorial on python\n2.1 datatypes and operators\n2.1.1 integer, float, strings", "#name = object\na = 17\nb = 3.14\nc = \"bla\"\n\nprint(a)\nprint(b)\nprint(c)\n\nprint(type(a))\nprint(type(b))\nprint(type(c))", "2.1.2 Assignments", "a = 17\na = \"Used names can always be reassigned to other objects regardles of their data type!\"\nprint(a)", "2.1.3 Conversion of datatypes:", "print(int(5.5))\nprint(float('5.23'))\nprint(str(12))\nprint(bool('True'))", "2.1.4 Arithmetic and operators\n\n+\n-\n*\n/\n// (integer division)\n% (modulo operator)\n** (power)", "print(x+y)\nprint(\"bla\" + \"bla\")\nprint(x/y)\nprint(x*2)\nprint('bla'*3)\nprint(x/2)\nprint(x**3) #the exponentiation operator\nprint(x%2) #the remainder operator", "2.2 Conditions and control statements (if, while)\n2.2.1 Comparison operators:\n| Operator | True, if |\n| ------------- |:-------------:|\n| a == b | a equals b |\n| a > b | a is larger than b |\n| a < b | a is smaller than b |\n| a >= b | a is larger than b or equals b |\n| a <= b | a is smaller than b or equals b |\n| a != b | a and b are unequal |\n| a is b | a is the same object as b |\n| a is not b| a is not the same object as b |", "print(x==y)\nprint(x==5)", "2.2.2 is operator", "x = 3\ny = 3\nx is y", "'==' compares values while 'is' compares identities", "x = [1]\ny = [1]\nx is y", "But:", "x==y", "Warning: do not check equality of two floats (finite precision!!)", "from math import sin, pi\nsin(2*pi)==0", "2.2.3 If ... else ...", "number_of_people = 6\n\nif number_of_people < 5:\n print('Not enough people to play this game.')\nelif number_of_people < 10:\n print('More would be better, but its sufficient.')\nelif number_of_people < 20:\n print('Perfect! Enjoy!')\nelif number_of_people < 30:\n print('Less would be better, but it will work somehow.')\nelse:\n print('Sorry, but more than 30 is too much.')", "Conditional expressions:", "x = 12\n\n#the long version:\nif x%2==0:\n message = \"Even.\"\nelse:\n message = \"Odd.\"\nprint(message)\n\n#the short version:\nprint( \"Even.\" if x%2==0 else \"Odd.\" ) ", "2.3 While loops:", "value = 17\n\nwhile value < 21:\n print(value)\n value = value + 1\n\nvalue = 17\nmax_value = 30\n\nwhile True:\n value = value + 1\n if value > max_value:\n break #stop here and escape the while loop\n elif value%2==0:\n continue #stop here and continue the while loop\n print(value)", "2.3.1 Warning: Make sure that the condition gets True after a finite number of steps!\n2.4 Sequences & for-loops\n2.4.1 Sequences\n| Sequence | mutable? | data type |\n| ------------- |:-------------:|:--------:|\n| list | yes | arbitrary |\n| tuple | no | arbitrary |\n| string | no | Unicode symbols |", "a = [1,2,3,4,5] #a list\nb = (1,2,3,4,5) #a tuple\nc = '12345' #a string", "Since lists and tuples can contain arbitrary data types, they can be 'nested':", "nested_list = [[1,2,3],[4,5,6],[7,8,9]]", "All three sequence types (tuples, strings and lists) share much of their syntax and functionality.", "print(len(a),len(b),len(c))\n\nprint( a + a )\nprint( b + b )\nprint( c + c )", "single items are accessible by their index (starting from 0):", "print( a[0], b[1], c[2] )", "Negative indices are counted from the end (starting with -1)", "print ( a[-1], b[-3] )", "A subset of items can be accessed by \"slices\". \nSyntax: [I:J:K] means start from index I, stop at index J and take every K'th item. 
If I is omitted, start from the first item, if J is omitted, stop at the last item, and if K is omitted, take every item.", "print( a[1:4] ) #get items from 1 to 4\nprint( a[3:5] ) #get items from 3 to 5\nprint( a[:4] ) #get items from 0 to 4\nprint( a[3:] ) #get items from 3 to the end\nprint( a[::2] ) #get every second item", "The in-operator checks whether an item is in the sequence:", "print((2,3) in (1,2,3,4,5))\nprint('cde' in 'abcdefgh')", "2.4.2 Specialities of lists\n2.4.2.1 In contrast to tuples and strings, lists are mutable. Items can be replaced, removed or added.", "a = [1,2,3,4] #create list\na[2] = 12 #replace item 2 by value 12\na.append(34) #add value 34 to the end\na.extend([0,0,0]) #add several values to the end\na.pop() #remove last item\na.insert(3, 'blub')#insert object before index 3\na.reverse() #reverse list\nprint(a)", "2.4.2.2 Warning: assignment to lists do not create a copy", "s = [1,2]\nt = s\n\nt.append(99)\nprint(s, t)", "2.4.3 Specialities of strings:", "# Strings can be enlosed by both single quote and double quote.\ns='My home'\nt=\"My home\"\ns==t", "strings can contain quotation marks themselves. single/double quotes become important then:", "newstring=\"This is Mary's home\"\nanotherstring='And he said: \"Let there be light\"'\n\n# And if you **really** need it, both single and double quotes:\nhuge_string=\"\"\"This 'string' contains \"both\" types of quote\"\"\"", "2.4.3.1 Strings have some nice methods", "print(huge_string.upper())\nprint(huge_string.startswith('a'))\nprint(huge_string.find('contain'))\nprint(huge_string.split(' '))\nprint(huge_string.count('s'))", "2.4.4 For-loops\nRemember how while loops were prone to infinite loop bugs?\nPython gives you a chance to avoid them in most cases:", "i = 1\nwhile i < 50:\n if i%7 == 0:\n print(i)\n i += 1\n\nfor i in range(1, 50, 1):\n if i%7 == 0:\n print(i)", "2.4.4.1 Works on strings, tuples:", "count = 0\n\nst = \"home, sweet home\"\nfor char in st:\n if char == 'h':\n count += 1\nprint(\"%c appears %d times in \\\"%s\\\"\"%('h', count, st))", "2.4.4.2 Did I tell you about string formatting?", "print(\"%s is %d years old\"%('John', 34))\nprint(\"{name} is {age} years old, {name} has 3 children\".format(name='John', age=34))", "For-loops can not only iterate through sequences, but also through 'iterable' objects, like range().", "#Example: We want to sum up all numbers betwen 0 and 100. \n#Instead of manually typing a list of all numbers, we can use range: \ns = 0\nfor i in range(101):\n s = s + i\nprint(s) ", "2.4.5 List comprehensions: A short way to create a sequence.", "#long version: \"for-loop\"\nli = []\nfor i in range(100):\n li.append(i*2)\n\n#short version:\nli = [2*i for i in range(101)]\n\nprint(li)", "List comprehensions can be used as a filter:", "li = [1/i for i in range(101) if i != 0]\nprint(li)", "Also to place a default value", "li = [1/i if i!=0 else None for i in range(101) ]\nprint(li)", "And to write insanely obtuse code and mess with your collaborators :D", "nested_list = [[1,2], [3], [4,5,6]]\nflattened_list = [item for sublist in nested_list for item in sublist]\nprint(flattened_list)", "2.4.6 What are tuples for, you ask?\nThey are almost like lists, but immutable:", "l = [1,2,3,4,5]\nt = [1,2,3,4,5]\n#or, simply:\nt = tuple(l)\n\nprint(l[2:5])\nprint(t[2:5])\n\nprint(2 in l)\nprint(2 in t)\n\nl[2] = 99\nprint(l)\nt[2] = 99\nprint(t)", "What's the use of immutability? 
glad you asked\n2.4.7 dictionaries:\nstores key->value pairs:", "d={'germany':'berlin', 'france':'paris', 'poland':'warsaw', 'denmark':'copenhagen'}\n\nd['germany']", "You can add keys anytime:", "d['belgium']='brussels'", "useful things you can do with dictionaries:", "d.keys()\n\nd.values()", "Any datatype can be used as a key in dictionary, so long as it is hashable \nThumbrule: integer, float, tuple, string are OK; ~~list~~, other ~~dictionaries~~ are NOT OK", "mycrazydict = {(1,3,7):'odd!', [2,4,6]: 'even!'}", "2.5 functions:", "def triple(x):\n return x*3\n \n\ntriple(4)", "Very important: writing documentation for your code", "def triple(x):\n \"\"\"\n This function accepts a number, returns 3 times that number.\n \"\"\"\n return x*3\n \n\ntriple #Shift+tab shows documentation in ipython\n\nhelp(triple) #this also works", "when you pass any object to a function, any change done to the object will affect it globally:", "def modify(l):\n l.append(23)\n return 0\n\nl=[1,2,3]\nmodify(l)\nprint(l)", "but reassignments inside the function is not reflected globally", "def change(l):\n l=[1,2,34]\n return 0\n\nmylist=[3,6]\nchange(mylist)\n\nprint mylist", "2.6 Using modules\nOne of the most useful things about python is that there are tons of modules available, that do many of the tasks you might need to do:", "import math\nmath.sin(0.1)\n\nhelp(math)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
Vvkmnn/books
AutomateTheBoringStuffWithPython/lesson42.ipynb
gpl-3.0
[ "Lesson 42:\nReading Excel Spreadsheets\nThe openpyxl module allows you to manipulate Excel sheets within Python. \nExcel files have the following terminology: \n* A collection of sheets is a workbook, and saved with a .xlsx extension.\n* A workbook contains multiple sheets, each of which is a single spreadsheet, or worksheet. \n* Each sheet has columns and rows, defined by letters and numbers respecitvely.\n* The intersection between a column and a row is a cell.", "import openpyxl", "We must first nagivate to the directory containing the spreadsheets, which for this notebook is the subdirectory 'files'.", "# Import OS module to navigate directories\nimport os\n\n# Change the directory to the excel file location, using relative and absolute paths as previously discussed.\nos.chdir('files')\n\nos.listdir()", "We must now open the workbook file.", "workbook = openpyxl.load_workbook('example.xlsx')\ntype(workbook)", "Once the workbook is loaded, we can interact with specific sheets by loading them via workbook methods.", "sheet = workbook.get_sheet_by_name('Sheet1')\ntype(sheet)", "We can also use the .get_sheet_names() method to print all sheet names, in case we aren't sure.", "workbook.get_sheet_names()", "We can now interact with specific cells by creating cell objects, referenced via a sheet method.", "# Just references an object exists; requires an additional method to interact with\nsheet['A1']", "The .value method returns the actual value in the cell.", "cell = sheet['A1']\ncell.value", "This particular cell returns a datetime reference from Excel via Python's own datetime module. A string value is available by passing into the str() function:", "print(str(cell.value))\nprint(str(sheet['A1'].value))", "All cell values inherit their data types from Excel.", "print(\"The value in cell %s is '%s' and is type %s.\" %('A1', sheet['A1'].value, type(sheet['A1'].value)))\nprint(\"The value in cell %s is '%s' and is type %s.\" %('B1', sheet['B1'].value, type(sheet['B1'].value)))\nprint(\"The value in cell %s is '%s' and is type %s.\" %('C1', sheet['C1'].value, type(sheet['C1'].value)))", "You can also reference cells via rows and columns. Excel rows start at 1 and columns at A.", "# B1 Cell\nsheet.cell(row = 1, column = 2)", "This can be useful for iterative or looping operations.", "for i in range(1,8):\n print(i, sheet.cell(row=i, column=2).value)", "Recap\n\nThe OpenPyXl module (openpyxl) handles Excel spreadsheet files (.xlsx).\nopenpyxl.load_workbook() opens a workbook file at a location and returns a workbook object.\nThe .get_sheet_names() method on a workbook prints all the sheet names inide a workbook.\nThe .get_sheet_name() method creates a worksheet object from a sheet in the workbook.\nThe index notation (sheet['A1']) returns cell objects. \nCell objects use the .value method which allows you to see the content of the cell. \nThe cell() method also returns a cell object from a sheet, from a defined row and column." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
tkphd/pycalphad
examples/EquilibriumWithOrdering.ipynb
mit
[ "Equilibrium Properties and Partial Ordering (Al-Fe and Al-Ni)", "# Only needed in a Jupyter Notebook\n%matplotlib inline\n# Optional plot styling\nimport matplotlib\nmatplotlib.style.use('bmh')\n\nimport matplotlib.pyplot as plt\nfrom pycalphad import equilibrium\nfrom pycalphad import Database, Model\nimport pycalphad.variables as v\nimport numpy as np", "Al-Fe (Heat Capacity and Degree of Ordering)\nHere we compute equilibrium thermodynamic properties in the Al-Fe system. We know that only B2 and liquid are stable in the temperature range of interest, but we just as easily could have included all the phases in the calculation using my_phases = list(db.phases.keys()). Notice that the syntax for specifying a range is (min, max, step). We can also directly specify a list of temperatures using the list syntax, e.g., [300, 400, 500, 1400].\nWe explicitly indicate that we want to compute equilibrium values of the heat_capacity and degree_of_ordering properties. These are both defined in the default Model class. For a complete list, see the documentation. equilibrium will always return the Gibbs energy, chemical potentials, phase fractions and site fractions, regardless of the value of output.", "db = Database('alfe_sei.TDB')\nmy_phases = ['LIQUID', 'B2_BCC']\neq = equilibrium(db, ['AL', 'FE', 'VA'], my_phases, {v.X('AL'): 0.25, v.T: (300, 2000, 50), v.P: 101325},\n output=['heat_capacity', 'degree_of_ordering'])\nprint(eq)", "We also compute degree of ordering at fixed temperature as a function of composition.", "eq2 = equilibrium(db, ['AL', 'FE', 'VA'], 'B2_BCC', {v.X('AL'): (0,1,0.01), v.T: 700, v.P: 101325},\n output='degree_of_ordering')\nprint(eq2)", "Plots\nNext we plot the degree of ordering versus temperature. We can see that the decrease in the degree of ordering is relatively steady and continuous. This is indicative of a second-order transition from partially ordered B2 to disordered bcc (A2).", "plt.gca().set_title('Al-Fe: Degree of bcc ordering vs T [X(AL)=0.25]')\nplt.gca().set_xlabel('Temperature (K)')\nplt.gca().set_ylabel('Degree of ordering')\nplt.gca().set_ylim((-0.1,1.1))\n# Generate a list of all indices where B2 is stable\nphase_indices = np.nonzero(eq.Phase.values == 'B2_BCC')\n# phase_indices[2] refers to all temperature indices\n# We know this because pycalphad always returns indices in order like P, T, X's\nplt.plot(np.take(eq['T'].values, phase_indices[2]), eq['degree_of_ordering'].values[phase_indices])\nplt.show()", "For the heat capacity curve shown below we notice a sharp increase in the heat capacity around 750 K. This is indicative of a magnetic phase transition and, indeed, the temperature at the peak of the curve coincides with 75% of 1043 K, the Curie temperature of pure Fe. (Pure bcc Al is paramagnetic so it has an effective Curie temperature of 0 K.)\nWe also observe a sharp jump in the heat capacity near 1800 K, corresponding to the melting of the bcc phase.", "plt.gca().set_title('Al-Fe: Heat capacity vs T [X(AL)=0.25]')\nplt.gca().set_xlabel('Temperature (K)')\nplt.gca().set_ylabel('Heat Capacity (J/mol-atom-K)')\n# np.squeeze is used to remove all dimensions of size 1\n# For a 1-D/\"step\" calculation, this aligns the temperature and heat capacity arrays\n# In 2-D/\"map\" calculations, we'd have to explicitly select the composition of interest\nplt.plot(eq['T'].values, np.squeeze(eq['heat_capacity'].values))\nplt.show()", "To understand more about what's happening around 700 K, we plot the degree of ordering versus composition. 
Note that this plot excludes all other phases except B2_BCC. We observe the presence of disordered bcc (A2) until around 13% Al or Fe, when the phase begins to order.", "plt.gca().set_title('Al-Fe: Degree of bcc ordering vs X(AL) [T=700 K]')\nplt.gca().set_xlabel('X(AL)')\nplt.gca().set_ylabel('Degree of ordering')\n# Select all points in the datasets where B2_BCC is stable, dropping the others\neq2_b2_bcc = eq2.where(eq2.Phase == 'B2_BCC', drop=True)\nplt.plot(eq2_b2_bcc['X_AL'].values, eq2_b2_bcc['degree_of_ordering'].values.squeeze())\nplt.show()", "Al-Ni (Degree of Ordering)", "db_alni = Database('NI_AL_DUPIN_2001.TDB')\nphases = ['LIQUID', 'FCC_L12']\neq_alni = equilibrium(db_alni, ['AL', 'NI', 'VA'], phases, {v.X('AL'): 0.10, v.T: (300, 2500, 20), v.P: 101325},\n output='degree_of_ordering')\nprint(eq_alni)", "Plots\nIn the plot below we observe two phases designated FCC_L12. This is indicative of a miscibility gap. The ordered gamma-prime phase steadily decreases in amount with increasing temperature until it completely disappears around 750 K, leaving only the disordered gamma phase.", "from pycalphad.plot.utils import phase_legend\nphase_handles, phasemap = phase_legend(phases)\n\nplt.gca().set_title('Al-Ni: Phase fractions vs T [X(AL)=0.1]')\nplt.gca().set_xlabel('Temperature (K)')\nplt.gca().set_ylabel('Phase Fraction')\nplt.gca().set_ylim((0,1.1))\nplt.gca().set_xlim((300, 2000))\n\nfor name in phases:\n phase_indices = np.nonzero(eq_alni.Phase.values == name)\n plt.scatter(np.take(eq_alni['T'].values, phase_indices[2]), eq_alni.NP.values[phase_indices], color=phasemap[name])\nplt.gca().legend(phase_handles, phases, loc='lower right')", "In the plot below we see that the degree of ordering does not change at all in each phase. There is a very abrupt disappearance of the completely ordered gamma-prime phase, leaving the completely disordered gamma phase. This is a first-order phase transition.", "plt.gca().set_title('Al-Ni: Degree of fcc ordering vs T [X(AL)=0.1]')\nplt.gca().set_xlabel('Temperature (K)')\nplt.gca().set_ylabel('Degree of ordering')\nplt.gca().set_ylim((-0.1,1.1))\n# Generate a list of all indices where FCC_L12 is stable and ordered\nL12_phase_indices = np.nonzero(np.logical_and((eq_alni.Phase.values == 'FCC_L12'),\n (eq_alni.degree_of_ordering.values > 0.01)))\n# Generate a list of all indices where FCC_L12 is stable and disordered\nfcc_phase_indices = np.nonzero(np.logical_and((eq_alni.Phase.values == 'FCC_L12'),\n (eq_alni.degree_of_ordering.values <= 0.01)))\n# phase_indices[2] refers to all temperature indices\n# We know this because pycalphad always returns indices in order like P, T, X's\nplt.plot(np.take(eq_alni['T'].values, L12_phase_indices[2]), eq_alni['degree_of_ordering'].values[L12_phase_indices],\n label='$\\gamma\\prime$ (ordered fcc)', color='red')\nplt.plot(np.take(eq_alni['T'].values, fcc_phase_indices[2]), eq_alni['degree_of_ordering'].values[fcc_phase_indices],\n label='$\\gamma$ (disordered fcc)', color='blue')\nplt.legend()\nplt.show()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
GoogleCloudPlatform/training-data-analyst
quests/scientific/coastline.ipynb
apache-2.0
[ "<title>Coastline image classification using ML Toolbox and Transfer Learning</title>\n<h1>Coastline image classification using ML Toolbox and Transfer Learning</h1>\n\nWe have a bunch of aerial images of coastlines that were photographed from drones. They were then tagged into several categories based on their potential for flooding. We'd like to be able to train a machine learning model to identify the appropriate category that new drone images of a shoreline are depicting.\n<p>\nThis dataset is courtesy of Texas A&M University (See https://storage.googleapis.com/tamucc_coastline/GooglePermissionForImages_20170119.pdf for details).\n<p>\n<h2> Dataset organization </h2>\n<p>\nOrganize the dataset into two parts:\n<ol>\n<li> Upload the images to Google Cloud Storage.\n<li> Create a BigQuery table that has two columns: image_url and label. The image_url is the GCS location and the label is the true classification type.\n</ol>", "%bash\npip install pip==9.0.3\nbq mk demos\nbq rm -f demos.coastline_images\nbq load \\\n --source_format=CSV --skip_leading_rows=1 \\\n demos.coastline_images \\\n gs://tamucc_coastline/labeled_images.csv \\\n image_url:string,short_label:string,label:string", "This is what the labeled dataset looks like (To train on a different set of images, start with a BigQuery table that has these two columns).", "import google.datalab.bigquery as bq\n\nsql = \"SELECT image_url, label FROM demos.coastline_images\"\ndf = bq.Query(sql).execute().result().to_dataframe()\nprint 'Have a total of {} labeled images'.format(len(df))\ndf.head()\n\nimport matplotlib.pyplot as plt\nimport google.datalab.bigquery as bq\ndf = bq.Query(\"SELECT image_url, label FROM demos.coastline_images\").execute().result().to_dataframe()\ndf['label'].value_counts().plot(kind='bar', title='Full dataset');", "<h2> Create a smaller subset </h2>\n<p>\nNormally, of course, you would train on all your data. However, for learning purposes, we don't want you to wait hours at each stage. Therefore, let's intentionally use only a small subset of the entire dataset. When adapting this sample to work on your own data, skip over this section or comment out the subsample clause.", "%bash\nQUERY='SELECT image_url, label FROM '\nTABLE=\"${PROJECT_ID}.demos.coastline_images\"\nSUBSAMPLE='WHERE (short_label = \"2A\" OR short_label = \"3A\" OR short_label = \"9\") AND MOD(ABS(FARM_FINGERPRINT(image_url)), 10) = 0'\nbq rm -f demos.coastline_train\nbq query --use_legacy_sql=false --destination_table=demos.coastline_train \"$QUERY \\`$TABLE\\` $SUBSAMPLE\"\n\nimport matplotlib.pyplot as plt\nimport google.datalab.bigquery as bq\ntrain_df = bq.Query(\"SELECT image_url, label FROM demos.coastline_train\").execute().result().to_dataframe()\ntrain_df['label'].value_counts().plot(kind='bar', title='Training dataset');\n\nlen(train_df)", "<h2>Create bucket</h2>\n<p>\nCreate a new bucket to hold your model etc. At the end of this lab, you can delete that bucket.", "bucket = 'gs://' + datalab_project_id() + '-txf'\npreprocessed_dir = bucket + '/preprocessed'\nmodel_dir = bucket + '/model'\n\nimport subprocess\nprint 'Creating bucket named {}'.format(bucket)\nsubprocess.check_call('gsutil mb {}'.format(bucket).split());", "<h2> Preprocessing </h2>\n<p>\nNow that we have the images in Cloud Storage and the labels in BigQuery, we can use Datalab's ML Toolbox to do transfer learning. 
This consists of three steps: (1) Preprocessing (2) Training, and (3) Deploying.\n\nPreprocessing the images will resize the images, and do some image augmentation. This would take about 3 hours for the 8000 coastline images. Even on 200 images, this will take about 30 minutes", "import mltoolbox.image.classification as model\nfrom google.datalab.ml import *\nimport os\n\ntrain_set = BigQueryDataSet(\"SELECT image_url, label FROM demos.coastline_train\")\njob = model.preprocess_async(train_set, preprocessed_dir, cloud={'max_num_workers': 4})", "<b> Wait! </b>\nYou have two options to proceed:\n<ol>\n<li> Monitor the above preprocessing job, and proceed with this notebook only after prepocessing is complete. The preprocessing job took about 30 minutes for me. You can explicitly wait for the job completion by running the Option 1 cell. Make sure to uncomment the last line in the cell.\n<li> Stop the job from the Dataflow console and then copy the results of my run into your GCS bucket by running the Option 2 cell. Make sure to uncomment the last 2 lines in the cell.\n</ol>", "# option 1 above. Wait for your preprocessing job to complete.\n# If you choose this option, uncomment the next line and run this cell.\n# job.wait()\n\n%bash\n# option 2 above. Copy my preprocessing results and move on.\n# If you choose this option, uncomment the last two lines and run this cell.\nFROM=gs://cloud-training-demos/coastline/preprocessed\nTO=\"gs://${PROJECT_ID}-txf\"\n#echo \"Copying already preprocessed files from $FROM to $TO\"\n#gsutil -m cp -r $FROM $TO", "<h2> Training </h2>\n<p>\nTrain the last few layers of the inception model based on our images.", "import mltoolbox.image.classification as model\nfrom google.datalab.ml import *\njob = model.train_async(preprocessed_dir, 32, 5000, model_dir, # batchsize, numsteps\n cloud=CloudTrainingConfig('us-central1', 'BASIC'))", "Wait for the above job to complete before proceeding to next step (monitor job progress on the GCP console under ML Engine | Jobs). It should take about 15 minutes to complete.\nThe training log may contain harmless warnings and errors. Please check the message Task completed successfully at the end of the log to confirm the successful completion.\n<p>\n<h2> Evaluation </h2>", "from google.datalab.ml import *\nprint model_dir\ntb_id = TensorBoard.start(model_dir)\n\nTensorBoard.stop(tb_id)", "When I did it, I got 85% accuracy on the held-out (evaluation dataset). This is not bad considering that we started with just about 200 images!\n<p>\n<h2> Deploy and predict model </h2>\n<p>\nDeploy the trained model as a web service. This simply a short cut to the underlying gcloud commands.", "Models().create('coastline')\nModelVersions('coastline').deploy('v1', model_dir)", "Once the model is deployed, we can send along an image and we will get back the classification probabilities. (these images are not among the 200 or so images used in training or in evaluation)", "images = [\n 'gs://tamucc_coastline/esi_images/IMG_2007_SecDE_Spr12.jpg', # sheltered tidal flats\n 'gs://tamucc_coastline/esi_images/IMG_0297_SecBC_Spr12.jpg', # scarps and steep slopes in clay\n 'gs://tamucc_coastline/esi_images/IMG_3264_SecDE_Spr12.jpg', # sand-grained beaches\n]\n# resize=True because otherwise the images are too large to send for online prediction.\nmodel.predict('coastline.v1', images, resize=True, cloud=True)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
martadesimone/Protoplanetarydisks
UpperSco_Table.ipynb
gpl-2.0
[ "import numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom astropy.table import Table\nfrom astropy import units as u\nfrom astropy.modeling.blackbody import blackbody_nu", "Function to calculate the Disk Mass from the Fmm, and the corrispective Error", "def Mdisk(F,d,r,k,Td,lam):\n #Eq 2 Andrews et al. 2013 \n #F is *mm* Flux in *Jy*\n #d is the distance in *pc*\n #r is the dust to mass ratio *adimensional*\n #k is the dust opacity in *cm^2/g*\n #lam is the wavelenght in *mm*\n c=2.99792458e10 # Light velocity *cm/s*\n h=6.6260755e-27 # Planck constant in erg*s (cgs)\n kb=1.3806488e-16 # Boltzmann constant in erg/K (cgs)\n \n ni=c/(lam*0.1) #in Hz\n B_ni=1e23*2*h*ni**3/c**2*(1./(np.exp(h*ni/kb/Td)-1)) # Planck function in *Jy* --> 1Jy=1e-23erg/s/cm^2/Hz\n# B_ni=1e23*blackbody_nu(ni, Td) # Planck function in *Jy* --> 1Jy=1e-23erg/s/cm^2/Hz\n \n# return np.log10(F)+2*np.log10(d*3.08568e18)-np.log10(r*k*1.988e33)-np.log10(B_ni) #log(Disk mass/*Msol*) \n return F*(d*3.086e18)**2/k/1.988e33/r/B_ni #Disk mass in *Msol*\n \ndef eMd(errF,F,Md):\n return Md*errF/F\n\nUppSco=Table.read('UpperScorpius_Tab.fit')\n#UppSco.show_in_browser()\n\nname=UppSco['_2MASS']\nlogMs=UppSco['logM']\nup_logMs=UppSco['e_logm_lc'] #upper boundary in logM*\ndown_logMs=UppSco['e_logM'] #Lower boundary in logM*\nFmm_tab=UppSco['Snu'] #Integrated flux density at 887um\ne_Fmm=UppSco['e_Snu']\necal_Fmm=np.sqrt((e_Fmm)**2+(0.1*Fmm_tab)**2) #Error on Fmm including the calibration error(10%) in quadratic sum\n\n\nnotdelta= Fmm_tab < 3*e_Fmm #upperlimits\ndelta=np.logical_not(notdelta) #observed sources\n\nupp=UppSco['l_Mdust']\nFmm=Fmm_tab.copy()\nFmm[notdelta]=3.*e_Fmm[notdelta] # upper limit is about 3 sigma", "Calculate the Disk Mass with the function above", "d=145. # distance of Chamaleon in pc\ne_d=20. # error on distance d in pc\nr=1. #dust to gass mass ratio\nk=2.3*(338./225.4)**(0.4) #cm^2/g at 887um\nlam=0.88 #wavelenght in mm\n\nTd=20. #Temperature of the disk in K\n\nMd=Mdisk(Fmm*1e-3,d,r,k,Td,lam)/3e-6 #Fmm has to be in *Jy*; Md in Earth masses!\ne_Md=eMd(ecal_Fmm,Fmm,Md) #error on Md which include the error on flux\nadd_Md=e_Md+2.*e_d*Md/d #error on Md which include the error on flux and on the distance.\n\n#Md[notdelta]=3.*e_Md[notdelta]\n\nlogMd=np.log10(Md)\nprint len(Md)\nprim=UppSco['Type']!='Debris/Ev. Trans.' # only primordial disks not debris/evolved\n\n\nT=Table([name,logMs,up_logMs,down_logMs,upp, Md,e_Md,add_Md, Fmm, e_Fmm,ecal_Fmm], \n names=('Name','logM*','up_logM*','down_logM*','l_Md','Md_20','e_Md_20','add_Md_20', 'Fmm','e_Fmm','e+cal_Fmm')) \nT2=Table([name[prim],logMs[prim],up_logMs[prim],down_logMs[prim],upp[prim], Md[prim],e_Md[prim],add_Md[prim],\n Fmm[prim], e_Fmm[prim],ecal_Fmm[prim]], \n names=('Name','logM*','up_logM*','down_logM*','l_Md','Md_20','e_Md_20','add_Md_20', 'Fmm','e_Fmm','e+cal_Fmm')) \n\nT2.write('UppSco_Tab.fit', format='fits', overwrite='True')" ]
[ "code", "markdown", "code", "markdown", "code" ]
mne-tools/mne-tools.github.io
0.17/_downloads/9460321824116e4964fbe6d88d27462e/plot_cluster_stats_evoked.ipynb
bsd-3-clause
[ "%matplotlib inline", "Permutation F-test on sensor data with 1D cluster level\nOne tests if the evoked response is significantly different\nbetween conditions. Multiple comparison problem is addressed\nwith cluster level permutation test.", "# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>\n#\n# License: BSD (3-clause)\n\nimport matplotlib.pyplot as plt\n\nimport mne\nfrom mne import io\nfrom mne.stats import permutation_cluster_test\nfrom mne.datasets import sample\n\nprint(__doc__)", "Set parameters", "data_path = sample.data_path()\nraw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'\nevent_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'\nevent_id = 1\ntmin = -0.2\ntmax = 0.5\n\n# Setup for reading the raw data\nraw = io.read_raw_fif(raw_fname)\nevents = mne.read_events(event_fname)\n\nchannel = 'MEG 1332' # include only this channel in analysis\ninclude = [channel]", "Read epochs for the channel of interest", "picks = mne.pick_types(raw.info, meg=False, eog=True, include=include,\n exclude='bads')\nevent_id = 1\nreject = dict(grad=4000e-13, eog=150e-6)\nepochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n baseline=(None, 0), reject=reject)\ncondition1 = epochs1.get_data() # as 3D matrix\n\nevent_id = 2\nepochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n baseline=(None, 0), reject=reject)\ncondition2 = epochs2.get_data() # as 3D matrix\n\ncondition1 = condition1[:, 0, :] # take only one channel to get a 2D array\ncondition2 = condition2[:, 0, :] # take only one channel to get a 2D array", "Compute statistic", "threshold = 6.0\nT_obs, clusters, cluster_p_values, H0 = \\\n permutation_cluster_test([condition1, condition2], n_permutations=1000,\n threshold=threshold, tail=1, n_jobs=1)", "Plot", "times = epochs1.times\nplt.close('all')\nplt.subplot(211)\nplt.title('Channel : ' + channel)\nplt.plot(times, condition1.mean(axis=0) - condition2.mean(axis=0),\n label=\"ERF Contrast (Event 1 - Event 2)\")\nplt.ylabel(\"MEG (T / m)\")\nplt.legend()\nplt.subplot(212)\nfor i_c, c in enumerate(clusters):\n c = c[0]\n if cluster_p_values[i_c] <= 0.05:\n h = plt.axvspan(times[c.start], times[c.stop - 1],\n color='r', alpha=0.3)\n else:\n plt.axvspan(times[c.start], times[c.stop - 1], color=(0.3, 0.3, 0.3),\n alpha=0.3)\nhf = plt.plot(times, T_obs, 'g')\nplt.legend((h, ), ('cluster p-value < 0.05', ))\nplt.xlabel(\"time (ms)\")\nplt.ylabel(\"f-values\")\nplt.show()" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
kikocorreoso/tutormagic
examples/Examples.ipynb
mit
[ "Examples of the options\nThis is a notebook example to show the abilities of tutormagic. You can install the extension using pip or conda.\npython\npip install tutormagic\nor\npython\nconda install tutormagic", "%load_ext tutormagic", "Option language: -l or --lang\nChoose the language you want to use for pythontutor.com. Available options are:\n\npython2\npython3\npy3anaconda (experimental and unsupported)\njava\njavascript\ntypescript\nruby\nc\nc++\n\nIf this option is not used the default value is python3.\nExample:", "%%tutor -l python3\na = 1\nb = 2\nprint(a + b)", "Option height: -h or --height\nChange the height of the output area display in pixels.\nExample:", "%%tutor -l python3 -h 100\na = 1\nb = 2\nprint(a + b)", "Option tab: -t or --tab\nOpen pythontutor in a new tab. height option will be ignored if tab option is used.\nExample:", "%%tutor -l python3 -t\na = 1\nb = 2\nprint(a + b)", "Option secure: -s or --secure\nOpen pythontutor using https in a new tab. height and tab options will be ignored if secure option is used.\nExample:", "%%tutor -l python3 -s\na = 1\nb = 2\nprint(a + b)", "Option link: -k or --link\nJust display a link to pythontutor with your defined code.\nExample:", "%%tutor -k\na = 1\nb = 1\nprint(a + b)", "Option run: -r or --run\nUse this option if you also want to run the code in the cell in the notebook.\nExample:", "%%tutor -r\na = 1\nb = 1\nprint(\"This is run in the notebook as well as in PythonTutor: \", a + b)\nprint(\"So, you have access to 'a' and 'b' vars...\")\n\nprint(\"the value of a from the previous cell is: \", a)", "Option cumulative: --cumulative\nPythonTutor config: Set the cumulative option to True.\nExample:", "%%tutor --cumulative\ndef func():\n return 10\na = func()\nprint(a)", "Option heapPrimitives: --heapPrimitives\nRender objects on the heap.\nExample:", "%%tutor --heapPrimitives\na = 1\nb = 1\nprint(a + b)", "Option textReferences: --textReferences\nUse text labels for references.\nExample:", "%%tutor --textReferences --heapPrimitives\na = 1\nb = 1\nprint(a + b)", "Option curInstr: --curInstr\nPythonTutor config: Start at the defined step.\nExample:", "%%tutor --curInstr 2\na = 1\nb = 1\nprint(a + b)", "Option verticalStack: --verticalStack\nPythonTutor config: Set visualization to stack atop one another.\nExample:", "%%tutor --verticalStack\na = 1\nb = 1\nprint(a + b)", "Option lang py3anaconda: --lang py3anaconda\nThis option allows you to import more modules like numpy. It is experimental and unsupported (check pythontutor web page for more information).\nExample:", "%%tutor --lang py3anaconda\nimport numpy as np\n\narr = np.arange(10)\n\nprint(arr * 10)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
oditorium/blog
iPython/Error-Estimation-for-Survey-Data.ipynb
agpl-3.0
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom scipy.stats import norm", "Error Estimation for Survey Data\nthe issue we have is the following: we are drawing indendent random numbers from a binary distribution of probability $p$ (think: the probability of a certain person liking the color blue) and we have two groups (think: male and female). Those two groups dont necessarily have the same size.\nThe question we ask is what difference we can expect in the spread of the ex-post estimation of $p$\nWe first define our population parameters", "N_people = 500\nratio_female = 0.30\nproba = 0.40", "Closed Form Approximation\nof course we could have done this analytically using Normal approximation: we have two independent Normal random variables, both with expectation $p$. The variance of the $male$ variable is $p(1-p)/N_{male}$ and the one of the female one accordingly. The overall variance of the difference (or sum, it does not matter here because they are uncorrelated) is\n$$\nvar = p(1-p)\\times \\left(\\frac{1}{N_{male}} + \\frac{1}{N_{female}}\\right)\n$$\nUsing the female/male ratio $r$ instead we can write for the standard deviation\n$$\nsd = \\sqrt{var} = \\sqrt{\\frac{1}{N}\\frac{p(1-p)}{r(1-r)}}\n$$\nmeaning that we expect the difference in estimators for male and female of the order of $sd$", "def the_sd(N, p, r):\n N = float(N)\n p = float(p)\n r = float(r)\n \n return sqrt(1.0/N*(p*(1.0-p))/(r*(1.0-r)))\n\ndef sd_func_factory(N,r):\n \n def func(p):\n return the_sd(N,p,r)\n \n return func\n\nf = sd_func_factory(N_people, ratio_female)\nf2 = sd_func_factory(N_people/2, ratio_female)", "Thats the one-standard deviation range about the estimator. For example: if the underlying probability is $0.25=25\\%$ then the difference between the estimators for the male and the female group is $4.2\\%$ for the full group (sd), or $5.9\\%$ for if only half of the people replied (sd2)", "p = linspace(0,0.25,5)\nf = sd_func_factory(N_people, ratio_female)\nf2 = sd_func_factory(N_people/2, ratio_female)\nsd = list(map(f, p))\nsd2 = list(map(f2, p))\npd.DataFrame(data= {'p':p, 'sd':sd, 'sd2':sd2})", "that's the same relationship as a plot", "p = linspace(0,0.25,50)\nsd = list(map(f, p))\nsd2 = list(map(f2, p))\nplot (p,p, 'k')\nplot (p,p-sd, 'g--')\nplot (p,p+sd, 'g--')\nplot (p,p-sd2, 'r--')\nplot (p,p+sd2, 'r--')\ngrid(b=True, which='major', color='k', linestyle='--')", "For reference, the 2-sided tail probabilites as a function of $z$ (the way to read it is as follows: the probability of a Normal distribution being 2 standard deviations away from its mean to either side is about 0.05, or 5%). Saying it the other way round, a two-standard-deviation difference corresponds to about 95% confidence", "z=linspace(1.,3,100)\nplot(z,1. - (norm.cdf(z)-norm.cdf(-z)))\ngrid(b=True, which='major', color='k', linestyle='--')\nplt.title(\"Probability of being beyond Z (2-sided) vs Z\")", "Using Monte Carlo\nwe also need some additional parameters for our Monte Carlo", "number_of_tries = 1000", "We do some intermediate calculations...", "N_female = int (N_people * ratio_female)\nN_male = N_people - N_female", "...and then generate our random numbers...", "data_male = np.random.binomial(n=1, p=proba, size=(number_of_tries, N_male))\ndata_female = np.random.binomial(n=1, p=proba, size=(number_of_tries, N_female))", "...that we then reduce in one dimension (ie, over that people in the sample) to obtain our estimator for the probas for males and females as well as the difference. 
Finally, for the differences we look at the mean (which should be close to zero) and the standard deviation (which should be consistent with the closed-form numbers above)", "proba_male = map(np.mean, data_male)\nproba_female = map(np.mean, data_female)\nproba_diff = list((pm-pf) for pm,pf in zip(proba_male, proba_female))\nnp.mean(proba_diff), np.std(proba_diff)" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
geoscixyz/computation
docs/case-studies/DCIP/Kevitsa_DC.ipynb
mit
[ "Kevitsa DC Forward Similation", "import cPickle as pickle\nfrom SimPEG import EM, Mesh, Utils, Maps\nfrom SimPEG.Survey import Data\n%pylab inline\nimport numpy as np\nfrom pymatsolver import PardisoSolver\nfrom matplotlib.colors import LogNorm\nfrom ipywidgets import interact, IntSlider", "Setup\nWe have stored the data and simulation mesh so that they can just be downloaded and used here", "url = \"https://storage.googleapis.com/simpeg/kevitsa_synthetic/\"\nfiles = ['dcipdata_12150N.txt', 'dc_mesh.txt', 'dc_sigma.txt', 'dc_topo.txt']\nkeys = ['data', 'mesh', 'sigma', 'topo']\ndownloads = Utils.download([url + f for f in files], folder='./KevitsaDC', overwrite=True)\ndownloads = dict(zip(keys, downloads))\n\nmesh = Mesh.TensorMesh.readUBC(downloads[\"mesh\"])\nsigma = mesh.readModelUBC(downloads[\"sigma\"])\ntopo = np.loadtxt(downloads[\"topo\"])\ndcipdata = np.loadtxt(downloads[\"data\"])\nactind = ~np.isnan(sigma)\nmesh.plotGrid()", "Model\nThis model is a synthetic based on geologic surfaces interpreted from seismic data over the Kevitsa deposit in Finland. Synthetic 3D conductivity model is generated, and below figure shows conductivity section acrosses the mineralzined zone of interest. Nearsurface conductor on the lefthand side corresponds to sedimentary unit, and embedded conductor on the righthand side indicates conductive mineralized zone.", "figsize(8, 4)\nindy = 6\ntemp = 1./sigma.copy()\ntemp[~actind] = np.nan\nout = mesh.plotSlice(temp, normal=\"Y\", ind=indy, pcolorOpts={\"norm\": LogNorm(), \"cmap\":\"jet_r\"}, clim=(1e0, 1e3))\nplt.ylim(-800, 250)\nplt.xlim(5000, 11000)\nplt.gca().set_aspect(2.)\nplt.title((\"y= %d m\")%(mesh.vectorCCy[indy]))\ncb = plt.colorbar(out[0], orientation=\"horizontal\")\ncb.set_label(\"Resistivity (Ohm-m)\")", "Survey\nDirect current (DC) resisistivity and IP survey have been perforemd by using Titan24 system; pole-dpole array was used. We use a same survey set up at 12150N line, having 61 current sources (poles). Largest offset between current pole and potential eletrodes are around 2 km. 
We read in field data using below script, and form a DC survey object that we can pass to our DC problem.", "def getGeometricFactor(locA, locB, locsM, locsN, eps = 0.01): \n \"\"\"\n Geometric factor for a pole-dipole survey\n \"\"\"\n MA = np.abs(locA[0] - locsM[:, 0]) \n MB = np.abs(locB[0] - locsM[:, 0]) \n NA = np.abs(locA[0] - locsN[:, 0]) \n NB = np.abs(locB[0] - locsN[:, 0]) \n geometric = 1./(2*np.pi) * (1/MA - 1/NA)\n return geometric\n\n\nA = dcipdata[:,:2]\nB = dcipdata[:,2:4]\nM = dcipdata[:,4:6]\nN = dcipdata[:,6:8]\n\nElec_locs = np.vstack((A, B, M, N))\nuniqElec = Utils.uniqueRows(Elec_locs)\nnElec = len(uniqElec[1])\npts = np.c_[uniqElec[0][:,0], uniqElec[0][:,1]]\nelec_topo = EM.Static.Utils.drapeTopotoLoc(mesh, pts[:,:2], actind=actind)\nElec_locsz = np.ones(Elec_locs.shape[0]) * np.nan\n\nfor iElec in range (nElec):\n inds = np.argwhere(uniqElec[2] == iElec)\n Elec_locsz[inds] = elec_topo[iElec,2] \n \nElec_locs = np.c_[Elec_locs, Elec_locsz]\nnloc = int(Elec_locs.shape[0]/4)\nA = Elec_locs[:nloc]\nB = Elec_locs[nloc:2*nloc]\nM = Elec_locs[2*nloc:3*nloc]\nN = Elec_locs[3*nloc:4*nloc]\n\nuniq = Utils.uniqueRows(np.c_[A, B])\nnSrc = len(uniq[1])\nmid_AB = A[:,0]\nmid_MN = (M[:,0] + N[:,0]) * 0.5\nmid_z = -abs(mid_AB - mid_MN) * 0.4\nmid_x = abs(mid_AB + mid_MN) * 0.5\n\nsrcLists = []\nappres = []\ngeometric = []\nvoltage = []\ninds_data = []\n\nfor iSrc in range (nSrc):\n inds = uniq[2] == iSrc\n # TODO: y-location should be assigned ...\n locsM = M[inds,:]\n locsN = N[inds,:] \n inds_data.append(np.arange(len(inds))[inds])\n rx = EM.Static.DC.Rx.Dipole(locsM, locsN) \n locA = uniq[0][iSrc,:3]\n locB = uniq[0][iSrc,3:] \n src = EM.Static.DC.Src.Pole([rx], locA) \n# src = EM.Static.DC.Src.Dipole([rx], locA, locB) \n geometric.append(getGeometricFactor(locA, locB, locsM, locsN)) \n appres.append(dcipdata[:,8][inds])\n voltage.append(dcipdata[:,9][inds])\n srcLists.append(src)\ninds_data = np.hstack(inds_data)\ngeometric = np.hstack(geometric)\ndobs_appres = np.hstack(appres)\ndobs_voltage = np.hstack(voltage) * 1e-3\nDCsurvey = EM.Static.DC.Survey(srcLists)\nDCsurvey.dobs = dobs_voltage", "Problem\nThis is a physics behind DC resistivity survey. Here we solve Poisson's equation and compute potential in our discretized domain. Survey information is required to run simulation.", "m0 = np.ones(actind.sum())*np.log(1e-3)\nactMap = Maps.InjectActiveCells(mesh, actind, np.log(1e-8))\nmapping = Maps.ExpMap(mesh) * actMap\nproblem = EM.Static.DC.Problem3D_N(mesh, sigmaMap=mapping)\nproblem.Solver = PardisoSolver\nif DCsurvey.ispaired:\n DCsurvey.unpair()\nproblem.pair(DCsurvey)", "Forward Simulation\nThings are set. Now we can run simulaton by passing conductivity model to the DC problem.", "f = problem.fields(np.log(sigma)[actind])\ndpred = DCsurvey.dpred(np.log(sigma)[actind], f=f)\nappres = dpred / geometric\ndcdata = Data(DCsurvey, v=dpred)\nappresdata = Data(DCsurvey, v=appres)", "Plot the Data\nWe are going to plot simulated data for each current pole. By moving slider bar below, you can explore the data at different current pole location. 
We provide both voltage and apparent resistivity.", "def vizdata(isrc):\n fig = plt.figure(figsize = (7, 2))\n src = srcLists[isrc]\n rx = src.rxList[0] \n data_temp = dcdata[src, rx]\n appres_temp = appresdata[src, rx]\n midx = (rx.locs[0][:,0] + rx.locs[1][:,0]) * 0.5\n midz = (rx.locs[0][:,2] + rx.locs[1][:,2]) * 0.5\n ax = plt.subplot(111)\n ax_1 = ax.twinx()\n ax.plot(midx, data_temp, 'k.-')\n ax_1.plot(midx, appres_temp, 'r.-')\n ax.set_xlim(5000, 11000)\n ax.set_ylabel(\"Voltage\")\n ax_1.set_ylabel(\"$\\\\rho_a$ (Ohm-m)\")\n ax.grid(True)\n plt.show()\ninteract(vizdata, isrc=(0, DCsurvey.nSrc-1, 1))", "Plot the currents\nDid you understand simluated data? why they are changing? Here we show how currents flow in the earth medium. Similarly, you can move slider bar to see how current changes depending upon current source location.", "fig = plt.figure(figsize = (7, 1.5))\ndef vizJ(isrc):\n indy = 6\n src = srcLists[isrc]\n rx = src.rxList[0]\n out = mesh.plotSlice(f[src, 'j'], vType=\"E\", normal=\"Y\", view=\"vec\", ind=indy, streamOpts={\"color\":\"k\"}, pcolorOpts={\"norm\": LogNorm(), \"cmap\":\"viridis\"}, clim=(1e-10, 1e-4))\n plt.plot(src.loc[0], src.loc[1], 'ro')\n plt.ylim(-800, 250)\n plt.xlim(5000, 11000)\n plt.gca().set_aspect(2.)\n # plt.title((\"y= %d m\")%(mesh.vectorCCy[indy]))\n plt.title(\"\")\n cb = plt.colorbar(out[0], orientation=\"horizontal\")\n cb.set_label(\"Current density (A/m$^2$)\")\n midx = (rx.locs[0][:,0] + rx.locs[1][:,0]) * 0.5\n midz = (rx.locs[0][:,2] + rx.locs[1][:,2]) * 0.5\n plt.plot(midx, midz, 'g.', ms=4)\n plt.gca().get_xlim()\n plt.show()\ninteract(vizJ, isrc=(0, DCsurvey.nSrc-1, 1))", "Plot Pseudo section\nWe are going to plot simulated data for each current pole. By moving slider bar below, you can explore the data at different current pole location.", "vmin, vmax = 1, 1e4\nappres = dpred/geometric\ntemp = appres.copy()\nUtils.plot2Ddata(np.c_[mid_x[inds_data], mid_z[inds_data]], temp, ncontour=100, dataloc=True, scale=\"log\", contourOpts={\"vmin\":np.log10(vmin), \"vmax\":np.log10(vmax)})\ncb = plt.colorbar(out[0], orientation=\"horizontal\", format=\"1e%.0f\", ticks=np.linspace(np.log10(vmin), np.log10(vmax), 3))\ncb.set_label(\"Resistivity (Ohm-m)\")\n# plt.title(\"Line 12150N\")", "Plot Field data (pseudo-section)\nLet's see how the field data looks like on this line (12150N). Are they similar with our simulated data?", "vmin, vmax = 1, 1e4\ntemp = dcipdata[:,8].copy()\ntemp[dcipdata[:,8]<vmin] = vmin\ntemp[dcipdata[:,8]>vmax] = vmax\nout = Utils.plot2Ddata(np.c_[mid_x[inds_data], mid_z[inds_data]], temp[inds_data], ncontour=100, dataloc=True, scale=\"log\")\ncb = plt.colorbar(out[0], orientation=\"horizontal\", format=\"1e%.0f\", ticks=np.linspace(np.log10(vmin), np.log10(vmax), 3))\ncb.set_label(\"Resistivity (Ohm-m)\")\n# plt.title(\"Line 12150N\")" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
Jhanelle/Jhanelle_New_Version_of_final_project
bin/Compiled_Codes_for_Final_Project.ipynb
mit
[ "Loading Data\nStatistis for my data", "# Identitfy version of software used\npd.__version__\n\n#Identify version of software used\nnp.__version__\n\n# import libraries\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n#stats library\n\nimport statsmodels.api as sm\nimport scipy\n\n#T-test is imported to complete the statistical analysis\n\nfrom scipy.stats import ttest_ind\n\nfrom scipy import stats\n\n#The function below is used to show the plots within the notebook\n\n%matplotlib inline", "Loading Data using Pandas", "data=pd.read_csv('../data/Testdata.dat', delimiter=' ')\n\n# Print the first 8 rows of the dataset \ndata.head(8)\n\n#Print the last 8 rows of the dataset \ndata.tail(8)\n\n# Commands used to check the title names in each column as some of the titles were omitted\ndata.dtypes.head()\n", "Hypothesis and Questions\n..........\nThings I need to do:\nFormat the date properly. I need help do use regular expression to fix the format of my date", "# Here we extract only two columns from the data as these are the main variables for the statistcal analysis\n\nstrain_df=data[['strain','speed']]\nstrain_df.head()\n\n# Eliminate NaN from the dataset \n\nstrain_df=strain_df.dropna()\nstrain_df.head()\n\n#Resample the data to group by strain\n\nstrain_resampled=strain_df.groupby('strain')\nstrain_resampled.head()\n\n#Created a histogram to check the normal distribution the data\n\nstrain_resampled.hist(column='speed', bins=50)\n\n# I need help adding titles to these histograms", "Interpretation of Histograms\nBased on the histograms of the respective strain it is clear that the data does not follow a normal distribution.\nTherefore t-tests and linear regression cannot be applied to this data set as planned.", "# I know I should applu Apply Kruskal. wallis statistics to data, however I am not sure how to deal with the array\n\nscipy.stats.mstats.kruskalwallis( , )\n\ndef test_mean1():\n '''This function created to give the mean values for the different strains in the dataset. \n The input of the function is the raw speed of the strains \n while the output is the mean of the strains tested'''\n mean=strain_resampled.mean()\n assert mean > -1, 'The mean should be greater than 0.00'\n return(mean)\n #assert mean == speed > 0.00, 'The mean should be greater than 0'\n #assert mean == speed < 0.00, 'The mean should not be less than 0'\n \n\ndef test_mean1():\n '''This function created to give the mean values for the different strains in the dataset. 
\n The input of the function is the raw speed of the strains \n while the output is the mean of the strains tested'''\n mean=MX1027_mean.mean()\n assert mean < -1, 'The mean should be greater than 0.00'\n return(mean)\n #assert mean == speed > 0.00, 'The mean should be greater than 0'\n #assert mean == speed < 0.00, 'The mean should not be less than 0'\n\nMX1027=strain_df.groupby('strain').get_group('MX1027')\n\nMX1027.head()\n\nMX1027.mean()\n\nN2=strain_df.groupby('strain').get_group('N2')\n\nN2.head()\n\nN2.mean()\n\ndef test_mean():\n\n n=('N2')\n for n in N2:\n assert n == -1, 'The mean is greater than 0'\n assert n > 0, 'Yes, the mean is greater than 0'\n \n mean = N2.mean()\n \n return(mean)\n\ndef test_mean2():\n \n n= ('MX1027')\n \n for n in MX1027:\n assert n >= 0.1, \"The mean is greater than 0.1\"\n \n mean_2 = MX1027.mean()\n \n return (mean_2)\n print('mean is:', mean_2)\n\ntest_mean()\n\ntest_mean2()\n\nN2.mean()\n\nprint(MX1027_mean.mean())\n\nmean()\n\nMX1027_mean=['0.127953']\n\nN2_mean= ['0.084662']\n\nnew_data= strain_df.iloc[3,1]\n\nnew_data1= strain_df.iloc[4,1]\n\nnew_data.mean()\n\nnew_data\n\nnew_data.mean()\n\n#def mean():\n #mean=strain_resampled.mean()\n #return(mean)\n\n#mean()\n\n# more generic fucntion to find the mean\n\ndef hope_mean(strain_df, speed):\n n= len(strain_df)\n if n == 0.127953:\n return 0.127953\n hope_mean =(sum(strain_df.speed))/n\n print(hope_mean)\n return hope_mean\n\n#Create test functions of the mean of N2\n\ndef test_mean1():\n \"\"\"The input of the function is the mean of N2\nwhere the output is the expected mean\"\"\"\n #obs = N2.mean()\n #exp = 0.084662\n #assert obs == exp, ' The mean of N2 should be 0.084662'\n \n#Create test function for the mean of MX1027\n\n#def test_mean2():\n #\"\"\"The input of the function is the mean of MX1027\n #where the output is the expected mean\"\"\"\n #obs = MX1027.mean()\n #exp= 0.127953\n #assert obs == exp, ' The mean of MX1027 should be 0.127953'" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
roatienza/Deep-Learning-Experiments
versions/2022/tools/python/input_demo.ipynb
mit
[ "Load and visualize input data using torchvision and torchaudio\nLet us load the python modules first.", "import torchaudio\nimport torchvision\nimport torch\nimport matplotlib.pyplot as plt\nfrom IPython.display import Audio", "torchvision for loading input images\nThis is done through the torchvision.io.read_image function which returns a torch.Tensor of the image.", "img = torchvision.io.read_image(\"data/birdie2.jpg\")\nimg = torchvision.transforms.ToPILImage()(img)\ndisplay(img)", "Visualize the transforms done before model prediction\nIn an earlier example, we used the torchvision.transforms module to apply a series of transformations to the input image before passing it to the model. Let us visualize the resulting image.", "img = torchvision.io.read_image(\"data/birdie2.jpg\")\nnormalize = torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\nimg = torchvision.transforms.Resize(256)(img)\nimg = torchvision.transforms.CenterCrop(224)(img).float()/255.\nimg = normalize(img)\nimg = (img * 255).to(torch.uint8)\nimg = torchvision.transforms.ToPILImage()(img)\ndisplay(img)", "plot_waveform function from torchaudio\nThis function accepts raw waveform and sampling rate to plot the waveform for visualization.", "def plot_waveform(waveform, sample_rate, title=\"Waveform\", xlim=None, ylim=None):\n waveform = waveform.numpy()\n\n num_channels, num_frames = waveform.shape\n time_axis = torch.arange(0, num_frames) / sample_rate\n\n figure, axes = plt.subplots(num_channels, 1)\n if num_channels == 1:\n axes = [axes]\n for c in range(num_channels):\n axes[c].plot(time_axis, waveform[c], linewidth=1)\n axes[c].grid(True)\n if num_channels > 1:\n axes[c].set_ylabel(f'Channel {c+1}')\n if xlim:\n axes[c].set_xlim(xlim)\n if ylim:\n axes[c].set_ylim(ylim)\n figure.suptitle(title)\n plt.show(block=False)\n", "Using torchaudio to load audio and display waveform using plot_waveform\nThis is an alternative method to librosa for loading audio.", "wav_path = \"data/ljspeech.wav\"\nmetadata = torchaudio.info(wav_path)\nprint(metadata)\n\nwav, sample_rate = torchaudio.load(wav_path)\ndisplay(Audio(wav, rate=sample_rate))\nplot_waveform(wav, sample_rate)\n" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ChrisRucker/Samples
CRUCK v1.ipynb
mit
[ "<hr>\n<p><center>CHRISTOPHER</center><br><font size=\"2\"><center>ASSOCIATE DATA SCIENTIST</center></font>\n<br><center>RUCKER</center></p>\n<hr>\n\n<p><font size=\"6\"><em>-</em> LOADDATA <em>-</em></font></p>", "pwd\n\nimport pandas as pd\n\ndf = pd.read_csv('data.csv')\n\ndf.tail()", "<p><font size=\"6\"><em>-</em> CLEANDATA <em>-</em></font></p>", "modelRatings = df.pivot_table(index=['EQP_LOCAL_EQP'],columns=['EQP_MODEL_EQP'],values='EQP_FAIL_CNT_EQP').iloc[:, 1:10]\nmodelRatings.head()", "<p><font size=\"6\"><em>-</em> SUBRATINGS <em>-</em></font></p>", "MCARD9060Ratings = modelRatings['MCARD9060']\nMCARD9060Ratings.head()", "<p><font size=\"6\"><em>-</em> PAIRWISE <em>-</em></font></p>", "similarModels = modelRatings.corrwith(MCARD9060Ratings)\nsimilarModels = similarModels.dropna()\ndf = pd.DataFrame(similarModels)\ndf.head(20)", "<p><font size=\"6\"><em>-</em> SCORE <em>-</em></font></p>", "similarModels.sort_values(ascending=False).head(20)", "<p><font size=\"6\"><em>-</em> WRITEUP <em>-</em></font></p>\n\n<p><font size=\"4\">Forecast equipment failure ranked by pairwise correlation using criteria such as model number, subscriber number, and failure count.\n\nA dataset of ~400K tuple using model AHTC8717 correlated with model MCARD9060 are the covariants.\n\nThe algorithm found 6 out of the top 10 equipment failure issues for 60% accuracy</p>" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
gregcaporaso/short-read-tax-assignment
ipynb/mock-community/taxonomy-assignment-blast+.ipynb
bsd-3-clause
[ "Data generation: using python to sweep over methods and parameters\nThis notebook demonstrates taxonomy classification using blast+ followed by consensus assignment in QIIME2's q2-feature-classifier.\nEnvironment preparation", "from os.path import join, expandvars\nfrom joblib import Parallel, delayed\nfrom glob import glob\nfrom os import system\nfrom tax_credit.framework_functions import (parameter_sweep,\n generate_per_method_biom_tables,\n move_results_to_repository)\n\n\nproject_dir = expandvars(\"$HOME/Desktop/projects/tax-credit\")\nanalysis_name= \"mock-community\"\ndata_dir = join(project_dir, \"data\", analysis_name)\n\nreference_database_dir = expandvars(\"$HOME/Desktop/ref_dbs/\")\nresults_dir = expandvars(\"$HOME/Desktop/projects/mock-community/\")", "Preparing data set sweep\nFirst, we're going to define the data sets that we'll sweep over. The following cell does not need to be modified unless if you wish to change the datasets or reference databases used in the sweep.", "dataset_reference_combinations = [\n ('mock-1', 'gg_13_8_otus'), # formerly S16S-1\n ('mock-2', 'gg_13_8_otus'), # formerly S16S-2\n ('mock-3', 'gg_13_8_otus'), # formerly Broad-1\n ('mock-4', 'gg_13_8_otus'), # formerly Broad-2\n ('mock-5', 'gg_13_8_otus'), # formerly Broad-3\n ('mock-6', 'gg_13_8_otus'), # formerly Turnbaugh-1\n ('mock-7', 'gg_13_8_otus'), # formerly Turnbaugh-2\n ('mock-8', 'gg_13_8_otus'), # formerly Turnbaugh-3\n ('mock-9', 'unite_20.11.2016_clean_fullITS'), # formerly ITS1\n ('mock-10', 'unite_20.11.2016_clean_fullITS'), # formerly ITS2-SAG\n ('mock-12', 'gg_13_8_otus'), # Extreme\n ('mock-13', 'gg_13_8_otus_full16S_clean'), # kozich-1\n ('mock-14', 'gg_13_8_otus_full16S_clean'), # kozich-2\n ('mock-15', 'gg_13_8_otus_full16S_clean'), # kozich-3\n ('mock-16', 'gg_13_8_otus'), # schirmer-1\n ('mock-18', 'gg_13_8_otus'),\n ('mock-19', 'gg_13_8_otus'),\n ('mock-20', 'gg_13_8_otus'),\n ('mock-21', 'gg_13_8_otus'),\n ('mock-22', 'gg_13_8_otus'),\n ('mock-23', 'gg_13_8_otus'),\n ('mock-24', 'unite_20.11.2016_clean_fullITS'),\n ('mock-25', 'unite_20.11.2016_clean_fullITS'),\n ('mock-26-ITS1', 'unite_20.11.2016_clean_fullITS'),\n ('mock-26-ITS9', 'unite_20.11.2016_clean_fullITS'),\n]\n\nreference_dbs = {'gg_13_8_otus_clean' : (join(reference_database_dir, 'gg_13_8_otus/99_otus_clean_515f-806r.qza'),\n join(reference_database_dir, 'gg_13_8_otus/taxonomy/99_otu_taxonomy.qza')),\n 'gg_13_8_otus' : (join(reference_database_dir, 'gg_13_8_otus/rep_set/99_otus_515f-806r_trim250.qza'), \n join(reference_database_dir, 'gg_13_8_otus/taxonomy/99_otu_taxonomy.qza')),\n 'gg_13_8_otus_full16S_clean' : (join(reference_database_dir, 'gg_13_8_otus/99_otus_clean.qza'), \n join(reference_database_dir, 'gg_13_8_otus/taxonomy/99_otu_taxonomy.qza')),\n 'gg_13_8_otus_full16S' : (join(reference_database_dir, 'gg_13_8_otus/rep_set/99_otus.qza'), \n join(reference_database_dir, 'gg_13_8_otus/taxonomy/99_otu_taxonomy.qza')),\n 'unite_20.11.2016_clean_fullITS' : (join(reference_database_dir, 'sh_qiime_release_20.11.2016/developer/sh_refs_qiime_ver7_99_20.11.2016_dev_clean.qza'), \n join(reference_database_dir, 'sh_qiime_release_20.11.2016/developer/sh_taxonomy_qiime_ver7_99_20.11.2016_dev_clean.qza')),\n 'unite_20.11.2016_clean' : (join(reference_database_dir, 'sh_qiime_release_20.11.2016/developer/sh_refs_qiime_ver7_99_20.11.2016_dev_clean_ITS1Ff-ITS2r.qza'), \n join(reference_database_dir, 'sh_qiime_release_20.11.2016/developer/sh_taxonomy_qiime_ver7_99_20.11.2016_dev.qza')),\n 'unite_20.11.2016' : 
(join(reference_database_dir, 'sh_qiime_release_20.11.2016/developer/sh_refs_qiime_ver7_99_20.11.2016_dev_ITS1Ff-ITS2r_trim250.qza'), \n join(reference_database_dir, 'sh_qiime_release_20.11.2016/developer/sh_taxonomy_qiime_ver7_99_20.11.2016_dev.qza'))}\n", "Preparing the method/parameter combinations and generating commands\nNow we set the methods and method-specific parameters that we want to sweep. Modify to sweep other methods. Note how method_parameters_combinations feeds method/parameter combinations to parameter_sweep() in the cell below.", "method_parameters_combinations = {\n 'blast+' : {'p-evalue': [0.001],\n 'p-maxaccepts': [1, 10, 100],\n 'p-perc-identity': [0.80, 0.97, 0.99],\n 'p-min-consensus': [0.51, 0.75, 0.99]}\n }", "Now enter the template of the command to sweep, and generate a list of commands with parameter_sweep().\nFields must adhere to following format:\n {0} = output directory\n {1} = input data\n {2} = reference sequences\n {3} = reference taxonomy\n {4} = method name\n {5} = other parameters", "command_template = \"mkdir -p {0}; qiime feature-classifier blast --i-query {1} --o-classification {0}/rep_seqs_tax_assignments.qza --i-reference-reads {2} --i-reference-taxonomy {3} {5}; qiime tools export {0}/rep_seqs_tax_assignments.qza --output-dir {0}\"\n \ncommands = parameter_sweep(data_dir, results_dir, reference_dbs,\n dataset_reference_combinations,\n method_parameters_combinations, command_template,\n infile='rep_seqs.qza', output_name='rep_seqs_tax_assignments.qza')\n", "As a sanity check, we can look at the first command that was generated and the number of commands generated.", "print(len(commands))\ncommands[0]", "Finally, we run our commands.", "Parallel(n_jobs=4)(delayed(system)(command) for command in commands)", "Generate per-method biom tables\nModify the taxonomy_glob below to point to the taxonomy assignments that were generated above. This may be necessary if filepaths were altered in the preceding cells.", "taxonomy_glob = join(results_dir, '*', '*', '*', '*', 'taxonomy.tsv')\ngenerate_per_method_biom_tables(taxonomy_glob, data_dir)", "Move result files to repository\nAdd results to the tax-credit directory (e.g., to push these results to the repository or compare with other precomputed results in downstream analysis steps). The precomputed_results_dir path and methods_dirs glob below should not need to be changed unless if substantial changes were made to filepaths in the preceding cells.", "precomputed_results_dir = join(project_dir, \"data\", \"precomputed-results\", analysis_name)\nmethod_dirs = glob(join(results_dir, '*', '*', '*', '*'))\nmove_results_to_repository(method_dirs, precomputed_results_dir)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ghvn7777/ghvn7777.github.io
content/fluent_python/7_decorate.ipynb
apache-2.0
[ "装饰器用于在源码中 “标记” 函数,以某种方式增强函数行为。这是一项强大的功能,但是如果想掌握,必须理解闭包\nnonlocal 是新出现的关键字,在 Python 3.0 中引入。作为 Python 程序员,如果严格遵守基于类的面向对象编程方式,即使不知道这个关键字也没事,但是如果想自己实现函数装饰器,那就必须了解闭包的方方面面,因此也就需要知道 nonlocal\n这一章中我们主要讨论的话题如下:、\n\nPython 如何计算装饰器语法\nPython 如何判断变量是不是局部的\n闭包存在的原因和工作原理\nnonlocal 能解决什么问题\n\n掌握这些知识,可以进一步探讨装饰器:\n\n实现行为良好的装饰器\n标准库中有用的装饰器\n实现一个参数化装饰器\n\n下面我们先介绍基础知识:\n基础知识\n假如有个 decorate 装饰器\n@decorate\ndef target():\n print('running target()')\n上面的写法与下面效果一样:\n```\ndef target():\n print('running target()')\ntarget = decorate(target)\n```", "def deco(func):\n def inner():\n print('running inner()')\n return inner()\n\n@deco\ndef target():\n print('running target()')\n \ntarget", "Python 何时执行装饰器\n装饰器一个关键特性是,它们被装饰的函数定义之后立即运行。这通常是在导入模块(Python 加载模块时),如下面的 register.py 模块", "#!/usr/bin/env python\n# encoding: utf-8\n\nregistry = []\n\ndef register(func):\n print('running register(%s)' % func)\n registry.append(func)\n return func\n\n@register\ndef f1():\n print('running f1()')\n\n@register\ndef f2():\n print('running f2()')\n\ndef f3():\n print('running f3()')\n\ndef main():\n print('running main()')\n print('registry ->', registry)\n f1()\n f2()\n f3()\n\nif __name__ == '__main__':\n main()\n\n# 运行后答案如下\n# running register(<function f1 at 0x7fbac67ca6a8>)\n# running register(<function f2 at 0x7fbac67ca730>)\n# running main()\n# registry -> [<function f1 at 0x7fbac67ca6a8>, <function f2 at 0x7fbac67ca730>]\n# running f1()\n# running f2()\n# running f3()", "看到 register 在模块中其它函数之前运行(两次)。调用 register 时,传给它的参数是被装饰的函数,例如 <function f1 at 0x7fbac67ca6a8>\n加载模块后,registry 有两个被装饰函数的引用:f1 和 f2。这两个函数,以及 f3,只在 main 明确调用它们时候才执行\n如果单纯导入 register.py", "import register", "此时查看 registry 的值:", "register.registry", "这主要为了强调,函数装饰器在导入模块时立即执行,而被装饰的函数只在明确调用时执行。这突出了 Python 程序员说的 导入时 和 运行时 的区别\n考虑到装饰器在真实代码中的常用方式,上面的例子有两个不同寻常的地方:\n\n装饰器函数与被装饰的函数在同一个模块定义,实际上装饰器通常在一个模块中定义,然后应用到其它模块中的函数上\nregister 装饰器返回的函数与通过参数传入的相同,实际上大多数装饰器会在内部定义一个函数,然后返回\n\n使用装饰器改进 “策略” 模式\n在上一章的商品折扣例子中有一个问题,每次通过 best_promo 函数判断最大折扣,这个函数也需要一个折扣函数列表,如果忘了添加,会导致一些不容易被发现的问题。下面使用注册装饰器解决了这个问题:", "promos = []\n\ndef promotion(promo_func):\n promos.append(promo_func)\n return promo_func\n\n@promotion\ndef fidelity_promo(order):\n '''为积分为 1000 或以上的顾客提供 5% 折扣'''\n return order.total() * .05 if order.customer.fidelity >= 1000 else 0\n\n@promotion\ndef bulk_item_promo(order):\n '''单个商品为 20 个或以上时提供 %10 折扣'''\n \n discount = 0\n for item in order.cart:\n if item.quantity >= 20:\n discount += item.total() * .1\n return discount\n\n@promotion\ndef large_order_promo(order):\n '''订单中的不同商品达到 10 个或以上时提供 %7 折扣'''\n \ndef best_promo(order):\n return max(promo(order) for promo in promos)", "这个方案有几个优点\n\n促销策略函数无需使用特殊名(即不用以 _promo 结尾)\n@promotion 装饰器突出了被装饰函数的作用,还可以临时禁止某个折扣策略,只需要把装饰器注释\n促销折扣策略可以在其它模块中定义,在系统任意地方都行,只要使用 @promotion 装饰器即可\n\n不过,多数的装饰器会修改被装饰函数。通常,它们会定义一个内部函数,然后将其返回,替换被装饰的函数。使用内部函数的代码几乎都要靠闭包才能正确工作。为了理解闭包,我们先退后一步,了解 Python 中变量的作用域\n变量作用域规则", "def f1(a):\n print(a)\n print(b)\n\nf1(3)", "这里出现错误不奇怪,如果先给全局变量 b 赋值,然后调用 f1,就不会出错", "b = 6\nf1(3)", "这也很正常,下面是一个会让你大吃一惊的例子:),前面代码和上面的例子一样,可是,在赋值之前,第二个 print 失败了。", "b = 6\ndef f2(a):\n print(a)\n print(b)\n b = 9\nf2(3)", "首先输出了 3,表明 print(a) 执行了。但是 print(b) 无法执行,这是因为,Python 编译函数的定义时,判断 b 是局部变量,因为在函数中给它赋值了。生成的字节码证实了这种判断,Python 会尝试从本地环境获取 b。后面调用 f2(3) 时,f2 的定义体会获取并打印局部变量 a 的值,但是尝试获取局部变量 b 的时候,发现 b 没有绑定值。\n这不是缺陷,而是设计选择,Python 不要求声明变量,但是假定在函数定义体中赋值的变量是局部变量。\n如果在函数中赋值时想让解释器把 b 当成全局变量,要求使用 global 声明:", "b = 6\ndef f3(a):\n global b\n print(a)\n print(b)\n b = 9\nf3(3)\n\nf3(3)\n\nb = 30\nb", "了解了 Python 
的变量作用域之后,我们就可以讨论闭包了\n闭包\n人们有时会把闭包和匿名函数弄混,这是有历史原因的,在函数内部定义函数不常见,直到开始使用匿名函数才这么做。而且,只有涉及嵌套函数时才有闭包的问题。因此,很多人是同时知道这两个概念的\n其实,闭包指延伸了作用域的函数,其中包含函数定义体中引用、但是不在定义体中定义的非全局变量。函数是不是匿名的没有关系,关键是它能访问定义体之外定义的非全局变量。\n我们通过例子来理解,加入 avg 函数是计算不断增加的系列值的均值,例如整个历史中的某个商品的平均收盘价,每天都会增加新价格,因此平均值要考虑至目前为止的所有价格。\n一开始,avg 是这样的:", "class Averager():\n \n def __init__(self):\n self.series = []\n \n def __call__(self, new_value):\n self.series.append(new_value)\n total = sum(self.series)\n return total / len(self.series)\n \navg = Averager()\navg(10)\n\navg(11)\n\navg(12)", "下面是函数式实现,使用高阶函数 make_averager,调用 make_averager,返回一个 averager 函数对象。每次调用 averager 时,都会把参数添加到系列值中,然后计算当前平均值", "def make_averager():\n series = []\n \n def averager(new_value):\n series.append(new_value)\n total = sum(series)\n return total / len(series)\n return averager\n\navg = make_averager()\n\navg(10)\n\navg(11)\n\navg(12)", "注意,这两个例子有共通之处,调用 Averager() 或 make_averager() 得到一个可调用对象 avg,它会更新历史值,然后计算当前均值。\nAverager 将历史值存在哪里很明显,self.series 属性中,avg 函数将 series 存在哪里呢?\n注意 series 是 make_averager 的局部变量,因为那个函数的定义体中初始化了 series,series = [],可是调用 avg(10) 时候,make_averager 已经返回了,而它本身地作用域也没有了\n在 averager 函数中,series 是自由变量。这是一个技术术语,指未在本地作用域绑定的变量,averager 的闭包延伸到这个函数的作用于之外,包含自由变量 series 的绑定\n审查返回的 averager 对象,我们会在 __code__ 属性发现保存局部变量和自由变量的名称", "avg.__code__.co_varnames\n\navg.__code__.co_freevars", "series 的绑定在返回的 avg 函数的 __closure__ 属性中,avg.__closure__ 中的各个元素对应于 avg.__code__.co_freevars 中的一个名称。这些元素是 cell 对象,有个 cell_contents 属性,保存着真正的值。这些属性如下所示:", "avg.__code__.co_freevars\n\navg.__closure__\n\navg.__closure__[0].cell_contents", "综上,闭包是一种函数,它会保留定义函数时存在的自有变量的绑定,这样调用函数时,虽然定义域不可用了,但是仍然能使用这些绑定\n注意,只有嵌套在其它函数中的函数才可能需要处理不在全局作用域中的外部变量\nnonlocal 声明\n前面实现的计算平均值方法效率不高,因为每次把值存到历史数列中,遍历历史数列求平均数,更好的方法是只存储目前的平均数和个数,然后用这两个数计算平均值\n下面是一个有缺陷的的程序,只是为了阐明某个观点,我们来看一下:", "def make_averager():\n count = 0\n total = 0\n \n def averager(new_value):\n count += 1\n total += new_value\n return total / count\n \n return averager\n\navg = make_averager()\navg(10)", "问题是,当 count 是数字和不可变类型时, count += 1 和 count = count + 1 是等价的。我们在 averager 函数中为 count 赋值了,这会把 Count变成局部变量,total 变量也会受这个问题影响。\n在前面的例子没有这个问题,因为我们没有给 series 赋值,我们只是调用 series.append(), 并把 series 传给 sum 和 len。也就是说,我们利用了列表是可变对象的这一事实\n但是对于数字,字符串,元组等不可变类型来说,只能读取,不能更新。如果尝试像上面重新绑定,就会隐式的创建局部变量 count。这样 count 就不是自由变量了,因此不会保存到闭包中\n为了解决这个问题,Python 3 引入了 nonlocal 声明。它的作用是把变量标记为自由变量,即使在函数中为变量赋予新值了,也会变成自由变量。如果为 nonlocal 声明的变量赋予新值,闭包中保存的绑定会更新。最新版 make_averager 的正确实现如下所示:", "def make_averager():\n count = 0\n total = 0\n \n def averager(new_value):\n nonlocal count, total\n count += 1\n total += new_value\n return total / count\n \n return averager\n\navg = make_averager()\navg(10)\n\navg(11)", "Python 2 中的处理方法可以把 count 和 total 存储为可变对象,例如列表和字典就可以了。\n实现一个简单的装饰器\n下面定义一个装饰器,会在每次调用被装饰的函数时计时,然后把经过的时间、传入的参数和调用结果打印", "import time\n\ndef clock(func):\n def clocked(*args): \n #返回计时器的精准时间(系统的运行时间),包含整个系统的睡眠时间。系统起始运行时间不确定,所以一般只有两个时间差才有效\n t0 = time.perf_counter() \n result = func(*args)\n elapsed = time.perf_counter() - t0\n name = func.__name__\n # repr 方法用得好,用的对象例如列表不能用 str(),但是可以用 repr() 获取对象的标准字符串表示形式\n arg_str = ', '.join(repr(arg) for arg in args) \n # %r 把对象转成标准字符串形式,因为不知道返回值类型\n print('[%0.8fs]%s(%s) -> %r' % (elapsed, name, arg_str, result)) \n return result\n return clocked\n\nimport time\n\n@clock\ndef snooze(seconds):\n time.sleep(seconds)\n \n@clock\ndef factorial(n):\n return 1 if n < 2 else n * factorial(n - 1) \n\nprint('*' * 40, 'Calling snooze(.123)')\nsnooze(.123)\nprint('*' * 40, 'Calling factorial(6)')\nfactorial(6)", "clock 中还定义 clocked 函数的原因是 clock 
函数中的内容在模块引入后会被执行,所以再嵌套一个函数,保证不会在模块引入后直接执行装饰器内容,然后在 clocked 中对原函数计时\n工作原理\n@clock\ndef factorial(n):\n return 1 if n &lt; 2 else n * factorial(n - 1)\n等价于 \ndef factorial(n):\n return 1 if n &lt; 2 else n * factorial(n - 1) \nfactorial = clock(factorial)\n因此,在这两个示例中,factorial 会作为 func 参数传给 clock,然后 clock 函数会返回 clocked 函数, Python 解释器在背后会把 clocked 赋值给 factorial。可以看到查看 factorial 的 __name__ 属性会得到以下结果:", "factorial.__name__", "所以,现在 factorial 保存的是 clocked 函数的引用。此后,调用 factorial(n),执行的都是 clocked(n)。clocked 大致做了以下几件事\n\n记录初试时间 t0\n调用原来的 factorial 函数,保存结果\n计算经过的时间\n格式化收集的数据,打印\n返回第二步保存的结果\n\n上面的 clock 装饰器有几个缺点,不支持关键字参数,而且遮盖了被装饰函数的 __name__ 和 __doc__ 属性。下面使用 functools.wraps 装饰器可以把相关属性从 func 复制到 clocked 中。此外还能正确处理关键字参数", "import time\nimport functools\n\ndef clock(func):\n @functools.wraps(func)\n def clocked(*args, **kwargs): \n t0 = time.time()\n result = func(*args, **kwargs)\n elapsed = time.time() - t0\n name = func.__name__\n \n arg_lst = []\n if args:\n arg_lst.append(', '.join(repr(arg) for arg in args))\n if kwargs:\n pairs = ['%s=%r' % (k, w) for k, w in sorted(kwargs.items())]\n arg_lst.append(', '.join(pairs))\n arg_str = ', '.join(arg_lst)\n \n print('[%0.8fs]%s(%s) -> %r' % (elapsed, name, arg_str, result)) \n return result\n return clocked\n\nimport time\n\n@clock\ndef snooze(seconds):\n time.sleep(seconds)\n \n@clock\ndef factorial(n):\n return 1 if n < 2 else n * factorial(n - 1) \n\nprint('*' * 40, 'Calling snooze(.123)')\nsnooze(.123)\nprint('*' * 40, 'Calling factorial(6)')\nfactorial(6)\n\nfactorial.__name__", "看到 factorial 的属性已经被复制到了 clocked 中。functools.wraps 只是标准库中拿来即用的装饰器之一,下面介绍 functools 模块中最令人印象深刻的两个装饰器: lru_cache 和 singledispatch\n标准库中的装饰器\nPython 内置了 3 个用于装饰器方法的函数:property, classmethod, staticmethod。property 在第 19 章讨论, 另外两个在第 9 章讨论\n另一个常见的装饰器是 functools.wraps,它的作用是协助构建行为良好的装饰器。我们在前面用过,现在来介绍标准库中最值得关注的两个装饰器 lru_cache 和全新的 singledispatch(Python 3.4 新增)。这两个装饰器都在 functools 模块中定义。接下来分别讨论它们\n使用 functools.lru_cache 做备忘\nfunctools.lru_cache 是一个非常实用的装饰器,它实现了备忘功能。这是一项优化技术,它把耗时的函数结果保存起来,避免传入相同的参数时重复计算。 LRU 3 个字母是 “Least Recently Used” 的缩写,表明缓存不会无限制增长,一段时间不用的缓存条目会被扔掉\n下面是一个生成 n 个斐波那契数的例子:", "@clock\ndef fibonacci(n):\n if n < 2: \n return n\n return fibonacci(n - 2) + fibonacci(n - 1)\n\nprint(fibonacci(6))", "看到很浪费时间, fibonacci(1) 调用了 8 次, fibonacci(2) 调用了 5 次。但是如果增加两行代码,性能会显著改善,如下:", "@functools.lru_cache() # note\n@clock\ndef fibonacci(n):\n if n < 2: \n return n\n return fibonacci(n - 2) + fibonacci(n - 1)\n\nprint(fibonacci(6))", "注意,必须像常规函数那样调用 lru_cache。这一行中有一对括号: @functools.lru_cache()。这么做的原因是,lru_cache() 可以接受配置参数,稍后说明\n这里还叠放了装饰器,lru_cache() 应用到了 @clock 返回的函数上\n这样一来,看到执行时间减半,而且 n 的每个值只调用一次函数。\n除了优化递归算法之外,lru_cache 在从 Web 中获取信息的应用也能发挥巨大作用。特别要注意,lru_cache 可以使用两个可选的参数来配置。它的签名是:functools.lru_cache(maxsize = 128, typed = False)\nmaxsize 指定最多存储多少个调用结果,缓存满了后,旧的结果会被扔掉,为了得到最佳性能, maxsize 应该设为 2 的次幂。typed 参数如果设为 True,把不同参数类型得到的结构分开保存,即把通常认为相等的浮点数和整数参数(如 1 和 1.0)区分开。顺便说一下,因为 lru_cache 使用字典存储结果,而且键根据调用时传入的定位参数和关键字创建,所以被 lru_cache 装饰的函数,它的所有的参数必须是可散列的。\n单分派泛函数\n假如我们在调试一个 Web 应用程序,想生成 HTML,显示不同类型的 Python 对象。\n我们可能会编写这样的函数:", "import html\ndef htmlize(obj):\n content = html.escape(repr(obj)) # 对字符串进行转义,详情见注释说明\n return '<pre>{}</pre>'.format(content)\n\n'''\nhtml.escape(s, quote=True)\nConvert the characters &, < and > in string s to HTML-safe sequences. 
\nUse this if you need to display text that might contain such characters in HTML.\nIf the optional flag quote is true, the characters (\") and (') are also translated; \nthis helps for inclusion in an HTML attribute value delimited by quotes, as in <a href=\"...\">.\n'''", "这个函数适用于任何 Python 类型,但是我们想做个扩展,让它使用特别的方式显示某些类型。\n\nstr: 把内部的换行符换成 &lt;br&gt;\\n;不使用 &lt;pre&gt;,而是使用 &lt;p&gt;\nint: 以十进制和十六进制显示数字\nlist:输出一个 HTML 列表,根据各个元素的类型进行格式化\n\n我们想要的行为如下所示:", "htmlize({1, 2, 3}) # 默认情况下, 在 <pre> 标签内显示 HTML 转义后的字符串表示形式\n\nhtmlize(abs) \n\nhtmlize('Heimlich & Co.\\n- a game')# str 对象也是显示 HTML 转义后的形式,\\n 换成 <br>\\n 并且放到 <p> 标签内\n\nhtmlize(42) # 数字显示 10进制和 16进制形式\n\nprint(htmlize(['alpha', 66, {3, 2, 1}])) #列表根据各自的类型格式化显示", "Python 不支持重载方法或函数,所以我们不能使用不同签名定义 htmlize 的变体。也无法使用不同方式处理不同的数据类型。在 Python 中,一种常见的做法是把 htmlize 变成一个分派函数,使用一串 if else 来调用专门的函数,但是这样太笨,而且不好维护\nPython 3.4 新增了 functools.singledispatch 装饰器可以把整体方案拆成多个模块,甚至可以为你无法修改的类提供专门的函数。使用 @singledispathc 会把普通的函数变成泛函数。根据第一参数的类型,以不同方式执行相同操作的一组函数", "from functools import singledispatch\nfrom collections import abc\nimport numbers\nimport html\n\n@singledispatch\ndef htmlize(obj):\n content = html.escape(repr(obj))\n return '<pre>{}</pre>'.format(content)\n\n@htmlize.register(str) #各个函数用 @base_function.register(type) 装饰\ndef _(text): #专门函数的名称不重要, _ 是个不错的选择\n content = html.escape(text).replace('\\n', '<br>\\n')\n return '<p>{0}</p>'.format(content)\n\n@htmlize.register(numbers.Integral) # Integral 是 int 的虚拟超类\ndef _(n):\n return '<pre>{0} (0x{0:x})</pre>'.format(n)\n\n@htmlize.register(tuple) #可以叠放多个装饰器,让同一个函数支持不同的类型\n@htmlize.register(abc.MutableSequence)\ndef _(seq):\n inner = '</li>\\n<li>'.join(htmlize(item) for item in seq)\n return '<ul>\\n<li>' + inner + '</li>\\n</ul>'", "只要有可能,注册的专门函数应该处理抽象基类(例如 numbers.Integral 和 abc.MutableSequence),不要处理具体实现(如 int 和 list)。这样,代码支持的兼容类型更广泛\n叠放装饰器\n上面已经用过了叠放装饰器的方式,@lru_cache 应用到 @clock 装饰 fibonacci 得到的结果上。上面的例子最后也用到了两个 @htmlize.register 装饰器\n把 @d1 和 @d2 两个装饰器按顺序应用到 f 函数上,作用相当于 f = d1(d2(f)),也就是说 \n@d1\n@d2\ndef f():\n print('f')\n等同于\n```\ndef f():\n print('f')\nf = d1(d2(f))\n```\n除了叠放装饰器以外,我们还用到了接收参数的装饰器,例如上面的 htmlize.register(type)\n参数化装饰器\nPython 把被装饰的函数作为第一个参数传给装饰器函数。我们可以创建一个装饰器工厂函数,把参数传给他,返回一个装饰器,然后再把它应用到要装饰器函数上,我们以见过的最简单的装饰器为例说明:", "registry = []\n\ndef register(func):\n print('funning register(%s)' % func)\n registry.append(func)\n return func\n\n@register\ndef f1():\n print('running f1()')\n \nprint('running main()')\nprint('registry ->', registry)\nf1()", "为了便于启动和禁用 register 执行的函数注册功能,我们为他提供一个可选的 activate 参数,设为 False 时,不注册被装饰的函数。实现如下,从概念上来看,这个新的 register 函数不是装饰器,而是装饰器工厂函数。调用它会返回真正的装饰器,这才是应用到目标上的装饰器\n为了接受参数,新的 register 装饰器必须作为函数调用", "registry = set() #添加删除元素更快(相比列表)\n\ndef register(active = True):\n def decorate(func): #这是真正的装饰器,它的参数是一个函数\n print('running register(active=%s)->decorate(%s)' % (active, func))\n if active:\n registry.add(func)\n else:\n registry.discard(func)\n \n return func #decorate 是装饰器函数,所以返回 func\n return decorate # register 是装饰器工厂函数,返回 decorate\n\n@register(active = False) # @register 工厂函数必须作为函数调用,传入需要的参数\ndef f1():\n print('running f1()')\n\n@register() # 如果不传入参数也要作为函数调用\ndef f2():\n print(\"running f2()\")\n \ndef f3():\n print('running f3()')\n \nregistry", "这里的关键是,register() 要返回 decorate,并把它应用到被装饰器函数上,注意只有 f2 在 registry 中,因为 f1 传给装饰器工厂函数的参数是 False。如果不能使用 @ 语法,那就要像常规函数那样使用 register,若想把 f 添加到 registry 中,则装饰 f 函数的语法是 register()(f)。下面演示了如何把函数添加到 registry 中,以及如何从中删除函数", "register()(f3)\n\nregistry\n\nregister(active=False)(f2)\n\nregistry", 
"参数化装饰器原理相当复杂,我们讨论的是很简单的内容,参数化装饰器通常会把被装饰的函数替换掉,而且结构上需要多一层嵌套,接下来会讨论这种函数金字塔\n参数化 clock 装饰器\n我们这次为 clock 装饰器添加一个功能,让用户传入一个格式字符串,控制被装饰函数的输出,见下面例子,为了方便起见,我们下面用的是最初实现的 clock,而不是示例中使用 @functools.wraps 的改进后的版本,因为那一版增加了一层函数", "import time\n\nDEFAULT_FMT = '[{elapsed:0.8f}s] {name}({args}) -> {result}'\n\ndef clock(fmt = DEFAULT_FMT): # 参数化装饰器工厂函数\n def decorate(func): # 真正的装饰器\n def clocked(*_args): # 包装被装饰器的函数\n t0 = time.time() \n _result = func(*_args) \n elapsed = time.time() - t0\n name = func.__name__\n args = ', '.join(repr(arg) for arg in _args) \n result = repr(_result)\n print(fmt.format(**locals())) #是为了在 fmt 中引用 clocked 的局部变量 --> 用得好!\n return result\n return clocked\n return decorate\n\n@clock() #不传入参数调用 clock(),应用的装饰器默认格式的 str\ndef snooze(seconds):\n time.sleep(seconds)\n \nfor i in range(3):\n snooze(.123)", "下面是用户传入自定义的格式字符串的调用:", "@clock('{name}: {elapsed}s')\ndef snooze(seconds):\n time.sleep(seconds)\n \nfor i in range(3):\n snooze(.123)\n\n@clock('{name}({args}) dt={elapsed:0.3f}s')\ndef snooze(seconds):\n time.sleep(seconds)\n \nfor i in range(3):\n snooze(.123)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
phoebe-project/phoebe2-docs
2.0/tutorials/l3.ipynb
gpl-3.0
[ "\"Third\" Light\nSetup\nLet's first make sure we have the latest version of PHOEBE 2.0 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).", "!pip install -I \"phoebe>=2.0,<2.1\"", "As always, let's do imports and initialize a logger and a new bundle. See Building a System for more details.", "%matplotlib inline\n\nimport phoebe\nfrom phoebe import u # units\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nlogger = phoebe.logger()\n\nb = phoebe.default_binary()", "Relevant Parameters\nThe 'l3' parameter describes how much third light exists in a given passband. Since this is passband dependent and only used for flux measurments - it does not yet exist for a new empty Bundle.", "b.filter(qualifier='l3')", "So let's add a LC dataset", "b.add_dataset('lc', times=np.linspace(0,1,101), dataset='lc01')", "We now see that the LC dataset created an 'l3' parameters for the new dataset.", "b.filter(qualifier='l3')\n\nprint b['l3@lc01']", "Influence on Light Curves (Fluxes)\n\"Third\" light is simply additional flux added to the light curve from some external source - whether it be crowding from a background object, light from the sky, or an extra component in the system that is unaccounted for in the system hierarchy.\nTo see this we'll compare a light curve with and without \"third\" light.", "b.run_compute(irrad_method='none', model='no_third_light')\n\nb['l3@lc01'] = 5\n\nb.run_compute(irrad_method='none', model='with_third_light')", "As expected, adding 5 W/m^3 of third light simply shifts the light curve up by that exact same amount.", "fig = plt.figure()\nax1, ax2 = fig.add_subplot(121), fig.add_subplot(122)\n\naxs, artists = b['lc01'].plot(model='no_third_light', ax=ax1)\naxs, artists = b['lc01'].plot(model='with_third_light', ax=ax2)", "Influence on Meshes (Intensities)\n\"Third\" light does not affect the intensities stored in the mesh (including those in relative units). In other words, like distance, \"third\" light only scales the fluxes.\nNOTE: this is different than pblums which DO affect the relative intensities. Again, see the pblum tutorial for more details.\nTo see this we can run both of our models again and look at the values of the intensities in the mesh.", "b.add_dataset('mesh', times=[0], dataset='mesh01')\n\nb['l3@lc01'] = 0.0\n\nb.run_compute(irrad_method='none', model='no_third_light')\n\nb['l3@lc01'] = 5\n\nb.run_compute(irrad_method='none', model='with_third_light')\n\nprint \"no_third_light abs_intensities: \", b.get_value(qualifier='abs_intensities', component='primary', dataset='lc01', model='no_third_light').mean()\nprint \"with_third_light abs_intensities: \", b.get_value(qualifier='abs_intensities', component='primary', dataset='lc01', model='with_third_light').mean()\n\nprint \"no_third_light intensities: \", b.get_value(qualifier='intensities', component='primary', dataset='lc01', model='no_third_light').mean()\nprint \"with_third_light intensities: \", b.get_value(qualifier='intensities', component='primary', dataset='lc01', model='with_third_light').mean()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ueapy/ueapy.github.io
content/notebooks/2016-01-22-string-formatting.ipynb
mit
[ "name = '2016-01-22-string-formatting'\ntitle = 'Writing text files using new Python string formatting'\ntags = 'text format'\nauthor = 'Maria Zamyatina'\n\nfrom nb_tools import connect_notebook_to_post\nfrom IPython.core.display import HTML\n\nhtml = connect_notebook_to_post(name, title, tags, author)", "Sometimes in order to run some programming software we need to prepare an input description file which specifies the model setup (e.g., chemical mechanism, intergration method, desired type of results, etc). If we are planning to run the model several times with different, for example, initial conditions, constracting such a file using a script could be beneficial. During our last meeting we discussed how to assemble such a file with Python, and here is what we did.\nLet's assume that we need to construct a file containing the following information:\n```\n{!-- F90 --}\nINITVALUES\nO3 =7.50e+11;\nCH4 =4.55e+13;\nCO =2.55e+12;\nINLINE F90_INIT\nTSTART = 03600\nTEND = TSTART + 73600\nDT = 3600\nTEMP = 298\nENDINLINE\n```\nThe first line of this file is a header. Since it is not going to change in our model runs, we can store it in a separate file:", "header_text = '{!-*- F90 -*-}'\nwith open('header.inp','w') as header_file:\n header_file.write(header_text)", "The next four lines define the #INITVALUES section of the file, where the initial concentrations (actually, number densities) of chemical compounds of interest are set. If we want to change only numerical values in this section, it is logical to create a text template which would take into account the syntax of the target sortware and include some sort of 'gaps' to fill in with our initial values. One way of achieving that is to define a function that creates a multline string and has a number of arguments determining the initial concentrations of our chemical species:", "def gen_concs_string(O3=7.50e+11, CH4=4.55e+13, CO=2.55e+12):\n concs_string = \\\n\"\"\"\n#INITVALUES\nO3\\t={O3_nd:.2e};\nCH4\\t={CH4_nd:.2e};\nCO\\t={CO_nd:.2e};\"\"\"\\\n.format(O3_nd=O3, CH4_nd=CH4, CO_nd=CO)\n \n return concs_string", "For convinience we can even set default values for each of the arguments (e.g., here default ozone initial concentration is $7.5\\times 10^{11}$ molecules per $cm^{3}$).\nBy the way, we have just used a new style of string formatting in Python! An old way of doing the same would include a '%' sign in front of the function agruments inside the string expression and a line of code starting with '%' afterwards, like this\n```\n\"\"\"\nINITVALUES\nO3\\t=%(O3_nd).2e;\nCH4\\t=%(CH4_nd).2e;\nCO\\t=%(CO_nd).2e;\"\"\"\\\n%{\"O3\":O3_nd, \"CH4\":CH4_nd, \"CO_nd\":CO_nd}\n```\nbut using a new style makes your code more readable. 
For more examples on differences between old and new styles of string formatting in Python follow this link: PyFormat.\nWell, let's reinforce our knowledge and apply a new style of string formatting to reproduce the last section of the input file, which specifies model integration time and temperature:", "def gen_time_str(tstart, nhours, timestep, temp):\n time_string = \\\n\"\"\"\n#INLINE F90_INIT\nTSTART = {tstart}*{timestep}\nTEND = TSTART + {nhours}*3600\nDT = {timestep}\nTEMP = {temp}\n#ENDINLINE\"\"\"\\\n.format(tstart=tstart, nhours=nhours, timestep=timestep, temp=temp)\n \n return time_string", "And finally let's assemble our input description file:", "# Read header\nwith open('header.inp','r') as header_file:\n header_text = header_file.read()\n# Use default inital concentrations and set model integration time and temperature\nconcstr = gen_concs_string()\ntimestr = gen_time_str(0,7,3600,298)\n# Combine all strings together\nfull_str = header_text + concstr + timestr\n\n# Create function that writes a string to file\ndef write_str_to_file(string, filename='model.def'):\n with open(filename,'w') as def_file:\n def_file.write(string)\n# Call this function with your string\nwrite_str_to_file(full_str)", "Creating a file mask\nThere are plenty of other ways to use a new string formatting in Python to your advantage. For example, you could use it to create file names in a loop:", "file_mask = '+{one}hours_{two}UTC.jpg'\n\nfor i, j in zip((1,2,3), (4,5,6)):\n print(file_mask.format(one=i, two=j))\n\nHTML(html)" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
tensorflow/docs-l10n
site/ko/addons/tutorials/tqdm_progress_bar.ipynb
apache-2.0
[ "Copyright 2020 The TensorFlow Authors.", "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "TensorFlow 애드온 콜백: TQDM 진행률 표시줄\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://www.tensorflow.org/addons/tutorials/tqdm_progress_bar\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\">TensorFlow.org에서 보기</a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/addons/tutorials/tqdm_progress_bar.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\">Google Colab에서 실행하기</a></td>\n <td><a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/ko/addons/tutorials/tqdm_progress_bar.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\">GitHub에서 소스 보기</a></td>\n <td><a href=\"https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/addons/tutorials/tqdm_progress_bar.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\">노트북 다운로드하기</a></td>\n</table>\n\n개요\n이 노트북은 TensorFlow 애드온에서 TQDMCallback을 사용하는 방법을 보여줍니다.\n설정", "!pip install -U tensorflow-addons\n\n!pip install -q \"tqdm>=4.36.1\"\n\nimport tensorflow as tf\nimport tensorflow_addons as tfa\n\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten\n\nimport tqdm\n\n# quietly deep-reload tqdm\nimport sys\nfrom IPython.lib import deepreload \n\nstdout = sys.stdout\nsys.stdout = open('junk','w')\ndeepreload.reload(tqdm)\nsys.stdout = stdout\n\ntqdm.__version__", "데이터 가져오기 및 정규화", "# the data, split between train and test sets\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n# normalize data\nx_train, x_test = x_train / 255.0, x_test / 255.0", "간단한 MNIST CNN 모델 빌드하기", "# build the model using the Sequential API\nmodel = Sequential()\nmodel.add(Flatten(input_shape=(28, 28)))\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(10, activation='softmax'))\n\nmodel.compile(optimizer='adam',\n loss = 'sparse_categorical_crossentropy',\n metrics=['accuracy'])", "기본 TQDMCallback 사용법", "# initialize tqdm callback with default parameters\ntqdm_callback = tfa.callbacks.TQDMProgressBar()\n\n# train the model with tqdm_callback\n# make sure to set verbose = 0 to disable\n# the default progress bar.\nmodel.fit(x_train, y_train,\n batch_size=64,\n epochs=10,\n verbose=0,\n callbacks=[tqdm_callback],\n validation_data=(x_test, y_test))", "아래는 위의 셀을 실행할 때 예상되는 출력입니다.", "# TQDMProgressBar() also works with evaluate()\nmodel.evaluate(x_test, y_test, batch_size=64, callbacks=[tqdm_callback], verbose=0)", "아래는 위의 셀을 실행할 때 예상되는 출력입니다." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
vicente-gonzalez-ruiz/YAPT
02-basics/containers/02-lists.ipynb
cc0-1.0
[ "<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Containers\" data-toc-modified-id=\"Containers-1\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>Containers</a></span><ul class=\"toc-item\"><li><span><a href=\"#1.-Tuples\" data-toc-modified-id=\"1.-Tuples-1.1\"><span class=\"toc-item-num\">1.1&nbsp;&nbsp;</span>1. Tuples</a></span><ul class=\"toc-item\"><li><span><a href=\"#1.1-Tuples-are-(as-the-rest-of-elements-of-Python)-objects\" data-toc-modified-id=\"1.1-Tuples-are-(as-the-rest-of-elements-of-Python)-objects-1.1.1\"><span class=\"toc-item-num\">1.1.1&nbsp;&nbsp;</span>1.1 Tuples are (as the rest of elements of Python) objects</a></span></li><li><span><a href=\"#1.2.-Tuple-definition\" data-toc-modified-id=\"1.2.-Tuple-definition-1.1.2\"><span class=\"toc-item-num\">1.1.2&nbsp;&nbsp;</span>1.2. Tuple definition</a></span></li><li><span><a href=\"#1.3.-Counting-ocurrences-in-tuples\" data-toc-modified-id=\"1.3.-Counting-ocurrences-in-tuples-1.1.3\"><span class=\"toc-item-num\">1.1.3&nbsp;&nbsp;</span>1.3. Counting ocurrences in tuples</a></span></li><li><span><a href=\"#1.4.-Searching-for-an-item-in-a-tuple\" data-toc-modified-id=\"1.4.-Searching-for-an-item-in-a-tuple-1.1.4\"><span class=\"toc-item-num\">1.1.4&nbsp;&nbsp;</span>1.4. Searching for an item in a tuple</a></span></li><li><span><a href=\"#1.5.-Slicing-in-tuples\" data-toc-modified-id=\"1.5.-Slicing-in-tuples-1.1.5\"><span class=\"toc-item-num\">1.1.5&nbsp;&nbsp;</span>1.5. Slicing in tuples</a></span></li><li><span><a href=\"#1.6.-Functions-can-return-tuples\" data-toc-modified-id=\"1.6.-Functions-can-return-tuples-1.1.6\"><span class=\"toc-item-num\">1.1.6&nbsp;&nbsp;</span>1.6. Functions can return tuples</a></span></li><li><span><a href=\"#1.7.-Swapping-pairs-with-tuples-is-fun!\" data-toc-modified-id=\"1.7.-Swapping-pairs-with-tuples-is-fun!-1.1.7\"><span class=\"toc-item-num\">1.1.7&nbsp;&nbsp;</span>1.7. Swapping pairs with tuples is fun!</a></span></li><li><span><a href=\"#1.8.-Tuples-are-inmutable\" data-toc-modified-id=\"1.8.-Tuples-are-inmutable-1.1.8\"><span class=\"toc-item-num\">1.1.8&nbsp;&nbsp;</span>1.8. Tuples are inmutable</a></span></li></ul></li><li><span><a href=\"#2.-[0]-Lists\" data-toc-modified-id=\"2.-[0]-Lists-1.2\"><span class=\"toc-item-num\">1.2&nbsp;&nbsp;</span>2. 
[0] <a href=\"https://docs.python.org/3.7/library/stdtypes.html#sequence-types-list-tuple-range\" target=\"_blank\">Lists</a></a></span><ul class=\"toc-item\"><li><span><a href=\"#2.1-[1]-(Of-course)-lists-are-objects\" data-toc-modified-id=\"2.1-[1]-(Of-course)-lists-are-objects-1.2.1\"><span class=\"toc-item-num\">1.2.1&nbsp;&nbsp;</span>2.1 [1] (Of course) lists are objects</a></span></li><li><span><a href=\"#2.2-[0]-Appending-items-to-a-list-(O(1))\" data-toc-modified-id=\"2.2-[0]-Appending-items-to-a-list-(O(1))-1.2.2\"><span class=\"toc-item-num\">1.2.2&nbsp;&nbsp;</span>2.2 [0] Appending items to a list (O(1))</a></span></li><li><span><a href=\"#2.3-[0]-Inserting-items-(O(n))\" data-toc-modified-id=\"2.3-[0]-Inserting-items-(O(n))-1.2.3\"><span class=\"toc-item-num\">1.2.3&nbsp;&nbsp;</span>2.3 [0] Inserting items (O(n))</a></span></li><li><span><a href=\"#2.4-[0]-Deleting-items-from-a-list-by-content-(O(n)))\" data-toc-modified-id=\"2.4-[0]-Deleting-items-from-a-list-by-content-(O(n)))-1.2.4\"><span class=\"toc-item-num\">1.2.4&nbsp;&nbsp;</span>2.4 [0] Deleting items from a list by content (O(n)))</a></span></li><li><span><a href=\"#2.4-[0]-Deleting-items-from-the-begin-of-the-list-(O(1)))\" data-toc-modified-id=\"2.4-[0]-Deleting-items-from-the-begin-of-the-list-(O(1)))-1.2.5\"><span class=\"toc-item-num\">1.2.5&nbsp;&nbsp;</span>2.4 [0] Deleting items from the begin of the list (O(1)))</a></span></li><li><span><a href=\"#2.4-[0]-Deleting-items-from-the-end-of-the-list-(O(1)))\" data-toc-modified-id=\"2.4-[0]-Deleting-items-from-the-end-of-the-list-(O(1)))-1.2.6\"><span class=\"toc-item-num\">1.2.6&nbsp;&nbsp;</span>2.4 [0] Deleting items from the end of the list (O(1)))</a></span></li><li><span><a href=\"#2.5-[0]-Sorting-the-elements-of-a-list-(O(n-log-n))\" data-toc-modified-id=\"2.5-[0]-Sorting-the-elements-of-a-list-(O(n-log-n))-1.2.7\"><span class=\"toc-item-num\">1.2.7&nbsp;&nbsp;</span>2.5 [0] Sorting the elements of a list (O(n log n))</a></span></li><li><span><a href=\"#2.6-[1]-Erasing-all-list-items-(O(1))\" data-toc-modified-id=\"2.6-[1]-Erasing-all-list-items-(O(1))-1.2.8\"><span class=\"toc-item-num\">1.2.8&nbsp;&nbsp;</span>2.6 [1] Erasing all list items (O(1))</a></span></li><li><span><a href=\"#2.7-[0]-List-slicing-(O(s))\" data-toc-modified-id=\"2.7-[0]-List-slicing-(O(s))-1.2.9\"><span class=\"toc-item-num\">1.2.9&nbsp;&nbsp;</span>2.7 [0] List slicing (O(s))</a></span></li><li><span><a href=\"#2.8-[1]-Defining-lists-with-list-comprehensions:\" data-toc-modified-id=\"2.8-[1]-Defining-lists-with-list-comprehensions:-1.2.10\"><span class=\"toc-item-num\">1.2.10&nbsp;&nbsp;</span>2.8 [1] Defining lists with <a href=\"http://www.secnetix.de/olli/Python/list_comprehensions.hawk\" target=\"_blank\"><em>list comprehensions</em></a>:</a></span></li><li><span><a href=\"#2.9-[1]-Lists-are-mutable-objects\" data-toc-modified-id=\"2.9-[1]-Lists-are-mutable-objects-1.2.11\"><span class=\"toc-item-num\">1.2.11&nbsp;&nbsp;</span>2.9 [1] Lists are mutable objects</a></span></li></ul></li><li><span><a href=\"#3.-[0]--Sets\" data-toc-modified-id=\"3.-[0]--Sets-1.3\"><span class=\"toc-item-num\">1.3&nbsp;&nbsp;</span>3. [0] <a href=\"https://docs.python.org/3.7/library/stdtypes.html#set-types-set-frozenset\" target=\"_blank\">Sets</a></a></span><ul class=\"toc-item\"><li><span><a href=\"#3.2.-[0]-Sets-can-grow-(O(1))\" data-toc-modified-id=\"3.2.-[0]-Sets-can-grow-(O(1))-1.3.1\"><span class=\"toc-item-num\">1.3.1&nbsp;&nbsp;</span>3.2. 
[0] Sets can grow (O(1))</a></span></li><li><span><a href=\"#3.3.-[0]-Sets-can-not-contain-dupplicate-objects\" data-toc-modified-id=\"3.3.-[0]-Sets-can-not-contain-dupplicate-objects-1.3.2\"><span class=\"toc-item-num\">1.3.2&nbsp;&nbsp;</span>3.3. [0] Sets can not contain dupplicate objects</a></span></li><li><span><a href=\"#3.4.-[1]-Sets-can-not-contain-mutable-objects\" data-toc-modified-id=\"3.4.-[1]-Sets-can-not-contain-mutable-objects-1.3.3\"><span class=\"toc-item-num\">1.3.3&nbsp;&nbsp;</span>3.4. [1] Sets can not contain mutable objects</a></span></li><li><span><a href=\"#3.5-[0]-Intersection-of-sets-(O(min(len(s),-len(t)))\" data-toc-modified-id=\"3.5-[0]-Intersection-of-sets-(O(min(len(s),-len(t)))-1.3.4\"><span class=\"toc-item-num\">1.3.4&nbsp;&nbsp;</span>3.5 [0] Intersection of sets (O(min(len(s), len(t)))</a></span></li><li><span><a href=\"#3.6-[0]-Union-of-sets-(O(len(s)+len(t)))\" data-toc-modified-id=\"3.6-[0]-Union-of-sets-(O(len(s)+len(t)))-1.3.5\"><span class=\"toc-item-num\">1.3.5&nbsp;&nbsp;</span>3.6 [0] Union of sets (O(len(s)+len(t)))</a></span></li><li><span><a href=\"#3.7.-[0]-Sets-are-MUCH-more-efficient-for-searching-by-content-than-lists\" data-toc-modified-id=\"3.7.-[0]-Sets-are-MUCH-more-efficient-for-searching-by-content-than-lists-1.3.6\"><span class=\"toc-item-num\">1.3.6&nbsp;&nbsp;</span>3.7. [0] Sets are MUCH more <a href=\"https://wiki.python.org/moin/TimeComplexity\" target=\"_blank\">efficient for searching by content</a> than lists</a></span></li></ul></li><li><span><a href=\"#4-[0]-Dictionaries\" data-toc-modified-id=\"4-[0]-Dictionaries-1.4\"><span class=\"toc-item-num\">1.4&nbsp;&nbsp;</span>4 [0] <a href=\"https://docs.python.org/3.7/library/stdtypes.html#dict\" target=\"_blank\">Dictionaries</a></a></span><ul class=\"toc-item\"><li><span><a href=\"#4.1-[0]-Static-definition-of-a-dictionary\" data-toc-modified-id=\"4.1-[0]-Static-definition-of-a-dictionary-1.4.1\"><span class=\"toc-item-num\">1.4.1&nbsp;&nbsp;</span>4.1 [0] Static definition of a dictionary</a></span></li><li><span><a href=\"#4.2-[0]-Indexing-of-a-dictionary-by-a-key-(O(1))\" data-toc-modified-id=\"4.2-[0]-Indexing-of-a-dictionary-by-a-key-(O(1))-1.4.2\"><span class=\"toc-item-num\">1.4.2&nbsp;&nbsp;</span>4.2 [0] Indexing of a dictionary by a key (O(1))</a></span></li><li><span><a href=\"#4.3-[0]-Testing-if-a-key-is-the-dictionary-(O(1))\" data-toc-modified-id=\"4.3-[0]-Testing-if-a-key-is-the-dictionary-(O(1))-1.4.3\"><span class=\"toc-item-num\">1.4.3&nbsp;&nbsp;</span>4.3 [0] Testing if a key is the dictionary (O(1))</a></span></li><li><span><a href=\"#4.4-[1]-Getting-the-keys-(O(n))\" data-toc-modified-id=\"4.4-[1]-Getting-the-keys-(O(n))-1.4.4\"><span class=\"toc-item-num\">1.4.4&nbsp;&nbsp;</span>4.4 [1] Getting the keys (O(n))</a></span></li><li><span><a href=\"#4.5-[1]-Getting-the-values-(O(n))\" data-toc-modified-id=\"4.5-[1]-Getting-the-values-(O(n))-1.4.5\"><span class=\"toc-item-num\">1.4.5&nbsp;&nbsp;</span>4.5 [1] Getting the values (O(n))</a></span></li><li><span><a href=\"#4.4-[1]-Determining-the-position-of-a-key-in-a-dictionary-(O(n))\" data-toc-modified-id=\"4.4-[1]-Determining-the-position-of-a-key-in-a-dictionary-(O(n))-1.4.6\"><span class=\"toc-item-num\">1.4.6&nbsp;&nbsp;</span>4.4 [1] Determining the position of a key in a dictionary (O(n))</a></span></li><li><span><a href=\"#4.6-[0]-Inserting-a-new-entry-(O(1))\" data-toc-modified-id=\"4.6-[0]-Inserting-a-new-entry-(O(1))-1.4.7\"><span class=\"toc-item-num\">1.4.7&nbsp;&nbsp;</span>4.6 [0] 
Inserting a new entry (O(1))</a></span></li><li><span><a href=\"#[0]-4.7-Deleting-an-entry-(O(1))\" data-toc-modified-id=\"[0]-4.7-Deleting-an-entry-(O(1))-1.4.8\"><span class=\"toc-item-num\">1.4.8&nbsp;&nbsp;</span>[0] 4.7 Deleting an entry (O(1))</a></span></li><li><span><a href=\"#4.8-[1]-Dictionaries-are-mutable\" data-toc-modified-id=\"4.8-[1]-Dictionaries-are-mutable-1.4.9\"><span class=\"toc-item-num\">1.4.9&nbsp;&nbsp;</span>4.8 [1] Dictionaries are mutable</a></span></li><li><span><a href=\"#4.9-[0]-Looping-a-dictionary-(O(n))\" data-toc-modified-id=\"4.9-[0]-Looping-a-dictionary-(O(n))-1.4.10\"><span class=\"toc-item-num\">1.4.10&nbsp;&nbsp;</span>4.9 [0] Looping a dictionary (O(n))</a></span></li></ul></li><li><span><a href=\"#5.-Bytes\" data-toc-modified-id=\"5.-Bytes-1.5\"><span class=\"toc-item-num\">1.5&nbsp;&nbsp;</span>5. <a href=\"http://python-para-impacientes.blogspot.com.es/2014/07/tipos-de-cadenas-unicode-byte-y.html\" target=\"_blank\">Bytes</a></a></span><ul class=\"toc-item\"><li><span><a href=\"#5.1.-Creation-of-bytes-sequence\" data-toc-modified-id=\"5.1.-Creation-of-bytes-sequence-1.5.1\"><span class=\"toc-item-num\">1.5.1&nbsp;&nbsp;</span>5.1. Creation of bytes sequence</a></span></li><li><span><a href=\"#5.2.-Indexing-in-a-bytes-sequence\" data-toc-modified-id=\"5.2.-Indexing-in-a-bytes-sequence-1.5.2\"><span class=\"toc-item-num\">1.5.2&nbsp;&nbsp;</span>5.2. Indexing in a bytes sequence</a></span></li><li><span><a href=\"#5.3.-Concatenation-of-bytes-sequences\" data-toc-modified-id=\"5.3.-Concatenation-of-bytes-sequences-1.5.3\"><span class=\"toc-item-num\">1.5.3&nbsp;&nbsp;</span>5.3. Concatenation of bytes sequences</a></span></li><li><span><a href=\"#5.4.-Bytes-are-inmutable\" data-toc-modified-id=\"5.4.-Bytes-are-inmutable-1.5.4\"><span class=\"toc-item-num\">1.5.4&nbsp;&nbsp;</span>5.4. Bytes are inmutable</a></span></li></ul></li><li><span><a href=\"#6.-Bytearray\" data-toc-modified-id=\"6.-Bytearray-1.6\"><span class=\"toc-item-num\">1.6&nbsp;&nbsp;</span>6. <a href=\"http://ze.phyr.us/bytearray/\" target=\"_blank\">Bytearray</a></a></span></li><li><span><a href=\"#7.-Arrays\" data-toc-modified-id=\"7.-Arrays-1.7\"><span class=\"toc-item-num\">1.7&nbsp;&nbsp;</span>7. <a href=\"https://docs.python.org/3/library/array.html\" target=\"_blank\">Arrays</a></a></span><ul class=\"toc-item\"><li><span><a href=\"#Element-access\" data-toc-modified-id=\"Element-access-1.7.1\"><span class=\"toc-item-num\">1.7.1&nbsp;&nbsp;</span>Element access</a></span></li><li><span><a href=\"#Slice-access\" data-toc-modified-id=\"Slice-access-1.7.2\"><span class=\"toc-item-num\">1.7.2&nbsp;&nbsp;</span>Slice access</a></span></li><li><span><a href=\"#Appending-elements\" data-toc-modified-id=\"Appending-elements-1.7.3\"><span class=\"toc-item-num\">1.7.3&nbsp;&nbsp;</span>Appending elements</a></span></li><li><span><a href=\"#Concatenating-arrays\" data-toc-modified-id=\"Concatenating-arrays-1.7.4\"><span class=\"toc-item-num\">1.7.4&nbsp;&nbsp;</span>Concatenating arrays</a></span></li><li><span><a href=\"#Deleting-elements\" data-toc-modified-id=\"Deleting-elements-1.7.5\"><span class=\"toc-item-num\">1.7.5&nbsp;&nbsp;</span>Deleting elements</a></span></li></ul></li></ul></li></ul></div>\n\nContainers\n\nTuples.\nLists.\nSets.\nDictionaries.\nBytes.\nBytearrays.\nArrays.\n\n1. Tuples\nA tuples groups multiple (possiblely with different types) objects. 
They can be compared to (static) structures of other languages.\n1.1 Tuples are (as the rest of elements of Python) objects", "print(type(()))\n\nhelp(())", "1.2. Tuple definition\nAnd some timing ... :-)", "!python -m timeit \"x = (1, 'a', 'b', 'a')\"\n\na = (1, 'a', 'b', 'a')", "1.3. Counting ocurrences in tuples", "a.count('a')", "1.4. Searching for an item in a tuple", "a.index('b')", "1.5. Slicing in tuples", "a\n\na[2] # The 3-rd item\n\na[2:1] # Extract the tuple from the 2-nd item to the 1-st one\n\na[2:2] # Extract from the 2-nd item to the 2-nd item\n\na[2:3] # Extract from the 2-nd item to the 3-rd one\n\na[2:4] # Extract one item more\n\na[1:] # Extract from the 1-st to the end\n\na[:] # Extract all items (a==a[:])", "1.6. Functions can return tuples", "def return_tuple():\n return (1, 'a', 2)\nprint(return_tuple())", "1.7. Swapping pairs with tuples is fun!", "a = 1; b = 2\nprint(a, b)\n(a, b) = (b, a)\nprint(a, b)", "1.8. Tuples are inmutable\nThey can not grow:", "a = (1, 'a')\nprint(id(a),a)\n\na += (2,) # This creates a new instance of 'a'\nprint(id(a),a)", "... or be changed:", "a[1] = 2", "Tuples are inmutable!", "a = 1; b = 2\nprint('\"a\" is in', id(a))\nt = (a, b)\nprint('\"t\" is in', id(t), 'and contains', t)\na = 3\nprint('\"a\" is in', id(a))\nprint('\"t\" is in', id(t), 'and contains', t)", "2. [0] Lists\nA list is a data (usually dynamic) structure that holds a collection of objects, which can have different types. Internally, a list is represented as an array, therefore, lists are fast for appending and for set/pop/getting an item, and slow for removing an internal item.", "help([])", "2.1 [1] (Of course) lists are objects", "print(type([]));\n\n!python -m timeit \"x = [1, 'a', 'b', 'a']\" # List creation is more expensive than tuple creation", "(Tuples are about three times faster)", "a = [] # Empty list definition", "2.2 [0] Appending items to a list (O(1))", "a.append('Hello')\na.append('world!')\na", "Python lists can be \"promiscuous\":", "a.append(100)\na", "2.3 [0] Inserting items (O(n))", "a.insert(1,'wave!')\na", "2.4 [0] Deleting items from a list by content (O(n)))", "a.remove('Hello')\na\n\nb = ['a', 'b', 'a']; b.remove('a'); print(b)", "2.4 [0] Deleting items from the begin of the list (O(1)))", "a.pop(0)", "2.4 [0] Deleting items from the end of the list (O(1)))", "print(a)\na.pop(len(a)-1) # By index, equivalent to \"del a[0]\"\nprint(a)", "2.5 [0] Sorting the elements of a list (O(n log n))", "a = []\na.append('c')\na.append('b')\na.append('a')\na\n\na.sort()\na\n\na.reverse()\na\n\n# Indexing\nprint(a[1]) # Second element\nprint(a[-1]) # Last element\nprint(a[100]) # Error\n\na+a\n\na*3\n\na[::-1]\n\nx = [1,2,3,2,1] # Palindrome?\nx[::-1] == x", "2.6 [1] Erasing all list items (O(1))", "a.clear()\na", "2.7 [0] List slicing (O(s))", "a.append('Hello')\nprint(a, a[0])\n\na.append(1)\na.append(('a',2))\na.append('world!')\na\n\nprint(a[1:1], a[1:2], a[1:3], a[1:], a[:], a[1:3:2])", "2.8 [1] Defining lists with list comprehensions:", "[x**2 for x in range(10)]\n\n# http://stackoverflow.com/questions/31045518/finding-prime-numbers-using-list-comprehention\n[x for x in range(2, 2000) if all(x % y != 0 for y in range(2, int(x ** 0.5) + 1))]\n\nl = [[x+y for x in range(10)] for y in range(10)]\nl\n\nl[1][2]\n\n10 in l\n\n10 in l[1]\n\n# Lists of lists\nx = [[1,2],[2,3,4],['a']]\nx", "2.9 [1] Lists are mutable objects\nLists can be modified \"in-place\".", "l = [2,3]\n\nid(l)\n\nl[1] = 4\nl\n\nid(l)", "3. 
[0] Sets\nSets are implemented as hash table of (unordered) objects, therefore sets are good for get/set/delete/searching items and bad for . Sets do not support indexing, slicing, or other sequence-like behavior.", "a = {1, 2, 'a', (1, 2)}\na\n\nprint(type(a))\n\nhelp(a)", "3.2. [0] Sets can grow (O(1))", "a.add('a')\nprint(a)", "3.3. [0] Sets can not contain dupplicate objects", "a.add('a')\nprint(a)", "3.4. [1] Sets can not contain mutable objects\nMutable objects can not be hashed :-(", "a = set()\na.add([1,2]) # Sets can not contain lists\n\na = set() # Empty set\na.add({1,2,3}) # Sets can not contain sets", "3.5 [0] Intersection of sets (O(min(len(s), len(t)))", "a = {1,2,3}\nb = {2,3,4}\na.intersection(b)", "3.6 [0] Union of sets (O(len(s)+len(t)))", "a.union(b)", "3.7. [0] Sets are MUCH more efficient for searching by content than lists", "a = set(range(1000))\nprint(a)\n\n%timeit '0' in a\n\na = list(range(1000))\nprint(a)\n\n%timeit '0' in a", "4 [0] Dictionaries\nDictionaries are sets where each element (a key) has associated an object (a value). In fact, sets can be seen as dictionaries where the elments have not associations. As sets, dictionaries are efficient for indexing by keys.", "help({})", "4.1 [0] Static definition of a dictionary", "a = {'Macinstosh':'OSX', 'PC':'Windows', 'Macintosh-Linux':'Linux', 'PC-Linux':'Linux'}\na", "4.2 [0] Indexing of a dictionary by a key (O(1))", "a['PC']", "4.3 [0] Testing if a key is the dictionary (O(1))", "'PC-Linux' in a", "4.4 [1] Getting the keys (O(n))", "a.keys()", "4.5 [1] Getting the values (O(n))", "a.values()", "4.4 [1] Determining the position of a key in a dictionary (O(n))", "list(a.keys()).index(\"Macintosh-Linux\")\n\nfor i in a.keys():\n print(i)", "4.6 [0] Inserting a new entry (O(1))", "a['Celullar'] = \"Android\"\na", "[0] 4.7 Deleting an entry (O(1))", "del a['Celullar']\na\n\n# Modifiying an entry\na.update({\"PC\": \"Windows 10\"})\na", "4.8 [1] Dictionaries are mutable", "id(a)\n\na['Macintosh-Linux'] = 'Linux for the Mac'\na\n\nid(a)", "4.9 [0] Looping a dictionary (O(n))", "for i in a:\n print(i, a[i])\n\nfor i in a.values():\n print(i)\n\nfor i in a.items():\n print(i)", "5. Bytes\nA raw bytes sequence type.\n5.1. Creation of bytes sequence", "a = b'hello'\nprint(type(a))\n\nprint(a)", "5.2. Indexing in a bytes sequence", "chr(a[1])", "5.3. Concatenation of bytes sequences", "b = b'world!'\nprint('\"b\" is in', id(b))\nc = a + b' ' + b\nprint(c)", "5.4. Bytes are inmutable", "a = b'abc'\nprint(id(a))\na += b'efg'\nprint(id(a))", "6. Bytearray\nBytearray is a mutable implementation of an array of bytes. Therefore, appending data to a bytearray object is much faster than to a bytes object because in this last case, every append implies to create (and destroy the previous) new bytes object.", "%%timeit x = b''\nx += b'x'\n\n%%timeit x = bytearray()\nx.extend(b'x')\n\n# Array of bytes = 0\nx = bytearray(10)\n\nx\n\nlen(x)\n\nfor i in range(len(x)):\n x[i] += 1\n\nx\n\n# A byte in Python is a 0 <= value <= 255.\nx[1] = -1\n\n# A bytearray can be created from a list\nx = bytearray([1,2,3])\nx\n\nimport sys\nx = bytearray(sys.stdin.read(5).encode())\nx", "7. 
Arrays", "import array as arr\na = arr.array('d', [1.1, 3.5, 4.5])\nprint(a)", "Element access", "a[1]\n\na[1] = 4.0\na", "Slice access", "a[1:2]", "Appending elements", "a.append(5.0)\na", "Concatenating arrays", "a.extend([5.0, 6.0, 7.0])\na\n\na += arr.array('d', [1.1, 4.0, 4.5])\na", "Deleting elements", "# By value\na.remove(4.0)\na\n\n# By index\na.pop(3)\na" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ethen8181/machine-learning
text_classification/chisquare.ipynb
mit
[ "<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Chi-Square-Feature-Selection\" data-toc-modified-id=\"Chi-Square-Feature-Selection-1\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>Chi-Square Feature Selection</a></span><ul class=\"toc-item\"><li><span><a href=\"#Implementation\" data-toc-modified-id=\"Implementation-1.1\"><span class=\"toc-item-num\">1.1&nbsp;&nbsp;</span>Implementation</a></span></li></ul></li><li><span><a href=\"#Reference\" data-toc-modified-id=\"Reference-2\"><span class=\"toc-item-num\">2&nbsp;&nbsp;</span>Reference</a></span></li></ul></div>", "# code for loading the format for the notebook\nimport os\n\n# path : store the current path to convert back to it later\npath = os.getcwd()\nos.chdir(os.path.join('..', 'notebook_format'))\n\nfrom formats import load_style\nload_style(plot_style = False)\n\nos.chdir(path)\nimport numpy as np\nimport pandas as pd\n\n# 1. magic to print version\n# 2. magic so that the notebook will reload external python modules\n%load_ext watermark\n%load_ext autoreload \n%autoreload 2\n\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.feature_selection import chi2, SelectKBest\nfrom sklearn.feature_extraction.text import CountVectorizer\n\n%watermark -a 'Ethen' -d -t -v -p numpy,pandas,sklearn", "Chi-Square Feature Selection\nFeature selection is a process where you automatically select those features in your data that contribute most to the prediction variable or output in which you are interested. The benefits of performing feature selection before modeling your data are:\n\nAvoid Overfitting: Less redundant data gives performance boost to the model and results in less opportunity to make decisions based on noise\nReduces Training Time: Less data means that algorithms train faster", "# suppose we have the following toy text data\nX = np.array(['call you tonight', 'Call me a cab', 'please call me... PLEASE!', 'he will call me'])\ny = [1, 1, 2, 0]\n\n# we'll convert it to a dense document-term matrix,\n# so we can print a more readable output\nvect = CountVectorizer()\nX_dtm = vect.fit_transform(X)\nX_dtm = X_dtm.toarray()\npd.DataFrame(X_dtm, columns = vect.get_feature_names())", "One common feature selection method that is used with text data is the Chi-Square feature selection. The $\\chi^2$ test is used in statistics to test the independence of two events. More specifically in feature selection we use it to test whether the occurrence of a specific term and the occurrence of a specific class are independent. More formally, given a document $D$, we estimate the following quantity for each term and rank them by their score:\n$$\n\\chi^2(D, t, c) = \\sum_{e_t \\in {0, 1}} \\sum_{e_c \\in {0, 1}} \n\\frac{ ( N_{e_te_c} - E_{e_te_c} )^2 }{ E_{e_te_c} }$$\nWhere\n\n$N$ is the observed frequency in and $E$ the expected frequency\n$e_t$ takes the value 1 if the document contains term $t$ and 0 otherwise\n$e_c$ takes the value 1 if the document is in class $c$ and 0 otherwise\n\nFor each feature (term), a corresponding high $\\chi^2$ score indicates that the null hypothesis $H_0$ of independence (meaning the document class has no influence over the term's frequency) should be rejected and the occurrence of the term and class are dependent. In this case, we should select the feature for the text classification.\nImplementation\nWe first compute the observed count for each class. 
This is done by building a contingency table from an input $X$ (feature values) and $y$ (class labels). Each entry $i$, $j$ corresponds to some feature $i$ and some class $j$, and holds the sum of the $i_{th}$ feature's values across all samples belonging to the class $j$.\nNote that although the feature values here are represented as frequencies, this method also works quite well in practice when the values are tf-idf values, since those are just weighted/scaled frequencies.", "# binarize the output column,\n# this makes computing the observed value a \n# simple dot product\ny_binarized = LabelBinarizer().fit_transform(y)\nprint(y_binarized)\nprint()\n\n# our observed count for each class (the row)\n# and each feature (the column)\nobserved = np.dot(y_binarized.T, X_dtm)\nprint(observed)", "e.g. the second row of the observed array refers to the total count of the terms that belongs to class 1. Then we compute the expected frequencies of each term for each class.", "# compute the probability of each class and the feature count; \n# keep both as a 2 dimension array using reshape\nclass_prob = y_binarized.mean(axis = 0).reshape(1, -1)\nfeature_count = X_dtm.sum(axis = 0).reshape(1, -1)\nexpected = np.dot(class_prob.T, feature_count)\nprint(expected)\n\nchisq = (observed - expected) ** 2 / expected\nchisq_score = chisq.sum(axis = 0)\nprint(chisq_score)", "We can confirm our result with the scikit-learn library using the chi2 function. The following code chunk computes chi-square value for each feature. For the returned tuple, the first element is the chi-square scores, the scores are better if greater. The second element is the p-values, they are better if smaller.", "chi2score = chi2(X_dtm, y)\nchi2score", "Scikit-learn provides a SelectKBest class that can be used with a suite of different statistical tests. It will rank the features with the statistical test that we've specified and select the top k performing ones (meaning that these terms is considered to be more relevant to the task at hand than the others), where k is also a number that we can tweak.", "kbest = SelectKBest(score_func = chi2, k = 4)\nX_dtm_kbest = kbest.fit_transform(X_dtm, y)\nX_dtm_kbest", "For the Chi-Square feature selection we should expect that out of the total selected features, a small part of them are still independent from the class. In text classification, however, it rarely matters when a few additional terms are included the in the final feature set. All is good as long as the feature selection is ranking features with respect to their usefulness and is not used to make statements about statistical dependence or independence of variables.\nReference\n\nBlog: Feature Selection For Machine Learning in Python\nBlog: Using Feature Selection Methods in Text Classification\nStackoverflow: Perform Chi-2 feature selection on TF and TF*IDF vectors" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
kwinkunks/rainbow
notebooks/Guessing_colourmaps-NOCROSS.ipynb
apache-2.0
[ "Avoiding the cross", "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline", "Read an image", "cd ~/Dropbox/dev/rainbow/notebooks\n\nfrom PIL import Image\n\n# img = Image.open('data/cbar/boxer.png')\n# img = Image.open('data/cbar/fluid.png')\n# img = Image.open('data/cbar/lisa.png')\n# img = Image.open('data/cbar/redblu.png')\n# img = Image.open('data/cbar/seismic.png')\n# img = Image.open('data/cbar/drainage.jpg')\n#img = Image.open('data/cbar/test.png')\n\nimg = Image.open('data/cbar/Colormap_Jet1.png')\n\nimg\n\nimg.size", "Quantize with scikit", "n_colours = 100\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.utils import shuffle\n\nim = np.asarray(img)[..., :3] / 255.\n\nh, w, d = im.shape\nim_ = im.reshape((w * h, d))\n\n# Define training set.\nn = min(h*w//50, n_colours*10)\nsample = shuffle(im_, random_state=0)[:n]\n\nkmeans = KMeans(n_clusters=n_colours).fit(sample)\n\np = kmeans.cluster_centers_\n\n# I don't know why I need to do this, but I do. Floating point precision maybe.\np[p > 1] = 1\np[p < 0] = 0", "Colinearity adjustment\nApproaches to try:\nFor each point...\n\nHow colinear is every triangle this point makes with every other point? (Too many)\nHow colinear is this point with its 2 nearest neighbours?\nHow colinear are the three nearest neighbours of this point with each other? (PRobably can't disconnect a point from its colinearity coefficient like this)", "# ALL TRIPLES\n# from itertools import permutations\n# triples = np.array(list(permutations(p, 3)))\n# triples.shape\n\n# There are n(n - 1) values that \"belong\" to each point (have it first).\n# So maybe I can get a measure of the local linearity of a point\n\n# ... OK this is all too much, let's try something else...\n\nfrom sklearn.neighbors import BallTree\ntree = BallTree(p)\n\n# Get only the three nearest\n# _, idx = tree.query(p, 4)\n# idx = idx[:, 1:] # For measuring the relationship between the 3 neighbours (not including 'self')\n# triples = p[idx]\n\n# Get 2 nearest neighbours and include 'self'\n_, idx = tree.query(p, 3)\ntriples = p[idx]\n\ncolin = 1 - np.power(np.abs(np.linalg.det(triples)), 0.25) # 1 = colinear, 0 = not at all\n# Will need to scale this I think\n\nnp.max(colin)\n\nplt.hist(colin)\nplt.plot()\nplt.title('{} points'.format(colin.size))\nplt.show()\n\n# Now we also need a direction metric. Best thing is probably\n# the spherical angle, which has two parameters: alpha and gamma.\n\ndef colinear(pts):\n \"\"\"Area of a triangle, given an array of three 3d points.\n \"\"\"\n print(pts)\n p1, p2, p3 = np.array(pts)\n co = 1 - 0.5 * np.linalg.norm(np.cross(p2 - p1, p3 - p1))\n return co\n\nnp.linalg.norm(np.cross(np.array([1,2,3])-np.array([6,3,9]), np.array([-1,4,3])-np.array([8,2,9])))\n\nx, y, z = triples[-1]\nx\n\nnp.apply_along_axis(colinear, 1, triples)\n\ndef orient_line(p):\n \"\"\"Orientation of a line, given an array of two 3d points.\n \"\"\"\n p1, p2 = np.array(p)\n line = np.abs(p1 - p2)\n unit = line / np.linalg.norm(line)\n\n x, y, z = unit\n pitch = np.arcsin(-y)\n yaw = np.arctan2(x, z)\n \n return np.array([pitch, yaw])\n\ndef orient_tri(points):\n \"\"\"Orientation of a triangle, given an array of three 3d points.\n Mean of sides or orientation of longest edge? 
Or mean of two longest...?\n \"\"\"\n this = []\n pairs = list(permutations(points, 2))\n for pair in [0, 1, -1]:\n this.append(orientation(pairs[pair]))\n return np.mean(this, axis=0)\n\n\nidx[:5]\n\nr, c = idx.shape\nresult = np.zeros((r, 2))\nfor i, ix in enumerate(idx):\n triple = p[ix]\n this_point = 0.5 * colinear * orient_tri(t)\n result[i] += this_point\n result[ix[1]] += 0.5 * this_point\n result[ix[2]] += 0.5 * this_point\nresult\n\ntriples[3]\n\nnp.mean(a, axis=0)\n\nfrom itertools import permutations\npoints = []\nfor i in triples:\n this = []\n pairs = list(permutations(i, 2))\n for pair in [0, 1, -1]:\n this.append(orientation(pairs[pair]))\n points.append(this)\npoints = np.array(points)\n\npoints\n\n# need a function that takes 3 points and returns the orientation and area of the triangle\n\nnp.arccos(-11/3)\n\na = np.array([[11,12,13], [21, 22, 23], [31, 32,33], [41, 42, 43], [51,52,53], [61,62,63]])\nt = np.array(list(permutations(a, 3)))", "Travelling salesman problem\nRemember that these points are essentially in random order:", "from mpl_toolkits.mplot3d import Axes3D\n\n# Set up the figure\nfig = plt.figure(figsize=(8, 8))\n\n# Result of TSP solver\nax = fig.add_subplot(111, projection='3d')\nax.scatter(*p.T, c=p, lw=0, s=40, alpha=1)\nax.plot(*p.T, color='k', alpha=0.4)\nax.set_title('Codebook')\n\nplt.show()\n\nfrom pytsp import run, dumps_matrix\n\np = np.vstack([[[0.25, 0, 0.5]], p])\n#p = np.vstack([[[0, 0, 0]], p])\n\np[:6]\n\nfrom scipy.spatial.distance import pdist, squareform\n\n# Make distance matrix.\ndists = squareform(pdist(p, 'euclidean'))\n\n# The values in `dists` are floats in the range 0 to sqrt(3). \n# Normalize the values to int16s.\nd = 32767 * dists / np.sqrt(3)\nd = d.astype(np.int16)\n\n# To use a TSP algo to solve the shortest Hamiltonian path problem,\n# we need to add a point that is zero units from every other point.\nrow, col = dists.shape\nd = np.insert(d, row, 0, axis=0)\nd = np.insert(d, col, 0, axis=1)", "The zero-point trick is legit. Reference from E. L. Lawler, Jan Karel Lenstra, A. H. G. Rinnooy Kan, D. B. Shmoys (1985). The Traveling Salesman Problem: A Guided Tour of Combinatorial Optimization, 1st Edition. Wiley. 476 pp. ISBN 978-0471904137.", "d", "LKH implementation.\nK. Helsgaun (2009). General k-opt submoves for the Lin-Kernighan TSP heuristic. Mathematical Programming Computation, 2009, doi: 10.1007/s12532-009-0004-6.", "outf = \"/tmp/myroute_lkh.tsp\"\nwith open(outf, 'w') as f:\n f.write(dumps_matrix(d, name=\"My Route\"))\n\ntour_lkh = run(outf, start=0, solver=\"LKH\")\n\n#result = np.array(tour_concorde['tour'])\nresult = np.array(tour_lkh['tour'])\n\nresult\n\nresult.size # Should be n_colours + 2\n\n# e = np.asscalar(np.where(result == result.size-1)[0])\n\n# if e == 1:\n# # Then it's second and I think I know why.\n# # As long as it went to the last point next, and I think\n# # it necessarily does, then we're good.\n# print(\"Zero-point is second. Probably dealt with it.\")\n# result = np.concatenate([result[:e], result[e+1::][::-1]])\n# elif e == len(result)-1:\n# # Then it's at the end already.\n# print(\"Zero-point is at the end. Dealt with it.\")\n# result = result[:-1]\n# else:\n# # I'm not sure why this would happen... but I Think in this\n# # case we can just skip it.\n# print(\"Zero-point is somewhere weird. Maybe dealt with... BE CAREFUL.\")\n# result = result[result != result.size-1]\n \n# assert len(result) == len(p)", "Now result is the indices of points for the shortest path, shape (256,). 
And p is our quantized colourmap, with shape (n_colours + 1, 3) after prepending the extra starting point. So we can select the points easily for an ordered colourmap.\nThe offsets are to account for the fact that we added a dark-blue point at the start and a zero point at the end.", "c = p[result[1:-1]]", "Ideally I'd like all the distances too, but it wouldn't be too hard to compute these.\nNow let's look at it all.", "from mpl_toolkits.mplot3d import Axes3D\n# Set up the figure\nfig = plt.figure(figsize=(8, 8))\n\n# Result of TSP solver\nax = fig.add_subplot(111, projection='3d')\nax.scatter(*c.T, c=c, lw=0, s=40, alpha=1)\nax.plot(*c.T, color='k', alpha=0.4)\nax.set_title('TSP solver')\n\nplt.show()", "Below is an interactive version of the 3D plot, which may help when there are complicated paths between points. You need to install plotly and colorlover (with pip) if you don't already have them.", "import plotly.graph_objs as go\nimport colorlover as cl\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\ninit_notebook_mode(connected=True)\n\ncb = cl.to_rgb(tuple(map(tuple, c*255)))\ntrace = go.Scatter3d(\n name='TSP Solver',\n x = c[:,0], y = c[:,1], z = c[:,2],\n marker = dict(\n size=4.,\n color=cb\n ),\n line=dict(\n color='#000',\n width=1,\n ),\n )\ndata = [trace]\n\n# Set the different layout properties of the figure:\nlayout = go.Layout(\n autosize=False,\n width=600,\n height=600,\n margin = dict(\n t=0,b=0,l=0,r=0\n ),\n scene = go.Scene(\n xaxis=dict(\n title='red',\n gridcolor='rgb(255, 255, 255)',\n zerolinecolor='rgb(255, 0, 0)',\n showbackground=True,\n backgroundcolor='rgb(230, 230,230)'\n ),\n yaxis=dict(\n title='green',\n gridcolor='rgb(255, 255, 255)',\n zerolinecolor='rgb(0, 255, 0)',\n showbackground=True,\n backgroundcolor='rgb(230, 230,230)'\n ),\n zaxis=dict(\n title='blue',\n gridcolor='rgb(255, 255, 255)',\n zerolinecolor='rgb(0, 0, 255)',\n showbackground=True,\n backgroundcolor='rgb(230, 230,230)'\n ),\n aspectmode='cube',\n camera=dict( \n eye=dict(\n x=1.7,\n y=-1.7,\n z=1,\n )\n ),\n )\n)\n\nfig = go.Figure(data=data, layout=layout)\niplot(fig, show_link=False)\n\nnp.save('/Users/matt/Dropbox/public/raw_data.npy', p[1:])\nnp.save('/Users/matt/Dropbox/public/ordered_data.npy', c)\n\nfrom scipy.spatial import cKDTree\n\nkdtree = cKDTree(c)\n\ndx, ix = kdtree.query(im)\n\nplt.imshow(ix, cmap='gray')\nplt.colorbar()\nplt.show()\n\nplt.imshow(dx, cmap='gray')\nplt.colorbar()\nplt.show()\n\nfig = plt.figure(figsize=(18, 5))\n\nax0 = fig.add_subplot(131)\nplt.imshow(im, interpolation='none')\nax0.set_title(\"Starting image\")\n\nax1 = fig.add_subplot(132, projection='3d')\nax1.scatter(*c.T, c=c, lw=0, s=40, alpha=1)\nax1.plot(*c.T, color='k', alpha=0.5)\nax1.text(*c[0], ' start')\nax1.text(*c[-1], ' end')\nax1.set_title(\"Recovered cmap locus\")\n\nax2 = fig.add_subplot(133)\nplt.imshow(ix, cmap='viridis', interpolation='none')\nplt.colorbar(shrink=0.75)\nax2.set_title(\"Recovered data with known cmap\")\n\nplt.show()\n\ncmaps = [('Perceptually Uniform Sequential',\n ['viridis', 'inferno', 'plasma', 'magma']),\n ('Sequential', ['Blues', 'BuGn', 'BuPu',\n 'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd',\n 'PuBu', 'PuBuGn', 'PuRd', 'Purples', 'RdPu',\n 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd']),\n ('Sequential (2)', ['afmhot', 'autumn', 'bone', 'cool',\n 'copper', 'gist_heat', 'gray', 'hot',\n 'pink', 'spring', 'summer', 'winter']),\n ('Diverging', ['BrBG', 'bwr', 'coolwarm', 'PiYG', 'PRGn', 'PuOr',\n 'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral',\n 'seismic']),\n ('Qualitative', ['Accent', 
'Dark2', 'Paired', 'Pastel1',\n 'Pastel2', 'Set1', 'Set2', 'Set3']),\n ('Miscellaneous', ['gist_earth', 'terrain', 'ocean', 'gist_stern',\n 'brg', 'CMRmap', 'cubehelix',\n 'gnuplot', 'gnuplot2', 'gist_ncar',\n 'nipy_spectral', 'jet', 'rainbow',\n 'gist_rainbow', 'hsv', 'flag', 'prism'])]" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ES-DOC/esdoc-jupyterhub
notebooks/mohc/cmip6/models/hadgem3-gc31-lm/seaice.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Seaice\nMIP Era: CMIP6\nInstitute: MOHC\nSource ID: HADGEM3-GC31-LM\nTopic: Seaice\nSub-Topics: Dynamics, Thermodynamics, Radiative Processes. \nProperties: 80 (63 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:54:14\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'mohc', 'hadgem3-gc31-lm', 'seaice')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties --&gt; Model\n2. Key Properties --&gt; Variables\n3. Key Properties --&gt; Seawater Properties\n4. Key Properties --&gt; Resolution\n5. Key Properties --&gt; Tuning Applied\n6. Key Properties --&gt; Key Parameter Values\n7. Key Properties --&gt; Assumptions\n8. Key Properties --&gt; Conservation\n9. Grid --&gt; Discretisation --&gt; Horizontal\n10. Grid --&gt; Discretisation --&gt; Vertical\n11. Grid --&gt; Seaice Categories\n12. Grid --&gt; Snow On Seaice\n13. Dynamics\n14. Thermodynamics --&gt; Energy\n15. Thermodynamics --&gt; Mass\n16. Thermodynamics --&gt; Salt\n17. Thermodynamics --&gt; Salt --&gt; Mass Transport\n18. Thermodynamics --&gt; Salt --&gt; Thermodynamics\n19. Thermodynamics --&gt; Ice Thickness Distribution\n20. Thermodynamics --&gt; Ice Floe Size Distribution\n21. Thermodynamics --&gt; Melt Ponds\n22. Thermodynamics --&gt; Snow Processes\n23. Radiative Processes \n1. Key Properties --&gt; Model\nName of seaice model used.\n1.1. Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of sea ice model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.model.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of sea ice model code (e.g. CICE 4.2, LIM 2.1, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.model.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Variables\nList of prognostic variable in the sea ice model.\n2.1. Prognostic\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of prognostic variables in the sea ice component.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.key_properties.variables.prognostic') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Sea ice temperature\" \n# \"Sea ice concentration\" \n# \"Sea ice thickness\" \n# \"Sea ice volume per grid cell area\" \n# \"Sea ice u-velocity\" \n# \"Sea ice v-velocity\" \n# \"Sea ice enthalpy\" \n# \"Internal ice stress\" \n# \"Salinity\" \n# \"Snow temperature\" \n# \"Snow depth\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "3. Key Properties --&gt; Seawater Properties\nProperties of seawater relevant to sea ice\n3.1. Ocean Freezing Point\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEquation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"TEOS-10\" \n# \"Constant\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "3.2. Ocean Freezing Point Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf using a constant seawater freezing point, specify this value.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Resolution\nResolution of the sea ice grid\n4.1. Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of this grid e.g. N512L180, T512L70, ORCA025 etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.resolution.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.2. Canonical Horizontal Resolution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nExpression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.3. Number Of Horizontal Gridpoints\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTotal number of horizontal (XY) points (or degrees of freedom) on computational grid.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "5. Key Properties --&gt; Tuning Applied\nTuning applied to sea ice model component\n5.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. 
In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.2. Target\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat was the aim of tuning, e.g. correct sea ice minima, correct seasonal cycle.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.target') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.3. Simulations\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\n*Which simulations had tuning applied, e.g. all, not historical, only pi-control?*", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.4. Metrics Used\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList any observed metrics used in tuning model/parameters", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.5. Variables\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nWhich variables were changed during the tuning process?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6. Key Properties --&gt; Key Parameter Values\nValues of key parameters\n6.1. Typical Parameters\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nWhat values were specified for the following parameters, if used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Ice strength (P*) in units of N m{-2}\" \n# \"Snow conductivity (ks) in units of W m{-1} K{-1} \" \n# \"Minimum thickness of ice created in leads (h0) in units of m\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "6.2. Additional Parameters\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nIf you have any additional parameterised values that you have used (e.g. minimum open water fraction or bare ice albedo), please provide them here as a comma separated list", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7. Key Properties --&gt; Assumptions\nAssumptions made in the sea ice model\n7.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nGeneral overview description of any key assumptions made in this model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.assumptions.description') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. On Diagnostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nNote any assumptions that specifically affect the CMIP6 diagnostic sea ice variables.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.3. Missing Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList any key processes missing in this model configuration, and provide full details where this affects the CMIP6 diagnostic sea ice variables.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8. Key Properties --&gt; Conservation\nConservation in the sea ice component\n8.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nProvide a general description of conservation methodology.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Properties\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nProperties conserved in sea ice by the numerical schemes.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.properties') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Energy\" \n# \"Mass\" \n# \"Salt\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.3. Budget\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nFor each conserved property, specify the output variables which close the related budgets, as a comma separated list. For example: Conserved property, variable1, variable2, variable3", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.budget') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.4. Was Flux Correction Used\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes conservation involve flux correction?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "8.5. Corrected Conserved Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList any variables which are conserved by more than the numerical scheme alone.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9. Grid --&gt; Discretisation --&gt; Horizontal\nSea ice discretisation in the horizontal\n9.1. Grid\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGrid on which sea ice is horizontally discretised?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Ocean grid\" \n# \"Atmosphere Grid\" \n# \"Own Grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9.2. Grid Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the type of sea ice grid?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Structured grid\" \n# \"Unstructured grid\" \n# \"Adaptive grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9.3. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the advection scheme?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Finite differences\" \n# \"Finite elements\" \n# \"Finite volumes\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9.4. Thermodynamics Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the time step of the sea ice model thermodynamic component, in seconds?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "9.5. Dynamics Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the time step of the sea ice model dynamic component, in seconds?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "9.6. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify any additional horizontal discretisation details.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Grid --&gt; Discretisation --&gt; Vertical\nSea ice vertical properties\n10.1. Layering\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhat type of sea ice vertical layers are implemented for purposes of thermodynamic calculations?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Zero-layer\" \n# \"Two-layers\" \n# \"Multi-layers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "10.2. Number Of Layers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIf using multi-layers specify how many.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "10.3. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify any additional vertical grid details.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11. Grid --&gt; Seaice Categories\nWhat method is used to represent sea ice categories?\n11.1. Has Mulitple Categories\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSet to true if the sea ice model has multiple sea ice categories.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "11.2. Number Of Categories\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIf using sea ice categories specify how many.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.3. Category Limits\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIf using sea ice categories specify each of the category limits.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.4. Ice Thickness Distribution Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the sea ice thickness distribution scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.5. Other\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf the sea ice model does not use sea ice categories specify any additional details. For example, models that parameterise the ice thickness distribution (ITD), i.e. there is no explicit ITD but a distribution is assumed and fluxes are computed accordingly.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.other') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12. Grid --&gt; Snow On Seaice\nSnow on sea ice details\n12.1. Has Snow On Ice\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs snow on ice represented in this model?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "12.2. Number Of Snow Levels\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of vertical levels of snow on ice?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "12.3. Snow Fraction\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how the snow fraction on sea ice is determined", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12.4. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify any additional details related to snow on ice.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13. Dynamics\nSea Ice Dynamics\n13.1. Horizontal Transport\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of horizontal advection of sea ice?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.horizontal_transport') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Incremental Re-mapping\" \n# \"Prather\" \n# \"Eulerian\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.2. Transport In Thickness Space\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of sea ice transport in thickness space (i.e. in thickness categories)?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Incremental Re-mapping\" \n# \"Prather\" \n# \"Eulerian\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.3. Ice Strength Formulation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhich method of sea ice strength formulation is used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Hibler 1979\" \n# \"Rothrock 1975\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.4. Redistribution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhich processes can redistribute sea ice (including thickness)?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.redistribution') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Rafting\" \n# \"Ridging\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.5. Rheology\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nRheology, what is the ice deformation formulation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.rheology') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Free-drift\" \n# \"Mohr-Coloumb\" \n# \"Visco-plastic\" \n# \"Elastic-visco-plastic\" \n# \"Elastic-anisotropic-plastic\" \n# \"Granular\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14. Thermodynamics --&gt; Energy\nProcesses related to energy in sea ice thermodynamics\n14.1. 
Enthalpy Formulation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the energy formulation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Pure ice latent heat (Semtner 0-layer)\" \n# \"Pure ice latent and sensible heat\" \n# \"Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)\" \n# \"Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.2. Thermal Conductivity\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat type of thermal conductivity is used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Pure ice\" \n# \"Saline ice\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.3. Heat Diffusion\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of heat diffusion?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Conduction fluxes\" \n# \"Conduction and radiation heat fluxes\" \n# \"Conduction, radiation and latent heat transport\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.4. Basal Heat Flux\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMethod by which basal ocean heat flux is handled?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Heat Reservoir\" \n# \"Thermal Fixed Salinity\" \n# \"Thermal Varying Salinity\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.5. Fixed Salinity Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf you have selected {Thermal properties depend on S-T (with fixed salinity)}, supply fixed salinity value for each sea ice layer.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "14.6. Heat Content Of Precipitation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method by which the heat content of precipitation is handled.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14.7. Precipitation Effects On Salinity\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf precipitation (freshwater) that falls on sea ice affects the ocean surface salinity please provide further details.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15. 
Thermodynamics --&gt; Mass\nProcesses related to mass in sea ice thermodynamics\n15.1. New Ice Formation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method by which new sea ice is formed in open water.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.2. Ice Vertical Growth And Melt\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method that governs the vertical growth and melt of sea ice.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.3. Ice Lateral Melting\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of sea ice lateral melting?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Floe-size dependent (Bitz et al 2001)\" \n# \"Virtual thin ice melting (for single-category)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.4. Ice Surface Sublimation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method that governs sea ice surface sublimation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.5. Frazil Ice\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method of frazil ice formation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "16. Thermodynamics --&gt; Salt\nProcesses related to salt in sea ice thermodynamics.\n16.1. Has Multiple Sea Ice Salinities\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the sea ice model use two different salinities: one for thermodynamic calculations; and one for the salt budget?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "16.2. Sea Ice Salinity Thermal Impacts\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes sea ice salinity impact the thermal properties of sea ice?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "17. Thermodynamics --&gt; Salt --&gt; Mass Transport\nMass transport of salt\n17.1. Salinity Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is salinity determined in the mass transport of salt calculation?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Prescribed salinity profile\" \n# \"Prognostic salinity profile\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.2. Constant Salinity Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf using a constant salinity value specify this value in PSU?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "17.3. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the salinity profile used.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18. Thermodynamics --&gt; Salt --&gt; Thermodynamics\nSalt thermodynamics\n18.1. Salinity Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is salinity determined in the thermodynamic calculation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Prescribed salinity profile\" \n# \"Prognostic salinity profile\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "18.2. Constant Salinity Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf using a constant salinity value specify this value in PSU?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "18.3. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the salinity profile used.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "19. Thermodynamics --&gt; Ice Thickness Distribution\nIce thickness distribution details.\n19.1. Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is the sea ice thickness distribution represented?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Explicit\" \n# \"Virtual (enhancement of thermal conductivity, thin ice melting)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "20. Thermodynamics --&gt; Ice Floe Size Distribution\nIce floe-size distribution details.\n20.1. Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is the sea ice floe-size represented?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Explicit\" \n# \"Parameterised\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "20.2. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nPlease provide further details on any parameterisation of floe-size.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "21. Thermodynamics --&gt; Melt Ponds\nCharacteristics of melt ponds.\n21.1. Are Included\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nAre melt ponds included in the sea ice model?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "21.2. Formulation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat method of melt pond formulation is used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Flocco and Feltham (2010)\" \n# \"Level-ice melt ponds\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "21.3. Impacts\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhat do melt ponds have an impact on?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Albedo\" \n# \"Freshwater\" \n# \"Heat\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "22. Thermodynamics --&gt; Snow Processes\nThermodynamic processes in snow on sea ice\n22.1. Has Snow Aging\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSet to True if the sea ice model has a snow aging scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "22.2. Snow Aging Scheme\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the snow aging scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.3. Has Snow Ice Formation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSet to True if the sea ice model has snow ice formation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "22.4. 
Snow Ice Formation Scheme\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the snow ice formation scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.5. Redistribution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the impact of ridging on snow cover?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.6. Heat Diffusion\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the heat diffusion through snow methodology in sea ice thermodynamics?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Single-layered heat diffusion\" \n# \"Multi-layered heat diffusion\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23. Radiative Processes\nSea Ice Radiative Processes\n23.1. Surface Albedo\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMethod used to handle surface albedo.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.radiative_processes.surface_albedo') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Delta-Eddington\" \n# \"Parameterized\" \n# \"Multi-band albedo\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23.2. Ice Radiation Transmission\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nMethod by which solar radiation through sea ice is handled.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Delta-Eddington\" \n# \"Exponential attenuation\" \n# \"Ice radiation transmission per category\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
blackjax-devs/blackjax
examples/SGLD.ipynb
apache-2.0
[ "MNIST digit recognition with a 3-layer Perceptron\nThis example is inspired form this notebook in the SGMCMCJax repository. We try to use a 3-layer neural network to recognise the digits in the MNIST dataset.", "import jax\nimport jax.nn as nn\nimport jax.numpy as jnp\nimport jax.scipy.stats as stats\nimport numpy as np", "Data preparation\nWe download the MNIST data using tensorflow-datasets:", "import tensorflow_datasets as tfds\n\nmnist_data, _ = tfds.load(\n name=\"mnist\", batch_size=-1, with_info=True, as_supervised=True\n)\nmnist_data = tfds.as_numpy(mnist_data)\ndata_train, data_test = mnist_data[\"train\"], mnist_data[\"test\"]", "Now we need to apply several transformations to the dataset before splitting it into a test and a test set:\n- The images come into 28x28 pixels matrices; we reshape them into a vector;\n- The images are arrays of RGB codes between 0 and 255. We normalize them by the maximum value to get a range between 0 and 1;\n- We hot-encode category numbers.", "def one_hot_encode(x, k, dtype=np.float32):\n \"Create a one-hot encoding of x of size k.\"\n return np.array(x[:, None] == np.arange(k), dtype)\n\n\ndef prepare_data(dataset: tuple, num_categories=10):\n X, y = dataset\n y = one_hot_encode(y, num_categories)\n\n num_examples = X.shape[0]\n num_pixels = 28 * 28\n X = X.reshape(num_examples, num_pixels)\n X = X / 255.0\n\n return jnp.array(X), jnp.array(y), num_examples\n\n\ndef batch_data(rng_key, data, batch_size, data_size):\n \"\"\"Return an iterator over batches of data.\"\"\"\n while True:\n _, rng_key = jax.random.split(rng_key)\n idx = jax.random.choice(\n key=rng_key, a=jnp.arange(data_size), shape=(batch_size,)\n )\n minibatch = tuple(elem[idx] for elem in data)\n yield minibatch\n\n\nX_train, y_train, N_train = prepare_data(data_train)\nX_test, y_test, N_test = prepare_data(data_train)", "Model: 3-layer perceptron\nWe will use a very simple (bayesian) neural network in this example: A MLP with gaussian priors on the weights. We first need a function that computes the model's logposterior density given the data and the current values of the parameters. 
If we note $X$ the array that represents an image and $y$ the array such that $y_i = 0$ if the image is in category $i$, $y_i=1$ otherwise, the model can be written as:\n\\begin{align}\n \\boldsymbol{p} &= \\operatorname{NN}(X)\\\n \\boldsymbol{y} &\\sim \\operatorname{Categorical}(\\boldsymbol{p})\n\\end{align}", "def predict_fn(parameters, X):\n \"\"\"Returns the probability for the image represented by X\n to be in each category given the MLP's weights vakues.\n\n \"\"\"\n activations = X\n for W, b in parameters[:-1]:\n outputs = jnp.dot(W, activations) + b\n activations = nn.softmax(outputs)\n\n final_W, final_b = parameters[-1]\n logits = jnp.dot(final_W, activations) + final_b\n return nn.log_softmax(logits)\n\n\ndef logprior_fn(parameters):\n \"\"\"Compute the value of the log-prior density function.\"\"\"\n logprob = 0.0\n for W, b in parameters:\n logprob += jnp.sum(stats.norm.logpdf(W))\n logprob += jnp.sum(stats.norm.logpdf(b))\n return logprob\n\n\ndef loglikelihood_fn(parameters, data):\n \"\"\"Categorical log-likelihood\"\"\"\n X, y = data\n return jnp.sum(y * predict_fn(parameters, X))\n\n\ndef compute_accuracy(parameters, X, y):\n \"\"\"Compute the accuracy of the model.\n\n To make predictions we take the number that corresponds to the highest probability value.\n \"\"\"\n target_class = jnp.argmax(y, axis=1)\n predicted_class = jnp.argmax(\n jax.vmap(predict_fn, in_axes=(None, 0))(parameters, X), axis=1\n )\n return jnp.mean(predicted_class == target_class)", "Sample from the posterior distribution of the perceptron's weights\nNow we need to get initial values for the parameters, and we simply sample from their prior distribution:", "def init_parameters(rng_key, sizes):\n \"\"\"\n\n Parameter\n ----------\n rng_key\n PRNGKey used by JAX to generate pseudo-random numbers\n sizes\n List of size for the subsequent layers. The first size must correspond\n to the size of the input data and the last one to the number of\n categories.\n\n \"\"\"\n num_layers = len(sizes)\n keys = jax.random.split(rng_key, num_layers)\n return [\n init_layer(rng_key, m, n) for rng_key, m, n in zip(keys, sizes[:-1], sizes[1:])\n ]\n\n\ndef init_layer(rng_key, m, n, scale=1e-2):\n \"\"\"Initialize the weights for a single layer.\"\"\"\n key_W, key_b = jax.random.split(rng_key)\n return (scale * jax.random.normal(key_W, (n, m))), scale * jax.random.normal(\n key_b, (n,)\n )", "We now sample from the model's posteriors. We discard the first 1000 samples until the sampler has reached the typical set, and then take 2000 samples. 
We record the model's accuracy with the current values every 100 steps.", "%%time\n\nimport blackjax\nfrom blackjax.sgmcmc.gradients import grad_estimator\n\ndata_size = len(y_train)\nbatch_size = int(0.01 * data_size)\nlayer_sizes = [784, 100, 10]\nstep_size = 5e-5\nnum_warmup = 1000\nnum_samples = 2000\n\n# Batch the data\nrng_key = jax.random.PRNGKey(1)\nbatches = batch_data(rng_key, (X_train, y_train), batch_size, data_size)\n\n# Build the SGLD kernel\nschedule_fn = lambda _: step_size # constant step size\ngrad_fn = grad_estimator(logprior_fn, loglikelihood_fn, data_size)\nsgld = blackjax.sgld(grad_fn, schedule_fn)\n\n# Set the initial state\ninit_positions = init_parameters(rng_key, layer_sizes)\nstate = sgld.init(init_positions, next(batches))\n\n# Sample from the posterior\naccuracies = []\nsamples = []\nsteps = []\nfor step in range(num_samples + num_warmup):\n _, rng_key = jax.random.split(rng_key)\n batch = next(batches)\n state = sgld.step(rng_key, state, batch)\n if step % 100 == 0:\n accuracy = compute_accuracy(state.position, X_test, y_test)\n accuracies.append(accuracy)\n steps.append(step)\n if step > num_warmup:\n samples.append(state.position)", "Let us plot the accuracy at different points in the sampling process:", "import matplotlib.pylab as plt\n\nfig = plt.figure(figsize=(12, 8))\nax = fig.add_subplot(111)\nax.plot(steps, accuracies)\nax.set_xlabel(\"Number of sampling steps\")\nax.set_ylabel(\"Prediction accuracy\")\nax.set_xlim([0, num_warmup + num_samples])\nax.set_ylim([0, 1])\nax.set_yticks([0.1, 0.3, 0.5, 0.7, 0.9])\nplt.title(\"Sample from 3-layer MLP posterior (MNIST dataset) with SgLD\")\nplt.plot()\n\nprint(f\"The average accuracy in the sampling phase is {np.mean(accuracies[10:]):.2f}\")", "Which is not a bad accuracy at all for such a simple model and after only 1000 steps! Remember though that we draw samples from the posterior distribution of the digit probabilities; we can thus use this information to filter out examples for which the model is \"unsure\" of its prediction.\nHere we will say that the model is unsure of its prediction for a given image if the digit that is most often predicted for this image is predicted less tham 95% of the time.", "predicted_class = np.exp(\n np.stack([jax.vmap(predict_fn, in_axes=(None, 0))(s, X_test) for s in samples])\n)\n\nmax_predicted = [np.argmax(predicted_class[:, i, :], axis=1) for i in range(60000)]\nfreq_max_predicted = np.array(\n [\n (max_predicted[i] == np.argmax(np.bincount(max_predicted[i]))).sum() / 2000\n for i in range(60000)\n ]\n)\ncertain_mask = freq_max_predicted > 0.95", "Let's plot a few examples where the model was very uncertain:", "most_uncertain_idx = np.argsort(freq_max_predicted)\n\nfor i in range(10):\n print(np.bincount(max_predicted[most_uncertain_idx[i]]) / 2000)\n fig = plt.figure()\n plt.imshow(X_test[most_uncertain_idx[i]].reshape(28, 28), cmap=\"gray\")\n plt.show()", "And now compute the average accuracy over all the samples without these uncertain predictions:", "avg_accuracy = np.mean(\n [compute_accuracy(s, X_test[certain_mask], y_test[certain_mask]) for s in samples]\n)\n\nprint(\n f\"The average accuracy removing the samples for which the model is uncertain is {avg_accuracy:.3f}\"\n)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
QuantStack/quantstack-talks
pylondinium/notebooks/bqplot.ipynb
bsd-3-clause
[ "bqplot https://github.com/bloomberg/bqplot\nA Jupyter - d3.js bridge\nbqplot is a jupyter interactive widget library bringing d3.js visualization to the Jupyter notebook.\n\nApache Licensed\n\nbqplot implements the abstractions of Wilkinson’s “The Grammar of Graphics” as interactive Jupyter widgets.\nbqplot provides both\n- high-level plotting procedures with relevant defaults for common chart types,\n- lower-level descriptions of data visualizations meant for complex interactive visualization dashboards and applications involving mouse interactions and user-provided Python callbacks.\nInstallation:\nbash\nconda install -c conda-forge bqplot", "from __future__ import print_function\nfrom IPython.display import display\nfrom ipywidgets import *\nfrom traitlets import *\n\nimport numpy as np\nimport pandas as pd\nimport bqplot as bq\nimport datetime as dt\n\nnp.random.seed(0)\nsize = 100\ny_data = np.cumsum(np.random.randn(size) * 100.0)\ny_data_2 = np.cumsum(np.random.randn(size))\ny_data_3 = np.cumsum(np.random.randn(size) * 100.)\n\nx = np.linspace(0.0, 10.0, size)\n\nprice_data = pd.DataFrame(np.cumsum(np.random.randn(150, 2).dot([[0.5, 0.8], [0.8, 1.0]]), axis=0) + 100,\n columns=['Security 1', 'Security 2'],\n index=pd.date_range(start='01-01-2007', periods=150))\n\nsymbol = 'Security 1'\ndates_all = price_data.index.values\nfinal_prices = price_data[symbol].values.flatten()", "A simple plot with the pyplot API", "from bqplot import pyplot as plt\n\nplt.figure(1)\nn = 100\nplt.plot(np.linspace(0.0, 10.0, n), np.cumsum(np.random.randn(n)), \n axes_options={'y': {'grid_lines': 'dashed'}})\nplt.show()", "Scatter Plot", "plt.figure(title='Scatter Plot with colors')\nplt.scatter(y_data_2, y_data_3, color=y_data)\nplt.show()", "Histogram", "plt.figure()\nplt.hist(y_data, colors=['OrangeRed'])\nplt.show()", "Every component of the figure is an independent widget", "xs = bq.LinearScale()\nys = bq.LinearScale()\nx = np.arange(100)\ny = np.cumsum(np.random.randn(2, 100), axis=1) #two random walks\n\nline = bq.Lines(x=x, y=y, scales={'x': xs, 'y': ys}, colors=['red', 'green'])\nxax = bq.Axis(scale=xs, label='x', grid_lines='solid')\nyax = bq.Axis(scale=ys, orientation='vertical', tick_format='0.2f', label='y', grid_lines='solid')\n\nfig = bq.Figure(marks=[line], axes=[xax, yax], animation_duration=1000)\ndisplay(fig)\n\n# update data of the line mark\nline.y = np.cumsum(np.random.randn(2, 100), axis=1)\n\nxs = bq.LinearScale()\nys = bq.LinearScale()\nx, y = np.random.rand(2, 20)\nscatt = bq.Scatter(x=x, y=y, scales={'x': xs, 'y': ys}, default_colors=['blue'])\nxax = bq.Axis(scale=xs, label='x', grid_lines='solid')\nyax = bq.Axis(scale=ys, orientation='vertical', tick_format='0.2f', label='y', grid_lines='solid')\n\nfig = bq.Figure(marks=[scatt], axes=[xax, yax], animation_duration=1000)\ndisplay(fig)\n\n#data updates\nscatt.x = np.random.rand(20) * 10\nscatt.y = np.random.rand(20)", "The same holds for the attributes of scales, axes", "xs.min = 4\n\nxs.min = None\n\nxax.label = 'Some label for the x axis'", "Use bqplot figures as input widgets", "xs = bq.LinearScale()\nys = bq.LinearScale()\nx = np.arange(100)\ny = np.cumsum(np.random.randn(2, 100), axis=1) #two random walks\n\nline = bq.Lines(x=x, y=y, scales={'x': xs, 'y': ys}, colors=['red', 'green'])\nxax = bq.Axis(scale=xs, label='x', grid_lines='solid')\nyax = bq.Axis(scale=ys, orientation='vertical', tick_format='0.2f', label='y', grid_lines='solid')", "Selections", "def interval_change_callback(change):\n db.value = 
str(change['new'])\n\nintsel = bq.interacts.FastIntervalSelector(scale=xs, marks=[line])\nintsel.observe(interval_change_callback, names=['selected'] )\n\ndb = widgets.Label()\ndb.value = str(intsel.selected)\ndisplay(db)\n\nfig = bq.Figure(marks=[line], axes=[xax, yax], animation_duration=1000, interaction=intsel)\ndisplay(fig)\n\nline.selected", "Handdraw", "handdraw = bq.interacts.HandDraw(lines=line)\nfig.interaction = handdraw\n\nline.y[0]", "Moving points around", "from bqplot import *\n\nsize = 100\nnp.random.seed(0)\nx_data = range(size)\ny_data = np.cumsum(np.random.randn(size) * 100.0)\n\n## Enabling moving of points in scatter. Try to click and drag any of the points in the scatter and \n## notice the line representing the mean of the data update\n\nsc_x = LinearScale()\nsc_y = LinearScale()\n\nscat = Scatter(x=x_data[:10], y=y_data[:10], scales={'x': sc_x, 'y': sc_y}, default_colors=['blue'],\n enable_move=True)\nlin = Lines(scales={'x': sc_x, 'y': sc_y}, stroke_width=4, line_style='dashed', colors=['orange'])\nm = Label(value='Mean is %s'%np.mean(scat.y))\n\ndef update_line(change):\n with lin.hold_sync():\n lin.x = [np.min(scat.x), np.max(scat.x)]\n lin.y = [np.mean(scat.y), np.mean(scat.y)]\n m.value='Mean is %s'%np.mean(scat.y)\n \n\nupdate_line(None)\n\n# update line on change of x or y of scatter\nscat.observe(update_line, names='x')\nscat.observe(update_line, names='y')\n\nax_x = Axis(scale=sc_x)\nax_y = Axis(scale=sc_y, tick_format='0.2f', orientation='vertical')\n\nfig = Figure(marks=[scat, lin], axes=[ax_x, ax_y])\n\n## In this case on drag, the line updates as you move the points.\nwith scat.hold_sync():\n scat.enable_move = True\n scat.update_on_move = True\n scat.enable_add = False\n\ndisplay(m, fig)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
analysiscenter/dataset
examples/experiments/augmentation/augmentation.ipynb
apache-2.0
[ "Is Augmentation Necessary?\nIn this notebook, we will check how the network trained on ordinary data copes with the augmented data and what will happen if it is learned from the augmented data.\nHow the implement class with the neural network you'll see in this file.", "import sys\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm_notebook as tqn\n%matplotlib inline\n\nsys.path.append('../../..')\nsys.path.append('../../utils')\n\nimport utils\nfrom secondbatch import MnistBatch\nfrom simple_conv_model import ConvModel\nfrom batchflow import V, B\nfrom batchflow.opensets import MNIST", "Create batch class depended from MnistBatch", "mnistset = MNIST(batch_class=MnistBatch)", "Already familiar to us the construction to create the pipelines. These pipelines train NN on simple MNIST images, without shift.", "normal_train_ppl = (\n mnistset.train.p\n .init_model('dynamic',\n ConvModel,\n 'conv',\n config={'inputs': dict(images={'shape': (28, 28, 1)},\n labels={'classes': (10), \n 'transform': 'ohe', \n 'name': 'targets'}),\n 'loss': 'ce',\n 'optimizer':'Adam',\n 'input_block/inputs': 'images',\n 'head/units': 10,\n 'output': dict(ops=['labels', \n 'proba', \n 'accuracy'])})\n .train_model('conv',\n feed_dict={'images': B('images'),\n 'labels': B('labels')})\n)\n\nnormal_test_ppl = (\n mnistset.test.p\n .import_model('conv', normal_train_ppl)\n .init_variable('test_accuracy', init_on_each_run=int)\n .predict_model('conv', \n fetches='output_accuracy',\n feed_dict={'images': B('images'),\n 'labels': B('labels')},\n save_to=V('test_accuracy'), \n mode='w'))", "Train the model by using next_batch method", "batch_size = 400\nfor i in tqn(range(600)):\n normal_train_ppl.next_batch(batch_size, n_epochs=None)\n normal_test_ppl.next_batch(batch_size, n_epochs=None)", "Get variable from pipeline and print accuracy on data without shift", "acc = normal_test_ppl.get_variable('test_accuracy')\nprint('Accuracy on normal data: {:.2%}'.format(acc))", "Now check, how change accuracy, if the first model testing on shift data", "shift_test_ppl= (\n mnistset.test.p\n .import_model('conv', normal_train_ppl)\n .shift_flattened_pic()\n .init_variable('predict', init_on_each_run=int)\n .predict_model('conv', \n fetches='output_accuracy',\n feed_dict={'images': B('images'),\n 'labels': B('labels')},\n save_to=V('predict'), \n mode='w')\n .run(batch_size, n_epochs=1)\n)\n\nprint('Accuracy with shift: {:.2%}'.format(shift_test_ppl.get_variable('predict')))", "In order for the model to be able to predict the augmentation data, we will teach it on such data", "shift_train_ppl = (\n mnistset.train.p\n .shift_flattened_pic()\n .init_model('dynamic',\n ConvModel,\n 'conv',\n config={'inputs': dict(images={'shape': (28, 28, 1)},\n labels={'classes': (10), \n 'transform': 'ohe', \n 'name': 'targets'}),\n 'loss': 'ce',\n 'optimizer':'Adam',\n 'input_block/inputs': 'images',\n 'head/units': 10,\n 'output': dict(ops=['labels', \n 'proba',\n 'accuracy'])})\n .train_model('conv',\n feed_dict={'images': B('images'),\n 'labels': B('labels')})\n)\n\nfor i in tqn(range(600)):\n shift_train_ppl.next_batch(batch_size, n_epochs=None)", "And now check, how change accuracy on shift data", "shift_test_ppl = (\n mnistset.test.p\n .import_model('conv', shift_train_ppl)\n .shift_flattened_pic()\n .init_variable('acc', init_on_each_run=list)\n .init_variable('img', init_on_each_run=list)\n .init_variable('predict', init_on_each_run=list)\n .predict_model('conv', \n fetches=['output_accuracy', 'inputs', 
'output_proba'],\n                   feed_dict={'images': B('images'),\n                              'labels': B('labels')},\n                   save_to=[V('acc'), V('img'), V('predict')],\n                   mode='a')\n    .run(1, n_epochs=1)\n)\n\nprint('Accuracy with shift: {:.2%}'.format(np.mean(shift_test_ppl.get_variable('acc'))))", "It's really better than before.\nIt is interesting to see which digits the model gets wrong.", "acc = shift_test_ppl.get_variable('acc')\nimg = shift_test_ppl.get_variable('img')\npredict = shift_test_ppl.get_variable('predict')\n\n_, ax = plt.subplots(3, 4, figsize=(16, 16))\nax = ax.reshape(-1)\nfor i in range(12):\n    index = np.where(np.array(acc) == 0)[0][i]\n    ax[i].imshow(img[index]['images'].reshape(-1,28))\n    ax[i].set_xlabel('Predict: {}'.format(np.argmax(predict[index][0])), fontsize=18)\n    ax[i].grid()", "In most cases, the model makes mistakes on examples in which the digits are heavily shifted; some of them are hardly recognizable even by eye.\nConclusion:\n\nA network that is not trained on augmented data cannot predict such digits with good quality. Therefore, training on augmented data is necessary.\n\nIf you still have not completed our tutorial, you can fix that right now!\nRead and apply the other experiments:\n* next experiment\n* previous experiment\n* return to the table of contents." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
the-deep-learners/nyc-ds-academy
notebooks/tensor-fied_intro_to_tensorflow.ipynb
mit
[ "Introduction to TensorFlow, fitting point by point\nIn this notebook, we introduce TensorFlow by fitting a line of the form y=m*x+b point by point. This is a derivation of Jared Ostmeyer's Naked Tensor code. \nLoad dependencies and set seeds for reproducibility", "import numpy as np\nnp.random.seed(42)\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport tensorflow as tf\ntf.set_random_seed(42)", "Create a very small data set", "xs = [0., 1., 2., 3., 4., 5., 6., 7.] # feature (independent variable)\nys = [-.82, -.94, -.12, .26, .39, .64, 1.02, 1.] # labels (dependent variable)\n\nfig, ax = plt.subplots()\n_ = ax.scatter(xs, ys)", "Define variables -- the model parameters we'll learn -- and initialize them with \"random\" values", "m = tf.Variable(-0.5)\nb = tf.Variable(1.0)", "One single point at a time, define the error between the true label and the model's prediction of the label", "ys_model = m*xs + b\ntotal_error = tf.reduce_sum((ys-ys_model)**2)", "Define optimizer as SSE-minimizing gradient descent", "optimizer_operation = tf.train.GradientDescentOptimizer(learning_rate=0.005).minimize(total_error)", "Define an operator that will initialize the graph with all available global variables", "initializer_op = tf.global_variables_initializer()", "With the computational graph designed, we initialize a session to execute it", "with tf.Session() as sess:\n \n sess.run(initializer_op)\n \n n_epochs = 100\n \n for iteration in range(n_epochs):\n sess.run(optimizer_operation)\n \n slope, intercept = sess.run([m, b])\n\nslope\n\nintercept", "Calculate the predicted model outputs given the inputs xs", "y_hat = slope*np.array(xs) + intercept\n\npd.DataFrame(list(zip(ys, y_hat)), columns=['y', 'y_hat'])\n\nfig, ax = plt.subplots()\n\nax.scatter(xs, ys)\nx_min, x_max = ax.get_xlim()\ny_min, y_max = intercept, intercept + slope*(x_max-x_min)\n\nax.plot([x_min, x_max], [y_min, y_max])\n_ = ax.set_xlim([x_min, x_max])" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
llclave/Springboard-Mini-Projects
data_wrangling_json/sliderule_dsi_json_exercise.ipynb
mit
[ "JSON examples and exercise\n\n\nget familiar with packages for dealing with JSON\nstudy examples with JSON strings and files \nwork on exercise to be completed and submitted \n\n\n\nreference: http://pandas.pydata.org/pandas-docs/stable/io.html#io-json-reader\ndata source: http://jsonstudio.com/resources/", "import pandas as pd", "imports for Python, Pandas", "import json\nfrom pandas.io.json import json_normalize", "JSON example, with string\n\ndemonstrates creation of normalized dataframes (tables) from nested json string\nsource: http://pandas.pydata.org/pandas-docs/stable/io.html#normalization", "# define json string\ndata = [{'state': 'Florida', \n 'shortname': 'FL',\n 'info': {'governor': 'Rick Scott'},\n 'counties': [{'name': 'Dade', 'population': 12345},\n {'name': 'Broward', 'population': 40000},\n {'name': 'Palm Beach', 'population': 60000}]},\n {'state': 'Ohio',\n 'shortname': 'OH',\n 'info': {'governor': 'John Kasich'},\n 'counties': [{'name': 'Summit', 'population': 1234},\n {'name': 'Cuyahoga', 'population': 1337}]}]\n\n# use normalization to create tables from nested element\njson_normalize(data, 'counties')\n\n# further populate tables created from nested element\njson_normalize(data, 'counties', ['state', 'shortname', ['info', 'governor']])", "JSON example, with file\n\ndemonstrates reading in a json file as a string and as a table\nuses small sample file containing data about projects funded by the World Bank \ndata source: http://jsonstudio.com/resources/", "# load json as string\njson.load((open('data/world_bank_projects_less.json')))\n\n# load as Pandas dataframe\nsample_json_df = pd.read_json('data/world_bank_projects_less.json')\nsample_json_df", "JSON exercise\nUsing data in file 'data/world_bank_projects.json' and the techniques demonstrated above,\n1. Find the 10 countries with most projects\n2. Find the top 10 major project themes (using column 'mjtheme_namecode')\n3. In 2. above you will notice that some entries have only the code and the name is missing. Create a dataframe with the missing names filled in." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ES-DOC/esdoc-jupyterhub
notebooks/ncar/cmip6/models/sandbox-1/aerosol.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Aerosol\nMIP Era: CMIP6\nInstitute: NCAR\nSource ID: SANDBOX-1\nTopic: Aerosol\nSub-Topics: Transport, Emissions, Concentrations, Optical Radiative Properties, Model. \nProperties: 69 (37 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:54:22\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'ncar', 'sandbox-1', 'aerosol')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties\n2. Key Properties --&gt; Software Properties\n3. Key Properties --&gt; Timestep Framework\n4. Key Properties --&gt; Meteorological Forcings\n5. Key Properties --&gt; Resolution\n6. Key Properties --&gt; Tuning Applied\n7. Transport\n8. Emissions\n9. Concentrations\n10. Optical Radiative Properties\n11. Optical Radiative Properties --&gt; Absorption\n12. Optical Radiative Properties --&gt; Mixtures\n13. Optical Radiative Properties --&gt; Impact Of H2o\n14. Optical Radiative Properties --&gt; Radiative Scheme\n15. Optical Radiative Properties --&gt; Cloud Interactions\n16. Model \n1. Key Properties\nKey properties of the aerosol model\n1.1. Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of aerosol model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of aerosol model code", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.3. Scheme Scope\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nAtmospheric domains covered by the aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.scheme_scope') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"troposhere\" \n# \"stratosphere\" \n# \"mesosphere\" \n# \"mesosphere\" \n# \"whole atmosphere\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.4. Basic Approximations\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBasic approximations made in the aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.basic_approximations') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.5. Prognostic Variables Form\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nPrognostic variables in the aerosol model", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"3D mass/volume ratio for aerosols\" \n# \"3D number concenttration for aerosols\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.6. Number Of Tracers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of tracers in the aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.number_of_tracers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "1.7. Family Approach\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nAre aerosol calculations generalized into families of species?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.family_approach') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Software Properties\nSoftware properties of aerosol code\n2.1. Repository\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nLocation of code for this component.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.2. Code Version\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCode version identifier.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.3. Code Languages\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nCode language(s).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3. Key Properties --&gt; Timestep Framework\nPhysical properties of seawater in ocean\n3.1. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMathematical method deployed to solve the time evolution of the prognostic variables", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses atmospheric chemistry time stepping\" \n# \"Specific timestepping (operator splitting)\" \n# \"Specific timestepping (integrated)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "3.2. Split Operator Advection Timestep\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTimestep for aerosol advection (in seconds)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.3. Split Operator Physical Timestep\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTimestep for aerosol physics (in seconds).", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.4. Integrated Timestep\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTimestep for the aerosol model (in seconds)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.5. Integrated Scheme Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSpecify the type of timestep scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Explicit\" \n# \"Implicit\" \n# \"Semi-implicit\" \n# \"Semi-analytic\" \n# \"Impact solver\" \n# \"Back Euler\" \n# \"Newton Raphson\" \n# \"Rosenbrock\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Meteorological Forcings\n**\n4.1. Variables 3D\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nThree dimensionsal forcing variables, e.g. U, V, W, T, Q, P, conventive mass flux", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.2. Variables 2D\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTwo dimensionsal forcing variables, e.g. land-sea mask definition", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.3. Frequency\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nFrequency with which meteological forcings are applied (in seconds).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "5. Key Properties --&gt; Resolution\nResolution in the aersosol model grid\n5.1. Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.2. Canonical Horizontal Resolution\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nExpression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.3. 
Number Of Horizontal Gridpoints\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTotal number of horizontal (XY) points (or degrees of freedom) on computational grid.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "5.4. Number Of Vertical Levels\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nNumber of vertical levels resolved on computational grid.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "5.5. Is Adaptive Grid\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDefault is False. Set true if grid resolution changes during execution.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6. Key Properties --&gt; Tuning Applied\nTuning methodology for aerosol model\n6.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics retained. &amp;Document the relative weight given to climate performance metrics versus process oriented metrics, &amp;and on the possible conflicts with parameterization level tuning. In particular describe any struggle &amp;with a parameter value that required pushing it to its limits to solve a particular model deficiency.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.2. Global Mean Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList set of metrics of the global mean state used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.3. Regional Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList of regional metrics of mean state used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.4. Trend Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList observed trend metrics used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7. Transport\nAerosol transport\n7.1. 
Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of transport in atmosperic aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMethod for aerosol transport modeling", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses Atmospheric chemistry transport scheme\" \n# \"Specific transport scheme (eulerian)\" \n# \"Specific transport scheme (semi-lagrangian)\" \n# \"Specific transport scheme (eulerian and semi-lagrangian)\" \n# \"Specific transport scheme (lagrangian)\" \n# TODO - please enter value(s)\n", "7.3. Mass Conservation Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nMethod used to ensure mass conservation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses Atmospheric chemistry transport scheme\" \n# \"Mass adjustment\" \n# \"Concentrations positivity\" \n# \"Gradients monotonicity\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "7.4. Convention\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nTransport by convention", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.convention') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses Atmospheric chemistry transport scheme\" \n# \"Convective fluxes connected to tracers\" \n# \"Vertical velocities connected to tracers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8. Emissions\nAtmospheric aerosol emissions\n8.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of emissions in atmosperic aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nMethod used to define aerosol species (several methods allowed because the different species may not use the same method).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Prescribed (climatology)\" \n# \"Prescribed CMIP6\" \n# \"Prescribed above surface\" \n# \"Interactive\" \n# \"Interactive above surface\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.3. Sources\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nSources of the aerosol species are taken into account in the emissions scheme", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.emissions.sources') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Vegetation\" \n# \"Volcanos\" \n# \"Bare ground\" \n# \"Sea surface\" \n# \"Lightning\" \n# \"Fires\" \n# \"Aircraft\" \n# \"Anthropogenic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.4. Prescribed Climatology\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify the climatology type for aerosol emissions", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.prescribed_climatology') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Interannual\" \n# \"Annual\" \n# \"Monthly\" \n# \"Daily\" \n# TODO - please enter value(s)\n", "8.5. Prescribed Climatology Emitted Species\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of aerosol species emitted and prescribed via a climatology", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.6. Prescribed Spatially Uniform Emitted Species\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of aerosol species emitted and prescribed as spatially uniform", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.7. Interactive Emitted Species\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of aerosol species emitted and specified via an interactive method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.8. Other Emitted Species\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of aerosol species emitted and specified via an &quot;other method&quot;", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.other_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.9. Other Method Characteristics\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCharacteristics of the &quot;other method&quot; used for aerosol emissions", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.other_method_characteristics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9. Concentrations\nAtmospheric aerosol concentrations\n9.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of concentrations in atmosperic aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.2. Prescribed Lower Boundary\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of species prescribed at the lower boundary.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.3. Prescribed Upper Boundary\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of species prescribed at the upper boundary.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.4. Prescribed Fields Mmr\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of species prescribed as mass mixing ratios.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.5. Prescribed Fields Mmr\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of species prescribed as AOD plus CCNs.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Optical Radiative Properties\nAerosol optical and radiative properties\n10.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of optical and radiative properties", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11. Optical Radiative Properties --&gt; Absorption\nAbsortion properties in aerosol scheme\n11.1. Black Carbon\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAbsorption mass coefficient of black carbon at 550nm (if non-absorbing enter 0)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.2. Dust\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAbsorption mass coefficient of dust at 550nm (if non-absorbing enter 0)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.3. Organics\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAbsorption mass coefficient of organics at 550nm (if non-absorbing enter 0)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "12. Optical Radiative Properties --&gt; Mixtures\n**\n12.1. External\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there external mixing with respect to chemical composition?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "12.2. 
Internal\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there internal mixing with respect to chemical composition?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "12.3. Mixing Rule\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf there is internal mixing with respect to chemical composition then indicate the mixinrg rule", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13. Optical Radiative Properties --&gt; Impact Of H2o\n**\n13.1. Size\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes H2O impact size?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "13.2. Internal Mixture\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes H2O impact internal mixture?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "14. Optical Radiative Properties --&gt; Radiative Scheme\nRadiative scheme for aerosol\n14.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of radiative scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14.2. Shortwave Bands\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of shortwave bands", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "14.3. Longwave Bands\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of longwave bands", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "15. Optical Radiative Properties --&gt; Cloud Interactions\nAerosol-cloud interactions\n15.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of aerosol-cloud interactions", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.2. Twomey\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the Twomey effect included?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "15.3. Twomey Minimum Ccn\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf the Twomey effect is included, then what is the minimum CCN number?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "15.4. Drizzle\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the scheme affect drizzle?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "15.5. Cloud Lifetime\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the scheme affect cloud lifetime?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "15.6. Longwave Bands\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of longwave bands", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "16. Model\nAerosol model\n16.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of atmosperic aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "16.2. Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nProcesses included in the Aerosol model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Dry deposition\" \n# \"Sedimentation\" \n# \"Wet deposition (impaction scavenging)\" \n# \"Wet deposition (nucleation scavenging)\" \n# \"Coagulation\" \n# \"Oxidation (gas phase)\" \n# \"Oxidation (in cloud)\" \n# \"Condensation\" \n# \"Ageing\" \n# \"Advection (horizontal)\" \n# \"Advection (vertical)\" \n# \"Heterogeneous chemistry\" \n# \"Nucleation\" \n# TODO - please enter value(s)\n", "16.3. Coupling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nOther model components coupled to the Aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.coupling') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Radiation\" \n# \"Land surface\" \n# \"Heterogeneous chemistry\" \n# \"Clouds\" \n# \"Ocean\" \n# \"Cryosphere\" \n# \"Gas phase chemistry\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.4. 
Gas Phase Precursors\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of gas phase aerosol precursors.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.gas_phase_precursors') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"DMS\" \n# \"SO2\" \n# \"Ammonia\" \n# \"Iodine\" \n# \"Terpene\" \n# \"Isoprene\" \n# \"VOC\" \n# \"NOx\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.5. Scheme Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nType(s) of aerosol scheme used by the aerosols model (potentially multiple: some species may be covered by one type of aerosol scheme and other species covered by another type).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.scheme_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Bulk\" \n# \"Modal\" \n# \"Bin\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.6. Bulk Scheme Species\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of species covered by the bulk scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.bulk_scheme_species') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Sulphate\" \n# \"Nitrate\" \n# \"Sea salt\" \n# \"Dust\" \n# \"Ice\" \n# \"Organic\" \n# \"Black carbon / soot\" \n# \"SOA (secondary organic aerosols)\" \n# \"POM (particulate organic matter)\" \n# \"Polar stratospheric ice\" \n# \"NAT (Nitric acid trihydrate)\" \n# \"NAD (Nitric acid dihydrate)\" \n# \"STS (supercooled ternary solution aerosol particule)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
SJSlavin/phys202-2015-work
assignments/assignment10/ODEsEx03.ipynb
mit
[ "Ordinary Differential Equations Exercise 3\nImports", "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nfrom scipy.integrate import odeint\nfrom IPython.html.widgets import interact, fixed", "Damped, driven nonlinear pendulum\nThe equations of motion for a simple pendulum of mass $m$, length $l$ are:\n$$\n\\frac{d^2\\theta}{dt^2} = \\frac{-g}{\\ell}\\sin\\theta\n$$\nWhen a damping and periodic driving force are added the resulting system has much richer and interesting dynamics:\n$$\n\\frac{d^2\\theta}{dt^2} = \\frac{-g}{\\ell}\\sin\\theta - a \\omega - b \\sin(\\omega_0 t)\n$$\nIn this equation:\n\n$a$ governs the strength of the damping.\n$b$ governs the strength of the driving force.\n$\\omega_0$ is the angular frequency of the driving force.\n\nWhen $a=0$ and $b=0$, the energy/mass is conserved:\n$$E/m =g\\ell(1-\\cos(\\theta)) + \\frac{1}{2}\\ell^2\\omega^2$$\nBasic setup\nHere are the basic parameters we are going to use for this exercise:", "g = 9.81 # m/s^2\nl = 0.5 # length of pendulum, in meters\ntmax = 50. # seconds\nt = np.linspace(0, tmax, int(100*tmax))", "Write a function derivs for usage with scipy.integrate.odeint that computes the derivatives for the damped, driven harmonic oscillator. The solution vector at each time will be $\\vec{y}(t) = (\\theta(t),\\omega(t))$.", "def derivs(y, t, a, b, omega0):\n    \"\"\"Compute the derivatives of the damped, driven pendulum.\n    \n    Parameters\n    ----------\n    y : ndarray\n        The solution vector at the current time t[i]: [theta[i],omega[i]].\n    t : float\n        The current time t[i].\n    a, b, omega0: float\n        The parameters in the differential equation.\n    \n    Returns\n    -------\n    dy : ndarray\n        The vector of derivatives at t[i]: [dtheta[i],domega[i]].\n    \"\"\"\n    # YOUR CODE HERE\n    theta = y[0]\n    omega = y[1]\n    dtheta = omega\n    domega = -(g/l) * np.sin(theta) - a*omega - b*np.sin(omega0*t)\n    return np.array([dtheta, domega])\n\nassert np.allclose(derivs(np.array([np.pi,1.0]), 0, 1.0, 1.0, 1.0), [1.,-1.])\n\ndef energy(y):\n    \"\"\"Compute the energy for the state array y.\n    \n    The state array y can have two forms:\n    \n    1. It could be an ndim=1 array of np.array([theta,omega]) at a single time.\n    2. It could be an ndim=2 array where each row is the [theta,omega] at a single\n       time.\n    \n    Parameters\n    ----------\n    y : ndarray, list, tuple\n        A solution vector\n    \n    Returns\n    -------\n    E/m : float (ndim=1) or ndarray (ndim=2)\n        The energy per mass.\n    \"\"\"\n    # YOUR CODE HERE\n    raise NotImplementedError()\n\nassert np.allclose(energy(np.array([np.pi,0])),g)\nassert np.allclose(energy(np.ones((10,2))), np.ones(10)*energy(np.array([1,1])))", "Simple pendulum\nUse the above functions to integrate the simple pendulum for the case where it starts at rest pointing vertically upwards. In this case, it should remain at rest with constant energy.\n\nIntegrate the equations of motion.\nPlot $E/m$ versus time.\nPlot $\\theta(t)$ and $\\omega(t)$ versus time.\nTune the atol and rtol arguments of odeint until $E/m$, $\\theta(t)$ and $\\omega(t)$ are constant.\n\nAnytime you have a differential equation with a conserved quantity, it is critical to make sure the numerical solutions conserve that quantity as well. This also gives you an opportunity to find other bugs in your code. The default error tolerances (atol and rtol) used by odeint are not sufficiently small for this problem. Start by trying atol=1e-3, rtol=1e-2 and then decrease each by an order of magnitude until your solutions are stable.", "# YOUR CODE HERE\nraise NotImplementedError()\n\n# YOUR CODE HERE\nraise NotImplementedError()\n\n# YOUR CODE HERE\nraise NotImplementedError()\n\nassert True # leave this to grade the two plots and their tuning of atol, rtol.", "Damped pendulum\nWrite a plot_pendulum function that integrates the damped, driven pendulum differential equation for a particular set of parameters $[a,b,\\omega_0]$.\n\nUse the initial conditions $\\theta(0)=-\\pi + 0.1$ and $\\omega=0$.\nDecrease your atol and rtol even further and make sure your solutions have converged.\nMake a parametric plot of $[\\theta(t),\\omega(t)]$ versus time.\nUse the plot limits $\\theta \\in [-2 \\pi,2 \\pi]$ and $\\omega \\in [-10,10]$\nLabel your axes and customize your plot to make it beautiful and effective.", "def plot_pendulum(a=0.0, b=0.0, omega0=0.0):\n    \"\"\"Integrate the damped, driven pendulum and make a phase plot of the solution.\"\"\"\n    # YOUR CODE HERE\n    raise NotImplementedError()", "Here is an example of the output of your plot_pendulum function that should show a decaying spiral.", "plot_pendulum(0.5, 0.0, 0.0)", "Use interact to explore the plot_pendulum function with:\n\na: a float slider over the interval $[0.0,1.0]$ with steps of $0.1$.\nb: a float slider over the interval $[0.0,10.0]$ with steps of $0.1$.\nomega0: a float slider over the interval $[0.0,10.0]$ with steps of $0.1$.", "# YOUR CODE HERE\nraise NotImplementedError()", "Use your interactive plot to explore the behavior of the damped, driven pendulum by varying the values of $a$, $b$ and $\\omega_0$.\n\nFirst start by increasing $a$ with $b=0$ and $\\omega_0=0$.\nThen fix $a$ at a non-zero value and start to increase $b$ and $\\omega_0$.\n\nDescribe the different classes of behaviors you observe below.\nYOUR ANSWER HERE" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
yashdeeph709/Algorithms
PythonBootCamp/Complete-Python-Bootcamp-master/.ipynb_checkpoints/Functions-checkpoint.ipynb
apache-2.0
[ "Functions\nIntroduction to Functions\nThis lecture will consist of explaining what a function is in Python and how to create one. Functions will be one of our main building blocks when we construct larger and larger amounts of code to solve problems.\nSo what is a function?\nFormally, a function is a useful device that groups together a set of statements so they can be run more than once. They can also let us specify parameters that can serve as inputs to the functions.\nOn a more fundamental level, functions allow us to not have to repeatedly write the same code again and again. If you remember back to the lessons on strings and lists, remember that we used a function len() to get the length of a string. Since checking the length of a sequence is a common task you would want to write a function that can do this repeatedly at command.\nFunctions will be one of most basic levels of reusing code in Python, and it will also allow us to start thinking of program design (we will dive much deeper into the ideas of design when we learn about Object Oriented Programming).\ndef Statements\nLet's see how to build out a function's syntax in Python. It has the following form:", "def name_of_function(arg1,arg2):\n '''\n This is where the function's Document String (docstring) goes\n '''\n # Do stuff here\n #return desired result", "We begin with def then a space followed by the name of the function. Try to keep names relevant, for example len() is a good name for a length() function. Also be careful with names, you wouldn't want to call a function the same name as a built-in function in Python (such as len).\nNext come a pair of parenthesis with a number of arguments seperated by a comma. These arguments are the inputs for your function. You'll be able to use these inputs in your function and reference them. After this you put a colon.\nNow here is the important step, you must indent to begin the code inside your function correctly. Python makes use of whitespace to organize code. Lots of other programing languages do not do this, so keep that in mind.\nNext you'll see the docstring, this is where you write a basic description of the function. Using iPython and iPython Notebooks, you'll be ab;e to read these docstrings by pressing Shift+Tab after a function name. Doc strings are not necessary for simple functions, but its good practice to put them in so you or other people can easily understand the code you write.\nAfter all this you begin writing the code you wish to execute.\nThe best way to learn functions is by going through examples. So let's try to go through examples that relate back to the various objects and data structures we learned about before.\nExample 1: A simple print 'hello' function", "def say_hello():\n print 'hello'", "Call the function", "say_hello()", "Example 2: A simple greeting function\nLet's write a function that greets people with their name.", "def greeting(name):\n print 'Hello %s' %name\n\ngreeting('Jose')", "Using return\nLet's see some example that use a return statement. return allows a function to return a result that can then be stored as a variable, or used in whatever manner a user wants.\nExample 3: Addition function", "def add_num(num1,num2):\n return num1+num2\n\nadd_num(4,5)\n\n# Can also save as variable due to return\nresult = add_num(4,5)\n\nprint result", "What happens if we input two strings?", "print add_num('one','two')", "Note that because we don't declare variable types in Python, this function could be used to add numbers or sequences together! 
We'll later learn about adding in checks to make sure a user puts the correct arguments into a function; a small preview is sketched at the end of this lesson.\nLet's also start using break, continue, and pass statements in our code. We introduced these during the while lecture.\nFinally let's go over a full example of creating a function to check if a number is prime (a common interview exercise).\nWe know a number is prime if that number is only evenly divisible by 1 and itself. Let's write our first version of the function, which checks all the numbers from 2 up to N and performs modulo checks.", "def is_prime(num):\n '''\n Naive method of checking for primes. \n '''\n for n in range(2,num):\n if num % n == 0:\n print 'not prime'\n break\n else: # If never mod zero, then prime\n print 'prime'\n\nis_prime(16)", "Note how we break the code after the print statement! We can actually improve this by only checking up to the square root of the target number; we can also disregard all even numbers after checking for 2. We'll also switch to returning a boolean value to get an example of using return statements:", "import math\n\ndef is_prime(num):\n '''\n Better method of checking for primes. \n '''\n if num % 2 == 0 and num > 2: \n return False\n for i in range(3, int(math.sqrt(num)) + 1, 2):\n if num % i == 0:\n return False\n return True\n\nis_prime(14)", "Great! You should now have a basic understanding of creating your own functions to save yourself from repeatedly writing code!",
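"And here is that small preview of argument checking. This is only a sketch (isinstance is just one of several possible approaches, and proper validation is covered later) of an addition function that only accepts numbers:", "def add_num_checked(num1, num2):\n    '''\n    Add two values after checking that both are numbers.\n    '''\n    if not isinstance(num1, (int, float)) or not isinstance(num2, (int, float)):\n        raise TypeError('add_num_checked only accepts numbers')\n    return num1 + num2\n\nadd_num_checked(4, 5)" ]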
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
hetland/python4geosciences
examples/numpy.ipynb
mit
[ "Numpy example: Reading in and analyzing topography/bathymetry data", "import os # this package allows us to use terminal window commands from within python\nimport numpy as np", "Read in file\nWe have a dataset saved in the repository: cascadia.npz. This contains topography and bathymetry data from Washington state.", "d = np.load('../data/cascadia.npz') # data was saved in compressed numpy format", "What is contained in this file?", "d.keys() # notice that d is a dictionary!\n\nd['z'].shape # this is an array instead of a list, so it can have more than 1 dimension", "Investigate\nLet's start with a quick look at the data. We'll keep it simple since we aren't to the plotting section yet.", "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport cmocean.cm as cmo\n\nplt.figure(figsize=(10, 8))\nplt.pcolormesh(d['lon'], d['lat'], d['z'], cmap=cmo.delta)\nplt.colorbar()\nplt.xlabel('Longitude [deg]')\nplt.ylabel('Latitude [deg]')\nplt.title('Topography and bathymetry [m]')", "Anyone recognize this?\nLet's do a few calculations using numpy.\nHow about a mean:", "z = d['z'] # we can rename the vertical data information to save a little space\nz.mean()", "So overall we have a mean value of about -5 meters. But how meaningful is this? Let's break it down further.\nWe have both positive and negative values, and they represent pretty distinct areas: above and below water. It is logical that we separate the two.", "iabove = z > 0 # indices of the z values that are above water\nibelow = z < 0 # indices of z values that are below water\n\nprint('above water: ', z[iabove])\nprint('below water: ', z[ibelow])", "Look good! Now let's do something with them.\nFirst, how about the mean vertical level, separate for above and below water.", "z[iabove].mean()\n\nz[ibelow].mean()", "So, the average topographic height in this region of western Washington and Canada is about 550 meters above sea level, and the average depth is about 700 meters.\n\nExercise\n\nFind the highest and lowest points in the dataset. \nHow does the mean value of the data between 800 and 1000 meters compare with that between -1000 and -800 meters?" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
esa-as/2016-ml-contest
MandMs/Facies_classification-M&Ms_plurality_voting_classifier.ipynb
apache-2.0
[ "Facies classification using plurality voting (e.g. multiclass majority voting)\nContest entry by: <a href=\"https://github.com/mycarta\">Matteo Niccoli</a> and <a href=\"https://github.com/dahlmb\">Mark Dahl</a>\nOriginal contest notebook by Brendon Hall, Enthought\n<a rel=\"license\" href=\"http://creativecommons.org/licenses/by/4.0/\"><img alt=\"Creative Commons License\" style=\"border-width:0\" src=\"https://i.creativecommons.org/l/by/4.0/88x31.png\" /></a><br /><span xmlns:dct=\"http://purl.org/dc/terms/\" property=\"dct:title\">The code and ideas in this notebook,</span> by <span xmlns:cc=\"http://creativecommons.org/ns#\" property=\"cc:attributionName\">Matteo Niccoli and Mark Dahl,</span> are licensed under a <a rel=\"license\" href=\"http://creativecommons.org/licenses/by/4.0/\">Creative Commons Attribution 4.0 International License</a>.\nIn this notebook we will attempt to predict facies from well log data using machine learnig classifiers. The dataset comes from a class exercise from The University of Kansas on Neural Networks and Fuzzy Systems. This exercise is based on a consortium project to use machine learning techniques to create a reservoir model of the largest gas fields in North America, the Hugoton and Panoma Fields. For more info on the origin of the data, see Bohling and Dubois (2003) and Dubois et al. (2007). \nThe dataset consists of log data from nine wells that have been labeled with a facies type based on observation of core. We will use this log data to train a support vector machine to classify facies types. \nThe plan\nWe will created three classifiers with pretuned parameters:\n- best SVM in the competition (by our team's SVM submission) \n- best Random Forest in the competition (form the leading submission, by gccrowther) \n- multilayer perceptron (from previous notebooks, not submitted)\nWe will then try to predict the facies using a plurality voting approach (plurality voting = multi-class majority voting).\nFrom the scikit-learn website: \"The idea behind the voting classifier implementation is to combine conceptually different machine learning classifiers and use a majority vote or the average predicted probabilities (soft vote) to predict the class labels. Such a classifier can be useful for a set of equally well performing model in order to balance out their individual weaknesses\".\nExploring the dataset\nFirst, we will examine the data set we will use to train the classifier.", "%matplotlib inline\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nimport pandas as pd\nfrom pandas import set_option\nset_option(\"display.max_rows\", 10)\npd.options.mode.chained_assignment = None\n\nfrom sklearn import preprocessing\nfrom sklearn.metrics import f1_score, accuracy_score, make_scorer\nfrom sklearn.model_selection import LeaveOneGroupOut\n\nfilename = 'facies_vectors.csv'\ntraining_data = pd.read_csv(filename)\ntraining_data", "This data is from the Council Grove gas reservoir in Southwest Kansas. The Panoma Council Grove Field is predominantly a carbonate gas reservoir encompassing 2700 square miles in Southwestern Kansas. This dataset is from nine wells (with 4149 examples), consisting of a set of seven predictor variables and a rock facies (class) for each example vector and validation (test) data (830 examples from two wells) having the same seven predictor variables in the feature vector. 
Facies are based on examination of cores from nine wells taken vertically at half-foot intervals. Predictor variables include five from wireline log measurements and two geologic constraining variables that are derived from geologic knowledge. These are essentially continuous variables sampled at a half-foot sample rate. \nThe seven predictor variables are:\n* Five wire line log curves include gamma ray (GR), resistivity logging (ILD_log10),\nphotoelectric effect (PE), neutron-density porosity difference and average neutron-density porosity (DeltaPHI and PHIND). Note, some wells do not have PE.\n* Two geologic constraining variables: nonmarine-marine indicator (NM_M) and relative position (RELPOS)\nThe nine discrete facies (classes of rocks) are: \n1. Nonmarine sandstone\n2. Nonmarine coarse siltstone \n3. Nonmarine fine siltstone \n4. Marine siltstone and shale \n5. Mudstone (limestone)\n6. Wackestone (limestone)\n7. Dolomite\n8. Packstone-grainstone (limestone)\n9. Phylloid-algal bafflestone (limestone)\nThese facies aren't discrete, and gradually blend into one another. Some have neighboring facies that are rather close. Mislabeling within these neighboring facies can be expected to occur. The following table lists the facies, their abbreviated labels and their approximate neighbors.\nFacies |Label| Adjacent Facies\n:---: | :---: |:--:\n1 |SS| 2\n2 |CSiS| 1,3\n3 |FSiS| 2\n4 |SiSh| 5\n5 |MS| 4,6\n6 |WS| 5,7\n7 |D| 6,8\n8 |PS| 6,7,9\n9 |BS| 7,8\nLet's clean up this dataset. The 'Well Name' and 'Formation' columns can be turned into a categorical data type.", "training_data['Well Name'] = training_data['Well Name'].astype('category')\ntraining_data['Formation'] = training_data['Formation'].astype('category')\ntraining_data['Well Name'].unique()", "These are the names of the 10 training wells in the Council Grove reservoir. Data has been recruited into pseudo-well 'Recruit F9' to better represent facies 9, the Phylloid-algal bafflestone. \nBefore we plot the well data, let's define a color map so the facies are represented by consistent color in all the plots in this tutorial. We also create the abbreviated facies labels, and add those to the facies_vectors dataframe.", "# 1=sandstone 2=c_siltstone 3=f_siltstone # 4=marine_silt_shale \n#5=mudstone 6=wackestone 7=dolomite 8=packstone 9=bafflestone\nfacies_colors = ['#F4D03F', '#F5B041', '#DC7633','#A569BD',\n '#000000', '#000080', '#2E86C1', '#AED6F1', '#196F3D']\n\nfacies_labels = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS',\n 'WS', 'D','PS', 'BS']\n#facies_color_map is a dictionary that maps facies labels\n#to their respective colors\nfacies_color_map = {}\nfor ind, label in enumerate(facies_labels):\n facies_color_map[label] = facies_colors[ind]\n\ndef label_facies(row, labels):\n return labels[ row['Facies'] -1]\n \ntraining_data.loc[:,'FaciesLabels'] = training_data.apply(lambda row: label_facies(row, facies_labels), axis=1)\ntraining_data.describe()", "This is a quick view of the statistical distribution of the input variables. Looking at the count values, most values have 4149 valid values except for PE, which has 3232. We will drop the feature vectors that don't have a valid PE entry.", "PE_mask = training_data['PE'].notnull().values\ntraining_data = training_data[PE_mask]\n\ntraining_data.describe()", "Now we extract just the feature variables we need to perform the classification. The predictor variables are the five log values and two geologic constraining variables, and we are also using depth. 
We also get a vector of the facies labels that correspond to each feature vector.", "y = training_data['Facies'].values\nprint y[25:40]\nprint np.shape(y)\n\nX = training_data.drop(['Formation', 'Well Name','Facies','FaciesLabels'], axis=1)\nprint np.shape(X)\nX.describe(percentiles=[.05, .25, .50, .75, .95])\n\nscaler = preprocessing.StandardScaler().fit(X)\nX = scaler.transform(X)", "Make performance scorers\nUsed to evaluate performance.", "Fscorer = make_scorer(f1_score, average = 'micro')", "Pre-tuned SVM classifier and leave-one-well-out average F1 score\nThis is the Support Vector Machine classifier from our first submission.", "from sklearn import svm\nSVC_classifier = svm.SVC(C = 100, cache_size=2400, class_weight=None, coef0=0.0,\n decision_function_shape=None, degree=3, gamma=0.01, kernel='rbf',\n max_iter=-1, probability=True, random_state=49, shrinking=True,\n tol=0.001, verbose=False)\n\nf1_svc = []\n\nwells = training_data[\"Well Name\"].values\nlogo = LeaveOneGroupOut()\n\nfor train, test in logo.split(X, y, groups=wells):\n well_name = wells[test[0]]\n SVC_classifier.fit(X[train], y[train])\n pred_svc = SVC_classifier.predict(X[test])\n sc = f1_score(y[test], pred_svc, labels = np.arange(10), average = 'micro')\n print(\"{:>20s} {:.3f}\".format(well_name, sc))\n f1_svc.append(sc)\n \nprint \"-Average leave-one-well-out F1 Score: %6f\" % (sum(f1_svc)/(1.0*(len(f1_svc))))", "Pre-tuned multi-layer perceptron classifier and average F1 score", "from sklearn.neural_network import MLPClassifier\nmlp_classifier = MLPClassifier(activation='logistic', alpha=0.01, batch_size='auto',\n beta_1=0.9, beta_2=0.999, early_stopping=False, epsilon=1e-08,\n hidden_layer_sizes=(100,), learning_rate='adaptive',\n learning_rate_init=0.001, max_iter=1000, momentum=0.9,\n nesterovs_momentum=True, power_t=0.5, random_state=49, shuffle=True,\n solver='adam', tol=0.0001, validation_fraction=0.1, verbose=False,\n warm_start=False)\n\nf1_mlp = []\n\nwells = training_data[\"Well Name\"].values\nlogo = LeaveOneGroupOut()\n\nfor train, test in logo.split(X, y, groups=wells):\n well_name = wells[test[0]]\n mlp_classifier.fit(X[train], y[train])\n pred_mlp = mlp_classifier.predict(X[test])\n sc = f1_score(y[test], pred_mlp, labels = np.arange(10), average = 'micro')\n print(\"{:>20s} {:.3f}\".format(well_name, sc))\n f1_mlp.append(sc)\n \nprint \"-Average leave-one-well-out F1 Score: %6f\" % (sum(f1_mlp)/(1.0*(len(f1_mlp))))", "Pre-tuned extra trees\nThis is the extra trees classifier with parameters tuned in the leading submission, by George Crowther, but without his engineered features.", "from sklearn.pipeline import make_pipeline\nfrom sklearn.feature_selection import VarianceThreshold\nfrom sklearn.ensemble import ExtraTreesClassifier\n\nET_classifier = make_pipeline(\n VarianceThreshold(threshold=0.49),\n ExtraTreesClassifier(criterion=\"entropy\", max_features=0.71,\n n_estimators=500, random_state=49))\n\nf1_ET = []\n\nwells = training_data[\"Well Name\"].values\nlogo = LeaveOneGroupOut()\n\nfor train, test in logo.split(X, y, groups=wells):\n well_name = wells[test[0]]\n ET_classifier.fit(X[train], y[train])\n pred_cv = ET_classifier.predict(X[test])\n sc = f1_score(y[test], pred_cv, labels = np.arange(10), average = 'micro')\n print(\"{:>20s} {:.3f}\".format(well_name, sc))\n f1_ET.append(sc)\n \nprint \"-Average leave-one-well-out F1 Score: %6f\" % (sum(f1_ET)/(1.0*(len(f1_ET))))", "Plurality voting classifier (multi-class majority voting)\nWe will use a weighted approach, where the weights are
somewhat arbitrary, but their proportion is based on the average F1 score of the individual classifiers.", "from sklearn.ensemble import VotingClassifier\n\neclf_cv = VotingClassifier(estimators=[\n ('SVC', SVC_classifier), ('MLP', mlp_classifier), ('ET', ET_classifier)], \n voting='soft', weights=[0.3,0.33,0.37])", "Leave-one-well-out F1 scores", "f1_ens = []\n\nwells = training_data[\"Well Name\"].values\nlogo = LeaveOneGroupOut()\n\nfor train, test in logo.split(X, y, groups=wells):\n well_name = wells[test[0]]\n eclf_cv.fit(X[train], y[train])\n pred_cv = eclf_cv.predict(X[test])\n sc = f1_score(y[test], pred_cv, labels = np.arange(10), average = 'micro')\n print(\"{:>20s} {:.3f}\".format(well_name, sc))\n f1_ens.append(sc)\n \nprint \"-Average leave-one-well-out F1 Score: %6f\" % (sum(f1_ens)/(1.0*(len(f1_ens))))", "Comments\nUsing the average F1 score from the leave-one-well-out cross-validation as a metric, the majority voting is superior to the individual classifiers, including the pre-tuned Random Forest from the leading submission. However, the Random Forest in the official leading submission was trained using additional new features engineered by George, and outperforms our majority voting classifier, with an F1 score of 0.580 against our 0.579. A clear indication, in our view, that feature engineering is a key element in achieving the best possible prediction.\nPredicting, displaying, and saving facies for blind wells", "blind = pd.read_csv('validation_data_nofacies.csv') \nX_blind = np.array(blind.drop(['Formation', 'Well Name'], axis=1)) \nX_blind = scaler.transform(X_blind) \ny_pred = eclf_cv.fit(X, y).predict(X_blind) \nblind['Facies'] = y_pred\n\ndef make_facies_log_plot(logs, facies_colors):\n #make sure logs are sorted by depth\n logs = logs.sort_values(by='Depth')\n cmap_facies = colors.ListedColormap(\n facies_colors[0:len(facies_colors)], 'indexed')\n \n ztop=logs.Depth.min(); zbot=logs.Depth.max()\n \n cluster=np.repeat(np.expand_dims(logs['Facies'].values,1), 100, 1)\n \n f, ax = plt.subplots(nrows=1, ncols=6, figsize=(8, 12))\n ax[0].plot(logs.GR, logs.Depth, '-g')\n ax[1].plot(logs.ILD_log10, logs.Depth, '-')\n ax[2].plot(logs.DeltaPHI, logs.Depth, '-', color='0.5')\n ax[3].plot(logs.PHIND, logs.Depth, '-', color='r')\n ax[4].plot(logs.PE, logs.Depth, '-', color='black')\n im=ax[5].imshow(cluster, interpolation='none', aspect='auto',\n cmap=cmap_facies,vmin=1,vmax=9)\n \n divider = make_axes_locatable(ax[5])\n cax = divider.append_axes(\"right\", size=\"20%\", pad=0.05)\n cbar=plt.colorbar(im, cax=cax)\n cbar.set_label((17*' ').join([' SS ', 'CSiS', 'FSiS', \n 'SiSh', ' MS ', ' WS ', ' D ', \n ' PS ', ' BS ']))\n cbar.set_ticks(range(0,1)); cbar.set_ticklabels('')\n \n for i in range(len(ax)-1):\n ax[i].set_ylim(ztop,zbot)\n ax[i].invert_yaxis()\n ax[i].grid()\n ax[i].locator_params(axis='x', nbins=3)\n \n ax[0].set_xlabel(\"GR\")\n ax[0].set_xlim(logs.GR.min(),logs.GR.max())\n ax[1].set_xlabel(\"ILD_log10\")\n ax[1].set_xlim(logs.ILD_log10.min(),logs.ILD_log10.max())\n ax[2].set_xlabel(\"DeltaPHI\")\n ax[2].set_xlim(logs.DeltaPHI.min(),logs.DeltaPHI.max())\n ax[3].set_xlabel(\"PHIND\")\n ax[3].set_xlim(logs.PHIND.min(),logs.PHIND.max())\n ax[4].set_xlabel(\"PE\")\n ax[4].set_xlim(logs.PE.min(),logs.PE.max())\n ax[5].set_xlabel('Facies')\n \n ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([])\n ax[4].set_yticklabels([]); ax[5].set_yticklabels([])\n ax[5].set_xticklabels([])\n f.suptitle('Well: %s'%logs.iloc[0]['Well Name'],
fontsize=14,y=0.94)\n\nmake_facies_log_plot(blind[blind['Well Name'] == 'STUART'], facies_colors)\nmake_facies_log_plot(blind[blind['Well Name'] == 'CRAWFORD'], facies_colors)\n\nnp.save('ypred.npy', y_pred)", "Displaying predicted versus original facies in the training data\nThis is a nice display to finish up with, as it gives us a visual idea of the predicted facies where we have facies from the core observations.\nFor the plot we will use a function from the original notebook. Let's look at the well with the lowest F1 from the previous code block, CROSS H CATTLE, and the one with the highest F1 (excluding Recruit F9), which is SHRIMPLIN.", "def compare_facies_plot(logs, compadre, facies_colors):\n #make sure logs are sorted by depth\n logs = logs.sort_values(by='Depth')\n cmap_facies = colors.ListedColormap(\n facies_colors[0:len(facies_colors)], 'indexed')\n \n ztop=logs.Depth.min(); zbot=logs.Depth.max()\n \n cluster1 = np.repeat(np.expand_dims(logs['Facies'].values,1), 100, 1)\n cluster2 = np.repeat(np.expand_dims(logs[compadre].values,1), 100, 1)\n \n f, ax = plt.subplots(nrows=1, ncols=7, figsize=(9, 12))\n ax[0].plot(logs.GR, logs.Depth, '-g')\n ax[1].plot(logs.ILD_log10, logs.Depth, '-')\n ax[2].plot(logs.DeltaPHI, logs.Depth, '-', color='0.5')\n ax[3].plot(logs.PHIND, logs.Depth, '-', color='r')\n ax[4].plot(logs.PE, logs.Depth, '-', color='black')\n im1 = ax[5].imshow(cluster1, interpolation='none', aspect='auto',\n cmap=cmap_facies,vmin=1,vmax=9)\n im2 = ax[6].imshow(cluster2, interpolation='none', aspect='auto',\n cmap=cmap_facies,vmin=1,vmax=9)\n \n divider = make_axes_locatable(ax[6])\n cax = divider.append_axes(\"right\", size=\"20%\", pad=0.05)\n cbar=plt.colorbar(im2, cax=cax)\n cbar.set_label((17*' ').join([' SS ', 'CSiS', 'FSiS', \n 'SiSh', ' MS ', ' WS ', ' D ', \n ' PS ', ' BS ']))\n cbar.set_ticks(range(0,1)); cbar.set_ticklabels('')\n \n for i in range(len(ax)-2):\n ax[i].set_ylim(ztop,zbot)\n ax[i].invert_yaxis()\n ax[i].grid()\n ax[i].locator_params(axis='x', nbins=3)\n \n ax[0].set_xlabel(\"GR\")\n ax[0].set_xlim(logs.GR.min(),logs.GR.max())\n ax[1].set_xlabel(\"ILD_log10\")\n ax[1].set_xlim(logs.ILD_log10.min(),logs.ILD_log10.max())\n ax[2].set_xlabel(\"DeltaPHI\")\n ax[2].set_xlim(logs.DeltaPHI.min(),logs.DeltaPHI.max())\n ax[3].set_xlabel(\"PHIND\")\n ax[3].set_xlim(logs.PHIND.min(),logs.PHIND.max())\n ax[4].set_xlabel(\"PE\")\n ax[4].set_xlim(logs.PE.min(),logs.PE.max())\n ax[5].set_xlabel('Facies')\n ax[6].set_xlabel(compadre)\n \n ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([])\n ax[4].set_yticklabels([]); ax[5].set_yticklabels([])\n ax[5].set_xticklabels([])\n ax[6].set_xticklabels([])\n f.suptitle('Well: %s'%logs.iloc[0]['Well Name'], fontsize=14,y=0.94)\n\neclf_cv.fit(X,y)\npred = eclf_cv.predict(X)\n\nX = training_data\nX['Prediction'] = pred\n\ncompare_facies_plot(X[X['Well Name'] == 'CROSS H CATTLE'], 'Prediction', facies_colors)\ncompare_facies_plot(X[X['Well Name'] == 'SHRIMPLIN'], 'Prediction', facies_colors)", "To do next:\n\nreplace current Random Forest in this notebook with our own extra-trees classifier.\nimplementation of new features.",
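"As an aside: the voting weights above were rounded by hand, but they could be derived directly from the leave-one-well-out results. A sketch (simply normalizing each classifier's average F1 score) follows:", "avg_f1 = np.array([np.mean(f1_svc), np.mean(f1_mlp), np.mean(f1_ET)])\nweights = avg_f1 / avg_f1.sum()\nprint \"suggested weights (SVC, MLP, ET): \", np.round(weights, 3)" ]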
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
kaushik94/tardis
docs/models/examples/Custom_Density_And_Boundary_Velocities.ipynb
bsd-3-clause
[ "Specifying boundary velocities in addition to a custom density file\nThis notebook will go through multiple detailed examples of how to properly run TARDIS with a custom ejecta profile specified by a custom density file and a custom abundance file.", "import tardis\nimport matplotlib.pyplot as plt\nimport numpy as np", "Your custom density file\nFirst, let's look at an example of a custom density file.\n80 day\n0 9500 9e-16\n1 10500 6e-16\n2 12000 2e-17\n\nThe first line specifies the time in days after the explosion\nAfter a skipped line, each row corresponds to a shell with index specified by the first column.\nThe second column lists the velocities of the outer boundary of the cell in km / s.\nThe third column lists the density of the cell.\n\nIMPORTANT\nThe default behavior of TARDIS is to use the first shell as the inner boundary. This means that v_inner_boundary = 9500, and the corresponding density 9e-16 is ignored because it is within the inner boundary. It can be replaced by an arbitrary number. The outer boundary of the last shell will be used as v_outer_boundary, so the default behavior will set v_outer_boundary = 12000.\nYour custom abundance file\nLet's look at an example of a custom density file.\nH He\n0.0 1.0\n0.4 0.6\n\nThe first line indicates which elements (or isotopes) correspond to which columns.\nAfter a skipped line, each row specifies the chemical abundance of one shell. Therefore the numbers in a given row should sum to 1.0.\n\nIMPORTANT\nNote that there are only 2 shells specified in this abundance file (despite the custom density file having 3 lines). This is because the custom density file specifies the boundaries of the shells, while the abundance file specifies the abundances within each shell.\nRunning TARDIS with the custom files\nNow let's run TARDIS using the example custom files.", "model = tardis.run_tardis('./test_config.yml')", "You can check to make sure that the model loaded and used by TARDIS during the simulation is consistent with your expectations based on the custom files you provided:", "print('v_inner_boundary = ',model.model.v_boundary_inner)\nprint('v_outer_boundary = ',model.model.v_boundary_outer)\nprint('\\n')\nprint('velocities of shell boundaries: ')\nprint(model.model.velocity)\nprint('\\n')\nprint('densities loaded by TARDIS: (NOTE that the density in the first line of the file was ignored! Densities are also rescaled.)')\nprint(model.model.density)", "Specifying boundary velocities in the config file\nIn addition to specifying custom density and abundance files, the user can set the v_inner_boundary and v_outer_boundary velocities in the YAML config file. This can cause some confusion, so we carefully go through some examples.\nIMPORTANT\nBoundary velocities set in the YAML config file must be within the velocity range specified in the custom density file (if one is provided).\nExample 1) v_inner_boundary lower than first velocity in density file\nIn this example, the first velocity in the density file is 9500 km/s. The user can specify in the config file the velocity of the inner boundary to be a lower velocity, say v_inner_boundary = 9000 km/s. This will cause TARDIS to raise an error.", "model = tardis.run_tardis('./test_config_ex1.yml')", "Example 2) v_outer_boundary larger than last velocity in density file\nIn this example, the last velocity in the density file is 12000 km/s. The user can specify in the config file the velocity of the outer boundary to a larger velocity, say v_outer_boundary = 13000 km/s. 
This will cause TARDIS to raise an error.", "model = tardis.run_tardis('./test_config_ex2.yml')", "Example 3) v_boundaries in config file are within density file velocity range\nHere the user sets v_inner_boundary = 9700 and v_outer_boundary = 11500 in the config file. Both values fall within the velocity range specified by the custom density file.", "model = tardis.run_tardis('./test_config_ex3.yml')\n\nprint('v_inner_boundary = ',model.model.v_boundary_inner)\nprint('v_outer_boundary = ',model.model.v_boundary_outer)\nprint('\\n')\nprint('velocities of shell boundaries: ')\nprint(model.model.velocity)\nprint('\\n')\nprint('densities loaded by TARDIS: (NOTE that the density in the first line of the file was ignored! Densities are also rescaled.)')\nprint(model.model.density)", "IMPORTANT\nNotice that the inner and outer boundary velocities are the ones specifically set by the user.",
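"As a convenience, the two custom files shown above can also be written from Python. The following is only a sketch (the file names and the use of plain file writing here are illustrative, not a TARDIS API):", "import numpy as np\n\n# shell boundary velocities [km/s] and densities [g/cm^3] from the example above\nvelocities = [9500, 10500, 12000]\ndensities = [9e-16, 6e-16, 2e-17]  # the first density is ignored by TARDIS\n\nwith open('custom_density.txt', 'w') as f:\n    f.write('80 day\\n\\n')\n    for i, (v, rho) in enumerate(zip(velocities, densities)):\n        f.write('%d %d %g\\n' % (i, v, rho))\n\n# two shells of H and He mass fractions; each row sums to 1.0\nwith open('custom_abundance.txt', 'w') as f:\n    f.write('H He\\n\\n')\n    np.savetxt(f, np.array([[0.0, 1.0], [0.4, 0.6]]), fmt='%.1f')" ]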
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
mne-tools/mne-tools.github.io
0.14/_downloads/plot_object_raw.ipynb
bsd-3-clause
[ "%matplotlib inline", "The :class:Raw &lt;mne.io.Raw&gt; data structure: continuous data", "from __future__ import print_function\n\nimport mne\nimport os.path as op\nfrom matplotlib import pyplot as plt", "Continuous data is stored in objects of type :class:Raw &lt;mne.io.Raw&gt;.\nThe core data structure is simply a 2D numpy array (channels × samples,\nstored in a private attribute called ._data) combined with an\n:class:Info &lt;mne.Info&gt; object (.info attribute)\n(see tut_info_objects).\nThe most common way to load continuous data is from a .fif file. For more\ninformation on loading data from other formats &lt;ch_convert&gt;, or\ncreating it from scratch &lt;tut_creating_data_structures&gt;.\nLoading continuous data", "# Load an example dataset, the preload flag loads the data into memory now\ndata_path = op.join(mne.datasets.sample.data_path(), 'MEG',\n 'sample', 'sample_audvis_raw.fif')\nraw = mne.io.read_raw_fif(data_path, preload=True)\nraw.set_eeg_reference() # set EEG average reference\n\n# Give the sample rate\nprint('sample rate:', raw.info['sfreq'], 'Hz')\n# Give the size of the data matrix\nprint('channels x samples:', raw._data.shape)", "<div class=\"alert alert-info\"><h4>Note</h4><p>Accessing the `._data` attribute is done here for educational\n purposes. However this is a private attribute as its name starts\n with an `_`. This suggests that you should **not** access this\n variable directly but rely on indexing syntax detailed just below.</p></div>\n\nInformation about the channels contained in the :class:Raw &lt;mne.io.Raw&gt;\nobject is contained in the :class:Info &lt;mne.Info&gt; attribute.\nThis is essentially a dictionary with a number of relevant fields (see\ntut_info_objects).\nIndexing data\nTo access the data stored within :class:Raw &lt;mne.io.Raw&gt; objects,\nit is possible to index the :class:Raw &lt;mne.io.Raw&gt; object.\nIndexing a :class:Raw &lt;mne.io.Raw&gt; object will return two arrays: an array\nof times, as well as the data representing those timepoints. This works\neven if the data is not preloaded, in which case the data will be read from\ndisk when indexing. 
The syntax is as follows:", "# Extract data from the first 5 channels, from 1 s to 3 s.\nsfreq = raw.info['sfreq']\ndata, times = raw[:5, int(sfreq * 1):int(sfreq * 3)]\n_ = plt.plot(times, data.T)\n_ = plt.title('Sample channels')", "Selecting subsets of channels and samples\nIt is possible to use more intelligent indexing to extract data, using\nchannel names, types or time ranges.", "# Pull all MEG channels:\n# Make sure to use .copy() or it will overwrite the data\nmeg_only = raw.copy().pick_types(meg=True)\neeg_only = raw.copy().pick_types(meg=False, eeg=True)\n\n# The MEG flag in particular lets you specify a string for more specificity\ngrad_only = raw.copy().pick_types(meg='grad')\n\n# Or you can use custom channel names\npick_chans = ['MEG 0112', 'MEG 0111', 'MEG 0122', 'MEG 0123']\nspecific_chans = raw.copy().pick_channels(pick_chans)\nprint(meg_only)\nprint(eeg_only)\nprint(grad_only)\nprint(specific_chans)", "Notice the different scalings of these types", "f, (a1, a2) = plt.subplots(2, 1)\neeg, times = eeg_only[0, :int(sfreq * 2)]\nmeg, times = meg_only[0, :int(sfreq * 2)]\na1.plot(times, meg[0])\na2.plot(times, eeg[0])\ndel eeg, meg, meg_only, grad_only, eeg_only, data, specific_chans", "You can restrict the data to a specific time range", "raw = raw.crop(0, 50) # in seconds\nprint('New time range from', raw.times.min(), 's to', raw.times.max(), 's')", "And drop channels by name", "nchan = raw.info['nchan']\nraw = raw.drop_channels(['MEG 0241', 'EEG 001'])\nprint('Number of channels reduced from', nchan, 'to', raw.info['nchan'])", "Concatenating :class:Raw &lt;mne.io.Raw&gt; objects\n:class:Raw &lt;mne.io.Raw&gt; objects can be concatenated in time by using the\n:func:append &lt;mne.io.Raw.append&gt; function. For this to work, they must\nhave the same number of channels and their :class:Info\n&lt;mne.Info&gt; structures should be compatible.", "# Create multiple :class:`Raw <mne.io.RawFIF>` objects\nraw1 = raw.copy().crop(0, 10)\nraw2 = raw.copy().crop(10, 20)\nraw3 = raw.copy().crop(20, 40)\n\n# Concatenate in time (also works without preloading)\nraw1.append([raw2, raw3])\nprint('Time extends from', raw1.times.min(), 's to', raw1.times.max(), 's')",
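"One more convenience before we finish (a small sketch; see :func:time_as_index &lt;mne.io.Raw.time_as_index&gt;): instead of multiplying by the sampling rate by hand as we did earlier, times in seconds can be converted to sample indices directly:", "# Convert 1 s and 3 s to sample indices on the concatenated recording\nstart, stop = raw1.time_as_index([1., 3.])\ndata, times = raw1[:5, start:stop]\nprint('data shape:', data.shape)" ]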
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
phoebe-project/phoebe2-docs
2.3/tutorials/pblum.ipynb
gpl-3.0
[ "Passband Luminosity\nSetup\nLet's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab).", "#!pip install -I \"phoebe>=2.3,<2.4\"\n\nimport phoebe\nfrom phoebe import u # units\nimport numpy as np\n\nlogger = phoebe.logger()\n\nb = phoebe.default_binary()", "And we'll add a single light curve dataset so that we can see how passband luminosities affect the resulting synthetic light curve model.", "b.add_dataset('lc', times=phoebe.linspace(0,1,101), dataset='lc01')", "Lastly, just to make things a bit easier and faster, we'll turn off irradiation (reflection), use blackbody atmospheres, and disable limb-darkening (so that we can play with weird temperatures without having to worry about falling of the grids).", "b.set_value('irrad_method', 'none')\nb.set_value_all('ld_mode', 'manual')\nb.set_value_all('ld_func', 'linear')\nb.set_value_all('ld_coeffs', [0.])\nb.set_value_all('ld_mode_bol', 'manual')\nb.set_value_all('ld_func_bol', 'linear')\nb.set_value_all('ld_coeffs_bol', [0.])\nb.set_value_all('atm', 'blackbody')", "Relevant Parameters & Methods\nA pblum_mode parameter exists for each LC dataset in the bundle. This parameter defines how passband luminosities are handled. The subsections below describe the use and parameters exposed depening on the value of this parameter.", "print(b.get_parameter(qualifier='pblum_mode', dataset='lc01'))", "For any of these modes, you can expose the intrinsic (excluding extrinsic effects such as spots and irradiation) and extrinsic computed luminosities of each star (in each dataset) by calling b.compute_pblums.\nNote that as its an aspect-dependent effect, boosting is ignored in all of these output values.", "print(b.compute_pblums())", "For more details, see the section below on \"Accessing Model Luminosities\" as well as the b.compute_pblums API docs\nThe table below provides a brief summary of all available pblum_mode options. Details are given in the remainder of the tutorial.\n| pblum_mode | intent |\n|-------------------|--------|\n| component-coupled | provide pblum for one star (by default L1), compute pblums for other stars from atmosphere tables |\n| decoupled | provide pblums for each star independently |\n| absolute | obtain unscaled pblums, in passband watts, computed from atmosphere tables |\n| dataset-scaled | calculate each pblum from the scaling factor between absolute fluxes and each dataset |\n| dataset-coupled | same as above, but all datasets are scaled with the same scaling factor |\npblum_mode = 'component-coupled'\npblum_mode='component-coupled' is the default option and maintains the default behavior from previous releases. 
Here the user provides passband luminosities for a single star in the system for the given dataset/passband, and all other stars are scaled accordingly.\nBy default, the value of pblum is set for the primary star in the system, but we can instead provide pblum for the secondary star by changing the value of pblum_component.", "print(b.filter(qualifier='pblum'))\n\nprint(b.get_parameter(qualifier='pblum_component'))\n\nb.set_value('pblum_component', 'secondary')\n\nprint(b.filter(qualifier='pblum'))", "Note that in general (for the case of a spherical star), a pblum of 4pi will result in an out-of-eclipse flux of ~1.\nNow let's just reset to the default case where the primary star has a provided (default) pblum of 4pi.", "b.set_value('pblum_component', 'primary')\nprint(b.get_parameter(qualifier='pblum', component='primary'))", "NOTE: other parameters also affect flux-levels, including limb darkening, third light, boosting, irradiation, and distance\nIf we call b.compute_pblums, we'll see that the computed intrinsic luminosity of the primary star (pblum@primary@lc01) matches the value of the parameter above.", "print(b.compute_pblums())", "Let's see how changing the value of pblum affects the computed light curve. By default, pblum is set to be 4 pi, giving a total flux for the primary star of ~1.\nSince the secondary star in the default binary is identical to the primary star, we'd expect an out-of-eclipse flux of the binary to be ~2.", "b.run_compute()\n\nafig, mplfig = b.plot(show=True)", "If we now set pblum to be only 2 pi, we should expect the luminosities as well as entire light curve to be scaled in half.", "b.set_value('pblum', component='primary', value=2*np.pi)\n\nprint(b.compute_pblums())\n\nb.run_compute()\n\nafig, mplfig = b.plot(show=True)", "And if we halve the temperature of the secondary star - the resulting light curve changes to the new sum of fluxes, where the primary star dominates since the secondary star flux is reduced by a factor of 16, so we expect a total out-of-eclipse flux of ~0.5 + ~0.5/16 = ~0.53.", "b.set_value('teff', component='secondary', value=0.5 * b.get_value('teff', component='primary'))\n\nprint(b.filter(qualifier='teff'))\n\nprint(b.compute_pblums())\n\nb.run_compute()\n\nafig, mplfig = b.plot(show=True)", "Let us undo our changes before we look at decoupled luminosities.", "b.set_value_all('teff', 6000)\nb.set_value_all('pblum', 4*np.pi)", "pblum_mode = 'decoupled'\nThe luminosities are decoupled when pblums are provided for the individual components. To accomplish this, set pblum_mode to 'decoupled'.", "b.set_value('pblum_mode', 'decoupled')", "Now we see that both pblum parameters are available and can have different values.", "print(b.filter(qualifier='pblum'))", "If we set these to 4pi, then we'd expect each star to contribute 1.0 in flux units, meaning the baseline of the light curve should be at approximately 2.0", "b.set_value_all('pblum', 4*np.pi)\n\nprint(b.compute_pblums())\n\nb.run_compute()\n\nafig, mplfig = b.plot(show=True)", "Now let's make a significant temperature-ratio by making a very cool secondary star. Since the luminosities are decoupled - this temperature change won't affect the resulting light curve very much (compare this to the case above with coupled luminosities). 
What is happening here is that even though the secondary star is cooler, its luminosity is being rescaled to the same value as the primary star, so the eclipse depth doesn't change (you would see a similar lack-of-effect if you changed the radii - although in that case the eclipse widths would still change due to the change in geometry).", "print(b.filter(qualifier='teff'))\n\nb.set_value('teff', component='secondary', value=3000)\n\nprint(b.compute_pblums())\n\nb.run_compute()\n\nafig, mplfig = b.plot(show=True)", "In most cases you will not want decoupled luminosities as they can easily break the self-consistency of your model.\nNow we'll just undo our changes before we look at accessing model luminosities.", "b.set_value_all('teff', 6000)\nb.set_value_all('pblum', 4*np.pi)", "pblum_mode = 'absolute'\nBy setting pblum_mode to 'absolute', luminosities and fluxes will be returned in absolute units and not rescaled. Note that third light and distance will still affect the resulting flux levels.", "b.set_value('pblum_mode', 'absolute')", "As we no longer provide pblum values to scale, those parameters are not visible when filtering.", "print(b.filter(qualifier='pblum'))\n\nprint(b.compute_pblums())\n\nb.run_compute()\n\nafig, mplfig = b.plot(show=True)", "(note the exponent on the y-axis of the above figure)\npblum_mode = 'dataset-scaled'\nSetting pblum_mode to 'dataset-scaled' is only allowed if fluxes are attached to the dataset itself. Let's use our existing model to generate \"fake\" data and then populate the dataset.", "fluxes = b.get_value('fluxes', context='model') * 0.8 + (np.random.random(101) * 0.1)\n\nb.set_value('fluxes', context='dataset', value=fluxes)\n\nafig, mplfig = b.plot(context='dataset', show=True)", "Now if we set pblum_mode to 'dataset-scaled', the resulting model will be scaled to best fit the data. Note that in this mode we cannot access computed luminosities via b.compute_pblums (without providing model - we'll get back to that in a minute), nor can we access scaled intensities from the mesh.", "b.set_value('pblum_mode', 'dataset-scaled')\n\nprint(b.compute_pblums())\n\nb.run_compute()\n\nafig, mplfig = b.plot(show=True)", "The model stores the scaling factor used between the absolute fluxes and the relative fluxes that best fit to the observational data.", "print(b.get_parameter(qualifier='flux_scale', context='model'))", "We can then access the scaled luminosities by passing the model tag to b.compute_pblums. Keep in mind this only scales the absolute luminosities by flux_scale so assumes a fixed distance@system. This is useful though if we wanted to use 'dataset-scaled' to get an estimate for pblum before changing to 'component-coupled' and optimizing or marginalizing over pblum.", "print(b.compute_pblums(model='latest'))", "Before moving on, let's remove our fake data (and reset pblum_mode or else PHOEBE will complain about the lack of data).", "b.set_value('pblum_mode', 'component-coupled')\n\nb.set_value('fluxes', context='dataset', value=[])", "pblum_mode = 'dataset-coupled'\nSetting pblum_mode to 'dataset-coupled' allows for the same scaling factor to be applied to two different datasets. 
In order to see this in action, we'll add another LC dataset in a different passband.", "b.add_dataset('lc', times=phoebe.linspace(0,1,101), \n ld_mode='manual', ld_func='linear', ld_coeffs=[0],\n passband='Johnson:B', dataset='lc02')\n\nb.set_value('pblum_mode', dataset='lc02', value='dataset-coupled')", "Here we see the pblum_mode@lc01 is set to 'component-coupled' meaning it will follow the rules described earlier where pblum is provided for the primary component and the secondary is coupled to that. pblum_mode@lc02 is set to 'dataset-coupled' with pblum_dataset@lc01 pointing to 'lc01'.", "print(b.filter('pblum*'))\n\nprint(b.compute_pblums())\n\nb.run_compute()\n\nafig, mplfig = b.plot(show=True, legend=True)", "Accessing Model Luminosities\nPassband luminosities at t0@system per-star (including following all coupling logic) can be computed and exposed on the fly by calling compute_pblums.", "print(b.compute_pblums())", "By default this exposes 'pblum' and 'pblum_ext' for all component-dataset pairs in the form of a dictionary. Alternatively, you can pass a label or list of labels to component and/or dataset.", "print(b.compute_pblums(dataset='lc01', component='primary'))", "For more options, see the b.compute_pblums API docs.\nNote that this same logic is applied (at t0) to initialize all passband luminosities within the backend, so there is no need to call compute_pblums before run_compute.\nIn order to access passband luminosities at times other than t0, you can add a mesh dataset and request the pblum_ext column to be exposed. For stars that have pblum defined (as opposed to coupled to another star or dataset), this value should be equivalent to the value of the parameter (at t0 if no features or irradiation are present, and in simple circular cases will probably be equivalent at all times).\nLet's create a mesh dataset at a few times and then access the synthetic luminosities.", "b.add_dataset('mesh', times=np.linspace(0,1,5), dataset='mesh01', columns=['areas', 'pblum_ext@lc01', 'ldint@lc01', 'ptfarea@lc01', 'abs_normal_intensities@lc01', 'normal_intensities@lc01'])\n\nb.run_compute()", "Since the luminosities are passband-dependent, they are stored with the same dataset as the light curve (or RV), but with the mesh method, and are available at each of the times at which a mesh was stored.", "print(b.filter(qualifier='pblum_ext', context='model').twigs)", "Now let's compare the value of the synthetic luminosities to those of the input pblum", "t0 = b.get_value('t0@system')\n\nprint(b.get_value(qualifier='pblum_ext', time=t0, component='primary', kind='mesh', context='model'))\n\nprint(b.get_value('pblum@primary@dataset'))\n\nprint(b.compute_pblums(component='primary', dataset='lc01'))", "In this case, since our two stars are identical, the synthetic luminosity of the secondary star should be the same as the primary (and the same as pblum@primary).", "print(b.get_value(qualifier='pblum_ext', time=t0, component='primary', kind='mesh', context='model'))\n\nprint(b.get_value(qualifier='pblum_ext', time=t0, component='secondary', kind='mesh', context='model'))", "However, if we change the temperature of the secondary star again, since the pblums are coupled, we'd expect the synthetic luminosity of the primary to remain fixed but the secondary to decrease.", "b['teff@secondary@component'] = 3000\n\nprint(b.compute_pblums(dataset='lc01'))\n\nb.run_compute()\n\nprint(b.get_value(qualifier='pblum_ext', time=t0, component='primary', kind='mesh', 
context='model'))\n\nprint(b.get_value(qualifier='pblum_ext', time=t0, component='secondary', kind='mesh', context='model'))", "And lastly, if we re-enable irradiation, we'll see that the extrinsic luminosities do not match the prescribed value of pblum (an intrinsic luminosity).", "print(b['ld_mode'])\n\nprint(b['atm'])\n\nb.run_compute(irrad_method='horvat')\n\nprint(b.get_value(qualifier='pblum_ext', time=t0, component='primary', kind='mesh', context='model'))\n\nprint(b.get_value('pblum@primary@dataset'))\n\nprint(b.compute_pblums(dataset='lc01', irrad_method='horvat'))", "Now, we'll just undo our changes before continuing.", "b.set_value_all('teff@component', 6000)", "Role of Pblum\nLet's now look at the intensities in the mesh to see how they're being scaled under-the-hood. First we'll recompute our model with the equal temperatures and irradiation disabled (to ignore the difference between pblum and pblum_ext).", "b.run_compute()\n\nareas = b.get_value(qualifier='areas', dataset='mesh01', time=t0, component='primary', unit='m^2')\nldint = b.get_value(qualifier='ldint', component='primary', time=t0)\nptfarea = b.get_value(qualifier='ptfarea', component='primary', time=t0)\n\nabs_normal_intensities = b.get_value(qualifier='abs_normal_intensities', dataset='lc01', time=t0, component='primary')\nnormal_intensities = b.get_value(qualifier='normal_intensities', dataset='lc01', time=t0, component='primary')", "'abs_normal_intensities' are the intensities per triangle in absolute units, i.e. W/m^3.", "print(np.median(abs_normal_intensities))", "The values of 'normal_intensities', however, are significantly smaller (in this case). These are the intensities in relative units which will eventually be integrated to give us flux for a light curve.", "print(np.median(normal_intensities))", "'normal_intensities' are scaled from 'abs_normal_intensities' so that the computed luminosity matches the prescribed luminosity (pblum).\nHere we compute the luminosity by summing each triangle's intensity in the normal direction, multiplied by pi (to account for blackbody intensity emitted in all directions in the solid angle) and by the area of that triangle.", "pblum = b.get_value(qualifier='pblum', component='primary', context='dataset')\nprint(np.sum(normal_intensities * ldint * np.pi * areas) * ptfarea, pblum)",
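"As a quick sanity check on the numbers quoted earlier (plain arithmetic, not a PHOEBE call): halving the secondary's temperature scales its flux contribution by (1/2)**4 = 1/16, which is where the ~0.53 out-of-eclipse estimate in the coupled case came from.", "ratio = (3000. / 6000.)**4\nprint(ratio) # 0.0625, i.e. 1/16\nprint(0.5 + 0.5 * ratio) # ~0.53, the expected out-of-eclipse flux" ]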
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ES-DOC/esdoc-jupyterhub
notebooks/cccma/cmip6/models/sandbox-2/ocean.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Ocean\nMIP Era: CMIP6\nInstitute: CCCMA\nSource ID: SANDBOX-2\nTopic: Ocean\nSub-Topics: Timestepping Framework, Advection, Lateral Physics, Vertical Physics, Uplow Boundaries, Boundary Forcing. \nProperties: 133 (101 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:53:46\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'cccma', 'sandbox-2', 'ocean')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties\n2. Key Properties --&gt; Seawater Properties\n3. Key Properties --&gt; Bathymetry\n4. Key Properties --&gt; Nonoceanic Waters\n5. Key Properties --&gt; Software Properties\n6. Key Properties --&gt; Resolution\n7. Key Properties --&gt; Tuning Applied\n8. Key Properties --&gt; Conservation\n9. Grid\n10. Grid --&gt; Discretisation --&gt; Vertical\n11. Grid --&gt; Discretisation --&gt; Horizontal\n12. Timestepping Framework\n13. Timestepping Framework --&gt; Tracers\n14. Timestepping Framework --&gt; Baroclinic Dynamics\n15. Timestepping Framework --&gt; Barotropic\n16. Timestepping Framework --&gt; Vertical Physics\n17. Advection\n18. Advection --&gt; Momentum\n19. Advection --&gt; Lateral Tracers\n20. Advection --&gt; Vertical Tracers\n21. Lateral Physics\n22. Lateral Physics --&gt; Momentum --&gt; Operator\n23. Lateral Physics --&gt; Momentum --&gt; Eddy Viscosity Coeff\n24. Lateral Physics --&gt; Tracers\n25. Lateral Physics --&gt; Tracers --&gt; Operator\n26. Lateral Physics --&gt; Tracers --&gt; Eddy Diffusity Coeff\n27. Lateral Physics --&gt; Tracers --&gt; Eddy Induced Velocity\n28. Vertical Physics\n29. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Details\n30. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Tracers\n31. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Momentum\n32. Vertical Physics --&gt; Interior Mixing --&gt; Details\n33. Vertical Physics --&gt; Interior Mixing --&gt; Tracers\n34. Vertical Physics --&gt; Interior Mixing --&gt; Momentum\n35. Uplow Boundaries --&gt; Free Surface\n36. Uplow Boundaries --&gt; Bottom Boundary Layer\n37. Boundary Forcing\n38. Boundary Forcing --&gt; Momentum --&gt; Bottom Friction\n39. Boundary Forcing --&gt; Momentum --&gt; Lateral Friction\n40. Boundary Forcing --&gt; Tracers --&gt; Sunlight Penetration\n41. Boundary Forcing --&gt; Tracers --&gt; Fresh Water Forcing \n1. Key Properties\nOcean key properties\n1.1. Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of ocean model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. 
Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of ocean model code (NEMO 3.6, MOM 5.0,...)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.3. Model Family\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of ocean model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.model_family') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"OGCM\" \n# \"slab ocean\" \n# \"mixed layer ocean\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.4. Basic Approximations\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nBasic approximations made in the ocean.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.basic_approximations') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Primitive equations\" \n# \"Non-hydrostatic\" \n# \"Boussinesq\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.5. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of prognostic variables in the ocean component.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.prognostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Potential temperature\" \n# \"Conservative temperature\" \n# \"Salinity\" \n# \"U-velocity\" \n# \"V-velocity\" \n# \"W-velocity\" \n# \"SSH\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Seawater Properties\nPhysical properties of seawater in ocean\n2.1. Eos Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of EOS for sea water", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Linear\" \n# \"Wright, 1997\" \n# \"Mc Dougall et al.\" \n# \"Jackett et al. 2006\" \n# \"TEOS 2010\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "2.2. Eos Functional Temp\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTemperature used in EOS for sea water", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Potential temperature\" \n# \"Conservative temperature\" \n# TODO - please enter value(s)\n", "2.3. Eos Functional Salt\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSalinity used in EOS for sea water", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Practical salinity Sp\" \n# \"Absolute salinity Sa\" \n# TODO - please enter value(s)\n", "2.4. Eos Functional Depth\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDepth or pressure used in EOS for sea water ?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Pressure (dbars)\" \n# \"Depth (meters)\" \n# TODO - please enter value(s)\n", "2.5. Ocean Freezing Point\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEquation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"TEOS 2010\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "2.6. Ocean Specific Heat\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSpecific heat in ocean (cpocean) in J/(kg K)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "2.7. Ocean Reference Density\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBoussinesq reference density (rhozero) in kg / m3", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3. Key Properties --&gt; Bathymetry\nProperties of bathymetry in ocean\n3.1. Reference Dates\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nReference date of bathymetry", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Present day\" \n# \"21000 years BP\" \n# \"6000 years BP\" \n# \"LGM\" \n# \"Pliocene\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "3.2. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the bathymetry fixed in time in the ocean ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.bathymetry.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "3.3. Ocean Smoothing\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe any smoothing or hand editing of bathymetry in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3.4. Source\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe source of bathymetry in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.bathymetry.source') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Nonoceanic Waters\nNon oceanic waters treatement in ocean\n4.1. Isolated Seas\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how isolated seas is performed", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.2. River Mouth\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how river mouth mixing or estuaries specific treatment is performed", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5. Key Properties --&gt; Software Properties\nSoftware properties of ocean code\n5.1. Repository\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nLocation of code for this component.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.2. Code Version\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCode version identifier.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.3. Code Languages\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nCode language(s).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6. Key Properties --&gt; Resolution\nResolution in the ocean grid\n6.1. Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.2. Canonical Horizontal Resolution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nExpression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.3. Range Horizontal Resolution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nRange of horizontal resolution with spatial details, eg. 50(Equator)-100km or 0.1-0.5 degrees etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.4. Number Of Horizontal Gridpoints\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTotal number of horizontal (XY) points (or degrees of freedom) on computational grid.", "# PROPERTY ID - DO NOT EDIT ! 
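\n# ILLUSTRATIVE EXAMPLE (hypothetical numbers, not part of the generated template): \n# INTEGER properties take a bare number, e.g. DOC.set_value(1472282) for a \n# hypothetical 1442 x 1021 point grid (the XY product). 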
\nDOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "6.5. Number Of Vertical Levels\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of vertical levels resolved on computational grid.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "6.6. Is Adaptive Grid\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDefault is False. Set true if grid resolution changes during execution.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6.7. Thickness Level 1\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThickness of first surface ocean level (in meters)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "7. Key Properties --&gt; Tuning Applied\nTuning methodology for ocean component\n7.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and the possible conflicts with parameterization level tuning. In particular, describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. Global Mean Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList set of metrics of the global mean state used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.3. Regional Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList of regional metrics of mean state (e.g. THC, AABW, regional means etc.) used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.4. Trend Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList observed trend metrics used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8. 
Key Properties --&gt; Conservation\nConservation in the ocean component\n8.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBrief description of conservation methodology", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nProperties conserved in the ocean by the numerical schemes", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.scheme') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Energy\" \n# \"Enstrophy\" \n# \"Salt\" \n# \"Volume of ocean\" \n# \"Momentum\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.3. Consistency Properties\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAny additional consistency properties (energy conversion, pressure gradient discretisation, ...)?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.4. Corrected Conserved Prognostic Variables\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSet of variables which are conserved by more than the numerical scheme alone.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.5. Was Flux Correction Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDoes conservation involve flux correction ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "9. Grid\nOcean grid\n9.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of grid in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Grid --&gt; Discretisation --&gt; Vertical\nProperties of vertical discretisation in ocean\n10.1. Coordinates\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of vertical coordinates in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Z-coordinate\" \n# \"Z*-coordinate\" \n# \"S-coordinate\" \n# \"Isopycnic - sigma 0\" \n# \"Isopycnic - sigma 2\" \n# \"Isopycnic - sigma 4\" \n# \"Isopycnic - other\" \n# \"Hybrid / Z+S\" \n# \"Hybrid / Z+isopycnic\" \n# \"Hybrid / other\" \n# \"Pressure referenced (P)\" \n# \"P*\" \n# \"Z**\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "10.2. 
Partial Steps\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nUsing partial steps with Z or Z* vertical coordinate in ocean ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "11. Grid --&gt; Discretisation --&gt; Horizontal\nType of horizontal discretisation scheme in ocean\n11.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHorizontal grid type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Lat-lon\" \n# \"Rotated north pole\" \n# \"Two north poles (ORCA-style)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "11.2. Staggering\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nHorizontal grid staggering type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Arakawa B-grid\" \n# \"Arakawa C-grid\" \n# \"Arakawa E-grid\" \n# \"N/a\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "11.3. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHorizontal discretisation scheme in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Finite difference\" \n# \"Finite volumes\" \n# \"Finite elements\" \n# \"Unstructured grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "12. Timestepping Framework\nOcean Timestepping Framework\n12.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of time stepping in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12.2. Diurnal Cycle\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDiurnal cycle type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Via coupling\" \n# \"Specific treatment\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13. Timestepping Framework --&gt; Tracers\nProperties of tracers time stepping in ocean\n13.1. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTracers time stepping scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Leap-frog + Asselin filter\" \n# \"Leap-frog + Periodic Euler\" \n# \"Predictor-corrector\" \n# \"Runge-Kutta 2\" \n# \"AM3-LF\" \n# \"Forward-backward\" \n# \"Forward operator\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.2. 
Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTracers time step (in seconds)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "14. Timestepping Framework --&gt; Baroclinic Dynamics\nBaroclinic dynamics in ocean\n14.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBaroclinic dynamics type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Preconditioned conjugate gradient\" \n# \"Sub cyling\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.2. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBaroclinic dynamics scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Leap-frog + Asselin filter\" \n# \"Leap-frog + Periodic Euler\" \n# \"Predictor-corrector\" \n# \"Runge-Kutta 2\" \n# \"AM3-LF\" \n# \"Forward-backward\" \n# \"Forward operator\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.3. Time Step\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nBaroclinic time step (in seconds)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "15. Timestepping Framework --&gt; Barotropic\nBarotropic time stepping in ocean\n15.1. Splitting\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime splitting method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"split explicit\" \n# \"implicit\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.2. Time Step\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nBarotropic time step (in seconds)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "16. Timestepping Framework --&gt; Vertical Physics\nVertical physics time stepping in ocean\n16.1. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDetails of vertical time stepping in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17. Advection\nOcean advection\n17.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of advection in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18. 
Advection --&gt; Momentum\nProperties of lateral momentum advection scheme in ocean\n18.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of lateral momentum advection scheme in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.momentum.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Flux form\" \n# \"Vector form\" \n# TODO - please enter value(s)\n", "18.2. Scheme Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of ocean momentum advection scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.momentum.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18.3. ALE\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nUsing ALE for vertical advection ? (if vertical coordinates are sigma)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.momentum.ALE') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "19. Advection --&gt; Lateral Tracers\nProperties of lateral tracer advection scheme in ocean\n19.1. Order\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOrder of lateral tracer advection scheme in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "19.2. Flux Limiter\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMonotonic flux limiter for lateral tracer advection scheme in ocean ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "19.3. Effective Order\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEffective order of limited lateral tracer advection scheme in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "19.4. Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescriptive text for lateral tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "19.5. Passive Tracers\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nPassive tracers advected", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Ideal age\" \n# \"CFC 11\" \n# \"CFC 12\" \n# \"SF6\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "19.6. Passive Tracers Advection\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIs advection of passive tracers different from that of active tracers ? 
if so, describe.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "20. Advection --&gt; Vertical Tracers\nProperties of vertical tracer advection scheme in ocean\n20.1. Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescriptive text for vertical tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.vertical_tracers.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "20.2. Flux Limiter\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMonotonic flux limiter for vertical tracer advection scheme in ocean ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "21. Lateral Physics\nOcean lateral physics\n21.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of lateral physics in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "21.2. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of transient eddy representation in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Eddy active\" \n# \"Eddy admitting\" \n# TODO - please enter value(s)\n", "22. Lateral Physics --&gt; Momentum --&gt; Operator\nProperties of lateral physics operator for momentum in ocean\n22.1. Direction\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDirection of lateral physics momemtum scheme in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Horizontal\" \n# \"Isopycnal\" \n# \"Isoneutral\" \n# \"Geopotential\" \n# \"Iso-level\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "22.2. Order\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOrder of lateral physics momemtum scheme in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Harmonic\" \n# \"Bi-harmonic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "22.3. Discretisation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDiscretisation of lateral physics momemtum scheme in the ocean", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Second order\" \n# \"Higher order\" \n# \"Flux limiter\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23. Lateral Physics --&gt; Momentum --&gt; Eddy Viscosity Coeff\nProperties of eddy viscosity coeff in lateral physics momentum scheme in the ocean\n23.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nLateral physics momentum eddy viscosity coeff type in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Space varying\" \n# \"Time + space varying (Smagorinsky)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23.2. Constant Coefficient\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf constant, value of eddy viscosity coeff in lateral physics momentum scheme (in m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "23.3. Variable Coefficient\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf space-varying, describe variations of eddy viscosity coeff in lateral physics momentum scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "23.4. Coeff Background\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe background eddy viscosity coeff in lateral physics momentum scheme (give values in m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "23.5. Coeff Backscatter\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there backscatter in eddy viscosity coeff in lateral physics momentum scheme ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "24. Lateral Physics --&gt; Tracers\nProperties of lateral physics for tracers in ocean\n24.1. Mesoscale Closure\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there a mesoscale closure in the lateral physics tracers scheme ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "24.2. Submesoscale Mixing\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there a submesoscale mixing parameterisation (i.e. Fox-Kemper) in the lateral physics tracers scheme ?", "# PROPERTY ID - DO NOT EDIT ! 
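\n# ILLUSTRATIVE EXAMPLE (hypothetical answer, not part of the generated template): \n# BOOLEAN properties are set without quotes, e.g. DOC.set_value(True) for a model \n# that includes a Fox-Kemper style submesoscale parameterisation. 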
\nDOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "25. Lateral Physics --&gt; Tracers --&gt; Operator\nProperties of lateral physics operator for tracers in ocean\n25.1. Direction\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDirection of lateral physics tracers scheme in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Horizontal\" \n# \"Isopycnal\" \n# \"Isoneutral\" \n# \"Geopotential\" \n# \"Iso-level\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "25.2. Order\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOrder of lateral physics tracers scheme in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Harmonic\" \n# \"Bi-harmonic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "25.3. Discretisation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDiscretisation of lateral physics tracers scheme in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Second order\" \n# \"Higher order\" \n# \"Flux limiter\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "26. Lateral Physics --&gt; Tracers --&gt; Eddy Diffusivity Coeff\nProperties of eddy diffusivity coeff in lateral physics tracers scheme in the ocean\n26.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nLateral physics tracers eddy diffusivity coeff type in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Space varying\" \n# \"Time + space varying (Smagorinsky)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "26.2. Constant Coefficient\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf constant, value of eddy diffusivity coeff in lateral physics tracers scheme (in m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "26.3. Variable Coefficient\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf space-varying, describe variations of eddy diffusivity coeff in lateral physics tracers scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "26.4. 
Coeff Background\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe background eddy diffusivity coeff in lateral physics tracers scheme (give values in m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "26.5. Coeff Backscatter\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there backscatter in eddy diffusivity coeff in lateral physics tracers scheme ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "27. Lateral Physics --&gt; Tracers --&gt; Eddy Induced Velocity\nProperties of eddy induced velocity (EIV) in lateral physics tracers scheme in the ocean\n27.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of EIV in lateral physics tracers in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"GM\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "27.2. Constant Val\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf EIV scheme for tracers is constant, specify coefficient value (m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "27.3. Flux Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of EIV flux (advective or skew)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "27.4. Added Diffusivity\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of EIV added diffusivity (constant, flow dependent or none)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "28. Vertical Physics\nOcean Vertical Physics\n28.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of vertical physics in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "29. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Details\nProperties of vertical physics in ocean\n29.1. Langmuir Cells Mixing\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there Langmuir cells mixing in upper ocean ?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "30. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Tracers\n*Properties of boundary layer (BL) mixing on tracers in the ocean*\n30.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of boundary layer mixing for tracers in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant value\" \n# \"Turbulent closure - TKE\" \n# \"Turbulent closure - KPP\" \n# \"Turbulent closure - Mellor-Yamada\" \n# \"Turbulent closure - Bulk Mixed Layer\" \n# \"Richardson number dependent - PP\" \n# \"Richardson number dependent - KT\" \n# \"Imbeded as isopycnic vertical coordinate\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "30.2. Closure Order\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf turbulent BL mixing of tracers, specify order of closure (0, 1, 2.5, 3)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "30.3. Constant\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf constant BL mixing of tracers, specify coefficient (m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "30.4. Background\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBackground BL mixing of tracers coefficient (schema and value in m2/s - may be none)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "31. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Momentum\n*Properties of boundary layer (BL) mixing on momentum in the ocean*\n31.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of boundary layer mixing for momentum in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant value\" \n# \"Turbulent closure - TKE\" \n# \"Turbulent closure - KPP\" \n# \"Turbulent closure - Mellor-Yamada\" \n# \"Turbulent closure - Bulk Mixed Layer\" \n# \"Richardson number dependent - PP\" \n# \"Richardson number dependent - KT\" \n# \"Imbeded as isopycnic vertical coordinate\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "31.2. Closure Order\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf turbulent BL mixing of momentum, specify order of closure (0, 1, 2.5, 3)", "# PROPERTY ID - DO NOT EDIT ! 
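\n# ILLUSTRATIVE EXAMPLE (hypothetical answer, not part of the generated template): \n# FLOAT properties take a bare number, e.g. DOC.set_value(2.5) for a \n# Mellor-Yamada level 2.5 closure. 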
\nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "31.3. Constant\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf constant BL mixing of momentum, specify coefficient (m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "31.4. Background\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBackground BL mixing of momentum coefficient (schema and value in m2/s - may be none)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "32. Vertical Physics --&gt; Interior Mixing --&gt; Details\n*Properties of interior mixing in the ocean*\n32.1. Convection Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of vertical convection in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Non-penetrative convective adjustment\" \n# \"Enhanced vertical diffusion\" \n# \"Included in turbulence closure\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "32.2. Tide Induced Mixing\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how tide induced mixing is modelled (barotropic, baroclinic, none)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "32.3. Double Diffusion\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there double diffusion ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "32.4. Shear Mixing\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there interior shear mixing ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "33. Vertical Physics --&gt; Interior Mixing --&gt; Tracers\n*Properties of interior mixing on tracers in the ocean*\n33.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of interior mixing for tracers in ocean", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant value\" \n# \"Turbulent closure / TKE\" \n# \"Turbulent closure - Mellor-Yamada\" \n# \"Richardson number dependent - PP\" \n# \"Richardson number dependent - KT\" \n# \"Imbeded as isopycnic vertical coordinate\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "33.2. Constant\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf constant interior mixing of tracers, specify coefficient (m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "33.3. Profile\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the background interior mixing using a vertical profile for tracers (i.e. is NOT constant) ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "33.4. Background\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBackground interior mixing of tracers coefficient (schema and value in m2/s - may be none)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "34. Vertical Physics --&gt; Interior Mixing --&gt; Momentum\n*Properties of interior mixing on momentum in the ocean*\n34.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of interior mixing for momentum in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant value\" \n# \"Turbulent closure / TKE\" \n# \"Turbulent closure - Mellor-Yamada\" \n# \"Richardson number dependent - PP\" \n# \"Richardson number dependent - KT\" \n# \"Imbeded as isopycnic vertical coordinate\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "34.2. Constant\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf constant interior mixing of momentum, specify coefficient (m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "34.3. Profile\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the background interior mixing using a vertical profile for momentum (i.e. is NOT constant) ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "34.4. Background\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBackground interior mixing of momentum coefficient (schema and value in m2/s - may be none)", "# PROPERTY ID - DO NOT EDIT ! 
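\n# ILLUSTRATIVE EXAMPLE (hypothetical value, not part of the generated template): \n# STRING properties take free text describing scheme and value, e.g. \n# DOC.set_value(\"Constant background viscosity of 1.2e-4 m2/s\"). 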
\nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "35. Uplow Boundaries --&gt; Free Surface\nProperties of free surface in ocean\n35.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of free surface in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "35.2. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nFree surface scheme in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Linear implicit\" \n# \"Linear filtered\" \n# \"Linear semi-explicit\" \n# \"Non-linear implicit\" \n# \"Non-linear filtered\" \n# \"Non-linear semi-explicit\" \n# \"Fully explicit\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "35.3. Embeded Seaice\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the sea-ice embeded in the ocean model (instead of levitating) ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "36. Uplow Boundaries --&gt; Bottom Boundary Layer\nProperties of bottom boundary layer in ocean\n36.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of bottom boundary layer in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "36.2. Type Of Bbl\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of bottom boundary layer in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Diffusive\" \n# \"Acvective\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "36.3. Lateral Mixing Coef\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf bottom BL is diffusive, specify value of lateral mixing coefficient (in m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "36.4. Sill Overflow\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe any specific treatment of sill overflows", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37. Boundary Forcing\nOcean boundary forcing\n37.1. 
Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of boundary forcing in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37.2. Surface Pressure\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how surface pressure is transmitted to ocean (via sea-ice, nothing specific,...)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37.3. Momentum Flux Correction\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe any type of ocean surface momentum flux correction and, if applicable, how it is applied and where.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37.4. Tracers Flux Correction\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe any type of ocean surface tracers flux correction and, if applicable, how it is applied and where.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37.5. Wave Effects\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe if/how wave effects are modelled at ocean surface.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.wave_effects') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37.6. River Runoff Budget\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how river runoff from land surface is routed to ocean and any global adjustment done.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37.7. Geothermal Heating\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe if/how geothermal heating is present at ocean bottom.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "38. Boundary Forcing --&gt; Momentum --&gt; Bottom Friction\nProperties of momentum bottom friction in ocean\n38.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of momentum bottom friction in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Linear\" \n# \"Non-linear\" \n# \"Non-linear (drag function of speed of tides)\" \n# \"Constant drag coefficient\" \n# \"None\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "39. 
Boundary Forcing --&gt; Momentum --&gt; Lateral Friction\nProperties of momentum lateral friction in ocean\n39.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of momentum lateral friction in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Free-slip\" \n# \"No-slip\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "40. Boundary Forcing --&gt; Tracers --&gt; Sunlight Penetration\nProperties of sunlight penetration scheme in ocean\n40.1. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of sunlight penetration scheme in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"1 extinction depth\" \n# \"2 extinction depth\" \n# \"3 extinction depth\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "40.2. Ocean Colour\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the ocean sunlight penetration scheme ocean colour dependent ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "40.3. Extinction Depth\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe and list extinctions depths for sunlight penetration scheme (if applicable).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "41. Boundary Forcing --&gt; Tracers --&gt; Fresh Water Forcing\nProperties of surface fresh water forcing in ocean\n41.1. From Atmopshere\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of surface fresh water forcing from atmos in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Freshwater flux\" \n# \"Virtual salt flux\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "41.2. From Sea Ice\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of surface fresh water forcing from sea-ice in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Freshwater flux\" \n# \"Virtual salt flux\" \n# \"Real salt flux\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "41.3. Forced Mode Restoring\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of surface salinity restoring in forced mode (OMIP)", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
abulbasar/machine-learning
Scikit - 21 Kaggle House price prediction (regression).ipynb
apache-2.0
[ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn.linear_model import LinearRegression, LassoCV, RidgeCV, Lasso\nfrom sklearn.metrics import mean_squared_error\n\nimport xgboost as xgb\nimport seaborn as sns\n\npd.set_option('display.max_columns', None)\n\n%matplotlib inline\n\ndf = pd.read_csv(\"data/kaggle-house-prices/data_combined_cleaned.csv\")\ndf.info()\n\ndf.head(10)\n\ndf_dummy = pd.get_dummies(df, drop_first=True)\ndf_dummy.info()\n\ndf_training = df_dummy[~np.isnan(df.SalesPrice)]\ndf_testing = df_dummy[np.isnan(df.SalesPrice)]\ndf_training.shape, df_testing.shape\n\nplt.subplot(1, 2, 1)\ndf_training.SalesPrice.hist(bins = 100)\nplt.subplot(1, 2, 2)\ndf_training.SalesPrice.plot.box()\nplt.tight_layout()\n\ny = np.log(df_training.SalesPrice.values)\ndf_tmp = df_training.copy()\ndel df_tmp[\"SalesPrice\"]\ndel df_tmp[\"Id\"]\nX = df_tmp.values\n\ndf_tmp.head(4)\n\nplt.subplot(1, 2, 1)\npd.Series(y).plot.hist(bins = 100)\nplt.subplot(1, 2, 2)\npd.Series(y).plot.box()\nplt.tight_layout()\n\npd.DataFrame(X).describe()\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 1)\n\nscaler = StandardScaler()\nX_train_std = scaler.fit_transform(X_train)\nX_test_std = scaler.fit_transform(X_test)\n\ndef rmse(y_true, y_pred):\n return np.sqrt(mean_squared_error(y_true, y_pred))\n\nlr = LinearRegression()\nlr.fit(X_train_std, y_train)\nrmse(y_test, lr.predict(X_test_std))", "Seems that Linear regression model performed very poorly. Most likely it is because model finds a lot of collinearity in the data due to the categorical columns.\nTest lasso, which is more robust against multi collinearity.", "lasso = Lasso(random_state=1, max_iter=10000)\nlasso.fit(X_train_std, y_train)\nrmse(y_test, lasso.predict(X_test_std))", "This rmse score seems reasonable. 
Find the cross-validation scores.", "scores = cross_val_score(cv=10, estimator=lasso, scoring=\"neg_mean_squared_error\", X=X_train_std, y=y_train)\nscores = np.sqrt(-scores)\nscores\n\nfrom sklearn import linear_model\nfrom sklearn import metrics\nfrom sklearn import tree\nfrom sklearn import ensemble\nfrom sklearn import neighbors\nimport xgboost as xgb \n\nrs = 1\nestimators = { \n #'Linear': linear_model.LinearRegression(), \n 'Ridge': linear_model.Ridge(random_state=rs, max_iter=10000), \n 'Lasso': linear_model.Lasso(random_state=rs, max_iter=10000), \n 'ElasticNet': linear_model.ElasticNet(random_state=rs, max_iter=10000),\n 'BayesRidge': linear_model.BayesianRidge(),\n 'OMP': linear_model.OrthogonalMatchingPursuit(),\n 'DecisionTree': tree.DecisionTreeRegressor(max_depth=10, random_state=rs),\n 'RandomForest': ensemble.RandomForestRegressor(random_state=rs),\n 'KNN': neighbors.KNeighborsRegressor(n_neighbors=5),\n 'GradientBoostingRegressor': ensemble.GradientBoostingRegressor(n_estimators=300, max_depth=4, learning_rate=0.01, loss=\"ls\", random_state=rs),\n 'xgboost': xgb.XGBRegressor(max_depth=10)\n}\n\n# Fit every estimator and record its test-set RMSE\nerrvals = {}\n\nfor k in estimators:\n e = estimators[k]\n e.fit(X_train_std, y_train)\n err = np.sqrt(metrics.mean_squared_error(y_test, e.predict(X_test_std)))\n errvals[k] = err\n\n# pd.Series.from_array was removed from pandas; the plain constructor handles a dict\nresult = pd.Series(errvals).sort_values()\nresult.plot.barh(width = 0.8)\nfor i, error in enumerate(result):\n plt.text(x = 0.01, y = i - 0.1, s = \"%.3f\" % error, fontweight='bold', color = \"white\")\nplt.title(\"Performance comparison of algorithms\")" ]
[ "code", "markdown", "code", "markdown", "code" ]
mne-tools/mne-tools.github.io
0.20/_downloads/da7833d7ecb983c452e4eef48095435a/plot_stats_cluster_spatio_temporal_repeated_measures_anova.ipynb
bsd-3-clause
[ "%matplotlib inline", "Repeated measures ANOVA on source data with spatio-temporal clustering\nThis example illustrates how to make use of the clustering functions\nfor arbitrary, self-defined contrasts beyond standard t-tests. In this\ncase we will tests if the differences in evoked responses between\nstimulation modality (visual VS auditory) depend on the stimulus\nlocation (left vs right) for a group of subjects (simulated here\nusing one subject's data). For this purpose we will compute an\ninteraction effect using a repeated measures ANOVA. The multiple\ncomparisons problem is addressed with a cluster-level permutation test\nacross space and time.", "# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>\n# Eric Larson <larson.eric.d@gmail.com>\n# Denis Engemannn <denis.engemann@gmail.com>\n#\n# License: BSD (3-clause)\n\nimport os.path as op\nimport numpy as np\nfrom numpy.random import randn\nimport matplotlib.pyplot as plt\n\nimport mne\nfrom mne.stats import (spatio_temporal_cluster_test, f_threshold_mway_rm,\n f_mway_rm, summarize_clusters_stc)\n\nfrom mne.minimum_norm import apply_inverse, read_inverse_operator\nfrom mne.datasets import sample\n\nprint(__doc__)", "Set parameters", "data_path = sample.data_path()\nraw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'\nevent_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'\nsubjects_dir = data_path + '/subjects'\nsrc_fname = subjects_dir + '/fsaverage/bem/fsaverage-ico-5-src.fif'\n\ntmin = -0.2\ntmax = 0.3 # Use a lower tmax to reduce multiple comparisons\n\n# Setup for reading the raw data\nraw = mne.io.read_raw_fif(raw_fname)\nevents = mne.read_events(event_fname)", "Read epochs for all channels, removing a bad one", "raw.info['bads'] += ['MEG 2443']\npicks = mne.pick_types(raw.info, meg=True, eog=True, exclude='bads')\n# we'll load all four conditions that make up the 'two ways' of our ANOVA\n\nevent_id = dict(l_aud=1, r_aud=2, l_vis=3, r_vis=4)\nreject = dict(grad=1000e-13, mag=4000e-15, eog=150e-6)\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n baseline=(None, 0), reject=reject, preload=True)\n\n# Equalize trial counts to eliminate bias (which would otherwise be\n# introduced by the abs() performed below)\nepochs.equalize_event_counts(event_id)", "Transform to source space", "fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'\nsnr = 3.0\nlambda2 = 1.0 / snr ** 2\nmethod = \"dSPM\" # use dSPM method (could also be MNE, sLORETA, or eLORETA)\ninverse_operator = read_inverse_operator(fname_inv)\n\n# we'll only use one hemisphere to speed up this example\n# instead of a second vertex array we'll pass an empty array\nsample_vertices = [inverse_operator['src'][0]['vertno'], np.array([], int)]\n\n# Let's average and compute inverse, then resample to speed things up\nconditions = []\nfor cond in ['l_aud', 'r_aud', 'l_vis', 'r_vis']: # order is important\n evoked = epochs[cond].average()\n evoked.resample(50, npad='auto')\n condition = apply_inverse(evoked, inverse_operator, lambda2, method)\n # Let's only deal with t > 0, cropping to reduce multiple comparisons\n condition.crop(0, None)\n conditions.append(condition)\n\ntmin = conditions[0].tmin\ntstep = conditions[0].tstep * 1000 # convert to milliseconds", "Transform to common cortical space\nNormally you would read in estimates across several subjects and morph them\nto the same cortical space (e.g. fsaverage). 
For example purposes, we will\nsimulate this by just having each \"subject\" have the same response (just\nnoisy in source space) here.\nWe'll only consider the left hemisphere in this tutorial.", "n_vertices_sample, n_times = conditions[0].lh_data.shape\nn_subjects = 7\nprint('Simulating data for %d subjects.' % n_subjects)\n\n# Let's make sure our results replicate, so set the seed.\nnp.random.seed(0)\nX = randn(n_vertices_sample, n_times, n_subjects, 4) * 10\nfor ii, condition in enumerate(conditions):\n X[:, :, :, ii] += condition.lh_data[:, :, np.newaxis]", "It's a good idea to spatially smooth the data, and for visualization\npurposes, let's morph these to fsaverage, which is a grade 5 ICO source space\nwith vertices 0:10242 for each hemisphere. Usually you'd have to morph\neach subject's data separately, but here since all estimates are on\n'sample' we can use one morph matrix for all the heavy lifting.", "# Read the source space we are morphing to (just left hemisphere)\nsrc = mne.read_source_spaces(src_fname)\nfsave_vertices = [src[0]['vertno'], []]\nmorph_mat = mne.compute_source_morph(\n src=inverse_operator['src'], subject_to='fsaverage',\n spacing=fsave_vertices, subjects_dir=subjects_dir, smooth=20).morph_mat\nmorph_mat = morph_mat[:, :n_vertices_sample] # just left hemi from src\nn_vertices_fsave = morph_mat.shape[0]\n\n# We have to change the shape for the dot() to work properly\nX = X.reshape(n_vertices_sample, n_times * n_subjects * 4)\nprint('Morphing data.')\nX = morph_mat.dot(X) # morph_mat is a sparse matrix\nX = X.reshape(n_vertices_fsave, n_times, n_subjects, 4)", "Now we need to prepare the group matrix for the ANOVA statistic. To make the\nclustering function work correctly with the ANOVA function, X needs to be a\nlist of multi-dimensional arrays (one per condition) of shape: samples\n(subjects) x time x space.\nFirst we permute dimensions, then split the array into a list of conditions\nand discard the empty dimension resulting from the split using numpy squeeze.", "X = np.transpose(X, [2, 1, 0, 3])\nX = [np.squeeze(x) for x in np.split(X, 4, axis=-1)]", "Prepare function for arbitrary contrast\nAs our ANOVA function is a multi-purpose tool we need to apply a few\nmodifications to integrate it with the clustering function. This\nincludes reshaping data, setting default arguments and processing\nthe return values. For this reason we'll write a tiny dummy function.\nWe will tell the ANOVA how to interpret the data matrix in terms of\nfactors. This is done via the factor levels argument, which is a list\nof the number of factor levels for each factor.", "factor_levels = [2, 2]", "Finally we will pick the interaction effect by passing 'A:B'.\n(This notation is borrowed from the R formula language.)\nAs an aside, note that in this particular example, we cannot use the A*B\nnotation, which returns both the main and the interaction effects. The reason\nis that the clustering function expects stat_fun to return a 1-D array.\nTo get clusters for both, you must create a loop.", "effects = 'A:B'\n# Tell the ANOVA not to compute p-values which we don't need for clustering\nreturn_pvals = False\n\n# a few more convenient bindings\nn_times = X[0].shape[1]\nn_conditions = 4", "A stat_fun must deal with a variable number of input arguments.\nInside the clustering function each condition will be passed as a flattened\narray, necessitated by the clustering procedure. 
The ANOVA however expects an\ninput array of dimensions: subjects X conditions X observations (optional).\nThe following function catches the list input, swaps the first and the\nsecond dimension, and finally calls the ANOVA.\n<div class=\"alert alert-info\"><h4>Note</h4><p>For further details on this ANOVA function consider the\n corresponding\n `time-frequency tutorial <tut-timefreq-twoway-anova>`.</p></div>", "def stat_fun(*args):\n # get f-values only.\n return f_mway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,\n effects=effects, return_pvals=return_pvals)[0]", "Compute clustering statistic\nTo use an algorithm optimized for spatio-temporal clustering, we\njust pass the spatial connectivity matrix (instead of spatio-temporal).", "# as we only have one hemisphere we only need half the connectivity\nprint('Computing connectivity.')\nconnectivity = mne.spatial_src_connectivity(src[:1])\n\n# Now let's actually do the clustering. Be patient: on a small\n# notebook with a single thread this will take a couple of minutes ...\npthresh = 0.0005\nf_thresh = f_threshold_mway_rm(n_subjects, factor_levels, effects, pthresh)\n\n# To speed things up a bit we will ...\nn_permutations = 128 # ... run fewer permutations (reduces sensitivity)\n\nprint('Clustering.')\nT_obs, clusters, cluster_p_values, H0 = clu = \\\n spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=1,\n threshold=f_thresh, stat_fun=stat_fun,\n n_permutations=n_permutations,\n buffer_size=None)\n# Now select the clusters that are sig. at p < 0.05 (note that this value\n# is multiple-comparisons corrected).\ngood_cluster_inds = np.where(cluster_p_values < 0.05)[0]", "Visualize the clusters", "print('Visualizing clusters.')\n\n# Now let's build a convenient representation of each cluster, where each\n# cluster becomes a \"time point\" in the SourceEstimate\nstc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,\n vertices=fsave_vertices,\n subject='fsaverage')\n\n# Let's actually plot the first \"time point\" in the SourceEstimate, which\n# shows all the clusters, weighted by duration\n\nsubjects_dir = op.join(data_path, 'subjects')\n# The brighter the color, the stronger the interaction between\n# stimulus modality and stimulus location\n\nbrain = stc_all_cluster_vis.plot(subjects_dir=subjects_dir, views='lat',\n time_label='temporal extent (ms)',\n clim=dict(kind='value', lims=[0, 1, 40]))\nbrain.save_image('cluster-lh.png')\nbrain.show_view('medial')", "Finally, let's investigate the interaction effect by reconstructing the time\ncourses:", "inds_t, inds_v = [(clusters[cluster_ind]) for ii, cluster_ind in\n enumerate(good_cluster_inds)][0] # first cluster\n\ntimes = np.arange(X[0].shape[1]) * tstep * 1e3\n\nplt.figure()\ncolors = ['y', 'b', 'g', 'purple']\nevent_ids = ['l_aud', 'r_aud', 'l_vis', 'r_vis']\n\nfor ii, (condition, color, eve_id) in enumerate(zip(X, colors, event_ids)):\n # extract time course at cluster vertices\n condition = condition[:, :, inds_v]\n # normally we would normalize values across subjects but\n # here we use data from the same subject so we're good to just\n # create average time series across subjects and vertices.\n mean_tc = condition.mean(axis=2).mean(axis=0)\n std_tc = condition.std(axis=2).std(axis=0)\n plt.plot(times, mean_tc.T, color=color, label=eve_id)\n plt.fill_between(times, mean_tc + std_tc, mean_tc - std_tc, color='gray',\n alpha=0.5, label='')\n\nymin, ymax = mean_tc.min() - 5, mean_tc.max() + 5\nplt.xlabel('Time (ms)')\nplt.ylabel('Activation 
(F-values)')\nplt.xlim(times[[0, -1]])\nplt.ylim(ymin, ymax)\nplt.fill_betweenx((ymin, ymax), times[inds_t[0]],\n times[inds_t[-1]], color='orange', alpha=0.3)\nplt.legend()\nplt.title('Interaction between stimulus-modality and location.')\nplt.show()" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ES-DOC/esdoc-jupyterhub
notebooks/nasa-giss/cmip6/models/giss-e2-1h/toplevel.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Toplevel\nMIP Era: CMIP6\nInstitute: NASA-GISS\nSource ID: GISS-E2-1H\nSub-Topics: Radiative Forcings. \nProperties: 85 (42 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:54:20\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'nasa-giss', 'giss-e2-1h', 'toplevel')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties\n2. Key Properties --&gt; Flux Correction\n3. Key Properties --&gt; Genealogy\n4. Key Properties --&gt; Software Properties\n5. Key Properties --&gt; Coupling\n6. Key Properties --&gt; Tuning Applied\n7. Key Properties --&gt; Conservation --&gt; Heat\n8. Key Properties --&gt; Conservation --&gt; Fresh Water\n9. Key Properties --&gt; Conservation --&gt; Salt\n10. Key Properties --&gt; Conservation --&gt; Momentum\n11. Radiative Forcings\n12. Radiative Forcings --&gt; Greenhouse Gases --&gt; CO2\n13. Radiative Forcings --&gt; Greenhouse Gases --&gt; CH4\n14. Radiative Forcings --&gt; Greenhouse Gases --&gt; N2O\n15. Radiative Forcings --&gt; Greenhouse Gases --&gt; Tropospheric O3\n16. Radiative Forcings --&gt; Greenhouse Gases --&gt; Stratospheric O3\n17. Radiative Forcings --&gt; Greenhouse Gases --&gt; CFC\n18. Radiative Forcings --&gt; Aerosols --&gt; SO4\n19. Radiative Forcings --&gt; Aerosols --&gt; Black Carbon\n20. Radiative Forcings --&gt; Aerosols --&gt; Organic Carbon\n21. Radiative Forcings --&gt; Aerosols --&gt; Nitrate\n22. Radiative Forcings --&gt; Aerosols --&gt; Cloud Albedo Effect\n23. Radiative Forcings --&gt; Aerosols --&gt; Cloud Lifetime Effect\n24. Radiative Forcings --&gt; Aerosols --&gt; Dust\n25. Radiative Forcings --&gt; Aerosols --&gt; Tropospheric Volcanic\n26. Radiative Forcings --&gt; Aerosols --&gt; Stratospheric Volcanic\n27. Radiative Forcings --&gt; Aerosols --&gt; Sea Salt\n28. Radiative Forcings --&gt; Other --&gt; Land Use\n29. Radiative Forcings --&gt; Other --&gt; Solar \n1. Key Properties\nKey properties of the model\n1.1. Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTop level overview of coupled model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of coupled model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Flux Correction\nFlux correction properties of the model\n2.1. 
Details\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe if/how flux corrections are applied in the model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.flux_correction.details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3. Key Properties --&gt; Genealogy\nGenealogy and history of the model\n3.1. Year Released\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nYear the model was released", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3.2. CMIP3 Parent\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCMIP3 parent if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3.3. CMIP5 Parent\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCMIP5 parent if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3.4. Previous Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nPreviously known as", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Software Properties\nSoftware properties of model\n4.1. Repository\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nLocation of code for this component.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.2. Code Version\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCode version identifier.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.3. Code Languages\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nCode language(s).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.4. Components Structure\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe how model realms are structured into independent software components (coupled via a coupler) and internal software components.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.5. 
Coupler\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nOverarching coupling framework for model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"OASIS\" \n# \"OASIS3-MCT\" \n# \"ESMF\" \n# \"NUOPC\" \n# \"Bespoke\" \n# \"Unknown\" \n# \"None\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "5. Key Properties --&gt; Coupling\n5.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of coupling in the model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.coupling.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.2. Atmosphere Double Flux\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the atmosphere passing a double flux to the ocean and sea ice (as opposed to a single one)?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "5.3. Atmosphere Fluxes Calculation Grid\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nWhere are the air-sea fluxes calculated", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Atmosphere grid\" \n# \"Ocean grid\" \n# \"Specific coupler grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "5.4. Atmosphere Relative Winds\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nAre relative or absolute winds used to compute the flux? I.e. do ocean surface currents enter the wind stress calculation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6. Key Properties --&gt; Tuning Applied\nTuning methodology for model\n6.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics/diagnostics retained. Document the relative weight given to climate performance metrics/diagnostics versus process oriented metrics/diagnostics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.2. Global Mean Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList set of metrics/diagnostics of the global mean state used in tuning model", "# PROPERTY ID - DO NOT EDIT ! 
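\n# Illustrative examples of entries one might record below (hypothetical, \n# not this model's actual tuning targets): \n# DOC.set_value(\"Global mean net TOA radiative flux\") \n# DOC.set_value(\"Global mean surface air temperature\") 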
\nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.3. Regional Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList of regional metrics/diagnostics of mean state (e.g. THC, AABW, regional means, etc.) used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.4. Trend Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList observed trend metrics/diagnostics used in tuning model/component (such as 20th century)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.5. Energy Balance\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how energy balance was obtained in the full system: in the various components independently or at the components coupling stage?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.6. Fresh Water Balance\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how fresh water balance was obtained in the full system: in the various components independently or at the components coupling stage?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7. Key Properties --&gt; Conservation --&gt; Heat\nGlobal heat conservation properties of the model\n7.1. Global\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe if/how heat is conserved globally", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. Atmos Ocean Interface\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how heat is conserved at the atmosphere/ocean coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.3. Atmos Land Interface\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe if/how heat is conserved at the atmosphere/land coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.4. 
Atmos Sea-ice Interface\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how heat is conserved at the atmosphere/sea-ice coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.5. Ocean Seaice Interface\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how heat is conserved at the ocean/sea-ice coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.6. Land Ocean Interface\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how heat is conserved at the land/ocean coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8. Key Properties --&gt; Conservation --&gt; Fresh Water\nGlobal fresh water conservation properties of the model\n8.1. Global\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe if/how fresh water is conserved globally", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Atmos Ocean Interface\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how fresh water is conserved at the atmosphere/ocean coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.3. Atmos Land Interface\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe if/how fresh water is conserved at the atmosphere/land coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.4. Atmos Sea-ice Interface\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how fresh water is conserved at the atmosphere/sea-ice coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.5. Ocean Seaice Interface\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how fresh water is conserved at the ocean/sea-ice coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.6. 
Runoff\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe how runoff is distributed and conserved", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.7. Iceberg Calving\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how iceberg calving is modeled and conserved", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.8. Endoreic Basins\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how endoreic basins (no ocean access) are treated", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.9. Snow Accumulation\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe how snow accumulation over land and over sea-ice is treated", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9. Key Properties --&gt; Conservation --&gt; Salt\nGlobal salt conservation properties of the model\n9.1. Ocean Seaice Interface\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how salt is conserved at the ocean/sea-ice coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Key Properties --&gt; Conservation --&gt; Momentum\nGlobal momentum conservation properties of the model\n10.1. Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how momentum is conserved in the model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11. Radiative Forcings\nRadiative forcings of the model for historical and scenario (aka Table 12.1 IPCC AR5)\n11.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of radiative forcings (GHG and aerosols) implementation in model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12. Radiative Forcings --&gt; Greenhouse Gases --&gt; CO2\nCarbon dioxide forcing\n12.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! 
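\n# Illustrative usage (hypothetical; pick from the valid choices listed below): \n# DOC.set_value(\"C\") 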
\nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "12.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13. Radiative Forcings --&gt; Greenhouse Gases --&gt; CH4\nMethane forcing\n13.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14. Radiative Forcings --&gt; Greenhouse Gases --&gt; N2O\nNitrous oxide forcing\n14.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15. Radiative Forcings --&gt; Greenhouse Gases --&gt; Tropospheric O3\nTropospheric ozone forcing\n15.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. 
via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "16. Radiative Forcings --&gt; Greenhouse Gases --&gt; Stratospheric O3\nStratospheric ozone forcing\n16.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17. Radiative Forcings --&gt; Greenhouse Gases --&gt; CFC\nOzone-depleting and non-ozone-depleting fluorinated gases forcing\n17.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.2. Equivalence Concentration\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDetails of any equivalence concentrations used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"Option 1\" \n# \"Option 2\" \n# \"Option 3\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.3. 
Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18. Radiative Forcings --&gt; Aerosols --&gt; SO4\nSO4 aerosol forcing\n18.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "18.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "19. Radiative Forcings --&gt; Aerosols --&gt; Black Carbon\nBlack carbon aerosol forcing\n19.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "19.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "20. Radiative Forcings --&gt; Aerosols --&gt; Organic Carbon\nOrganic carbon aerosol forcing\n20.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "20.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "21. Radiative Forcings --&gt; Aerosols --&gt; Nitrate\nNitrate forcing\n21.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "21.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22. Radiative Forcings --&gt; Aerosols --&gt; Cloud Albedo Effect\nCloud albedo effect forcing (RFaci)\n22.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "22.2. Aerosol Effect On Ice Clouds\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nRadiative effects of aerosols on ice clouds are represented?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "22.3. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "23. Radiative Forcings --&gt; Aerosols --&gt; Cloud Lifetime Effect\nCloud lifetime effect forcing (ERFaci)\n23.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23.2. Aerosol Effect On Ice Clouds\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nRadiative effects of aerosols on ice clouds are represented?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "23.3. RFaci From Sulfate Only\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nRadiative forcing from aerosol cloud interactions from sulfate aerosol only?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "23.4. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "24. Radiative Forcings --&gt; Aerosols --&gt; Dust\nDust forcing\n24.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "24.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "25. Radiative Forcings --&gt; Aerosols --&gt; Tropospheric Volcanic\nTropospheric volcanic forcing\n25.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "25.2. Historical Explosive Volcanic Aerosol Implementation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow explosive volcanic aerosol is implemented in historical simulations", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Type A\" \n# \"Type B\" \n# \"Type C\" \n# \"Type D\" \n# \"Type E\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "25.3. Future Explosive Volcanic Aerosol Implementation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow explosive volcanic aerosol is implemented in future simulations", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Type A\" \n# \"Type B\" \n# \"Type C\" \n# \"Type D\" \n# \"Type E\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "25.4. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "26. Radiative Forcings --&gt; Aerosols --&gt; Stratospheric Volcanic\nStratospheric volcanic forcing\n26.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "26.2. 
Historical Explosive Volcanic Aerosol Implementation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow explosive volcanic aerosol is implemented in historical simulations", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Type A\" \n# \"Type B\" \n# \"Type C\" \n# \"Type D\" \n# \"Type E\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "26.3. Future Explosive Volcanic Aerosol Implementation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow explosive volcanic aerosol is implemented in future simulations", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Type A\" \n# \"Type B\" \n# \"Type C\" \n# \"Type D\" \n# \"Type E\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "26.4. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "27. Radiative Forcings --&gt; Aerosols --&gt; Sea Salt\nSea salt forcing\n27.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "27.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "28. Radiative Forcings --&gt; Other --&gt; Land Use\nLand use forcing\n28.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "28.2. Crop Change Only\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nLand use change represented via crop change only?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "28.3. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "29. Radiative Forcings --&gt; Other --&gt; Solar\nSolar forcing\n29.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow solar forcing is provided", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"irradiance\" \n# \"proton\" \n# \"electron\" \n# \"cosmic ray\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "29.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
AeroPython/Taller-PyConEs-2015
Ejercicios/Optimizacion de rotor/Optimizacion con algoritmo genetico.ipynb
mit
[ "Ejercicio: Algoritmo genético para optimizar un rotor o hélice, paso a paso\nEl problema\nA menudo, en ingeniería, cuando nos enfrentamos a un problema, no podemos resolver directamente o despejar la solución como en los problemas sencillos típicos de matemáticas o física clásica. Una manera muy típica en la que nos encontraremos los problemas es en la forma de simulación: tenemos una serie de parámetros y un modelo, y podemos simularlo para obtener sus características, pero sin tener ninguna fórmula explícita que relacione parámetros y resultados y que nos permita obtener una función inversa.\nEn este ejercicio, nos plantearemos un problema de ese tipo: tenemos una función que calcula las propiedades de una hélice en función de una serie de parámetros, pero no conocemos los cálculos que hace internamente. Para nosotros, es una caja negra.\nPara optimizar, iremos recuperando las funciones del algoritmo genético que se vieron en la parte de teoría.", "%matplotlib inline\nimport numpy as np # Trabajaremos con arrays\nimport matplotlib.pyplot as plt # Y vamos a pintar gráficos\nfrom optrot.rotor import calcular_rotor # Esta función es la que vamos a usar para calcular el rotor\nimport random as random # Necesitaremos números aleatorios", "Empecemos echando un ojo a la función del rotor, para ver qué vamos a necesitar y con qué parámetros vamos a trabajar.", "help(calcular_rotor)", "Podemos trazar unas cuantas curvas para observar qué pinta va a tener lo que saquemos. Por ejemplo, cómo cambian las características de la hélice dependiendo de la velocidad de vuelo, para una hélice de ejemplo que gira a uyna velocidad dada.", "vel = np.linspace(0, 30, 100)\nefic = np.zeros_like(vel)\nT = np.zeros_like(vel)\nP = np.zeros_like(vel)\nmach = np.zeros_like(vel)\nfor i in range(len(vel)):\n T[i], P[i], efic[i], mach[i] = calcular_rotor(130, vel[i], 0.5, 3)\n\nplt.plot(vel, T)\nplt.title('Tracción de la hélice')\n\nplt.plot(vel, P)\nplt.title('Potencia consumida')\n\nplt.plot(vel, efic)\nplt.title('Eficiencia de la hélice')\n\nplt.plot(vel, mach)\nplt.title('Mach en la punta de las palas')", "Definiendo el genoma\nDefinamos un individuo genérico: Cada individuo será un posible diseño del rotor, con unas características determinadas.", "class Individual (object):\n \n def __init__(self, genome):\n \n self.genome = genome \n self.traits = {}\n self.performances = {}\n self.fitness = 0", "Nuestro rotor depende de varios parámetros, pero en general, buscaremos optimizar el valor de unos, mateniendo un valor controlado de otros. 
Por ejemplo, la velocidad de avance y la altitud normalmente las impondremos, ya que querremos optimizar para una velocidad y altura de vuelos dadas.\nEn nuestro algoritmo, usaremos como genoma los parámetros de optimización, y las variables circunstanciales las controlaremos a mano.\nSugerencia (esta es una manera de organizar las variables, aunque puedes escoger otras)\nParámetros de optimización: \n\nomega (velocidad de rotación) (Entre 0 y 200 radianes/segundo)\nR (radio de la hélice) (Entre 0.1 y 2 metros)\nb (número de palas) (Entre 2 y 5 palas) \ntheta0 (ángulo de paso colectivo) (Entre -0.26 y 0.26 radianes)(se corresponde a -15 y 15 grados)\np (parámetro de torsión) (Entre -5 y 20 grados)\ncuerda (anchura de la pala) (Entre 0.01 y 0.2 metros)\n\nParámetros circunstanciales:\n\nvz (velocidad de vuelo)\nh (altura de vuelo)\n\nVariables que se van a mantener\n\nley de torsión (hiperbólica)\nformato de chord params: un solo número, para que la anchura sea constante a lo largo de la pala", "15 * np.pi / 180\n", "A continuación crearemos un diccionario de genes. En él iremos almacenando los nombres de los parámetros y la cantidad de bits que usaremos para definirlos. Cuantos más bits, más resolución\nEj: 1 bit : 2 valores, 2 bit : 4 valores, 10 bit : 1024 valores", "#Completa este diccionario con las variables que hayas elegido y los bits que usarás\n\ndict_genes = {\n 'omega' : 10,\n 'R': 10,\n 'b': 2\n}", "Ahora, crearemos una función que rellene estos genomas con datos aleatorios:", "def generate_genome (dict_genes):\n \n #Calculamos el número total de bits con un bucle que recorra el diccionario\n \n n_bits = ?\n \n #Generamos un array aletorio de 1 y 0 de esa longitud con numpy\n genome = np.random.randint(0, 2, nbits)\n \n #Transformamos el array en una lista antes de devolverlo\n return list(genome)\n\n# Podemos probar a usar nuestra función, para ver qué pinta tiene el ADN de un rotor:\ngenerate_genome(dict_genes)", "Trabajando con el individuo\nAhora necesitamos una función que transforme esos genes a valores con sentido. Cada gen es un número binario cuyo valor estará entre 0 y 2 ^ n, siendo n el número de bits que hayamos escogido. Estas variables traducidas las guardaremos en otro diccionario, ya con su valor. Estos genes no están volando por ahí sueltos, sino que estarán guardados en el interior del individuo al que pertenezcan, por lo que la función deberá estar preparada para extraerlos del individuo, y guardar los resultados a su vez en el interior del individuo.", "def calculate_traits (individual, dict_genes):\n \n \n genome = individual.genome \n \n integer_temporal_list = []\n \n for gen in dict_genes: #Recorremos el diccionario de genes para ir traduciendo del binario\n \n ??? #Buscamos los bits que se corresponden al bit en cuestión\n \n ??? #Pasamos de lista binaria a número entero\n \n integer_temporal_list.append(??) 
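\n # (Hint, an assumed approach: with the bits of a gene in a plain Python list,\n # e.g. bits = [1, 0, 1], the expression int(''.join(str(b) for b in bits), 2)\n # returns 5, turning the bit slice into its integer value.)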
\n \n # Now we turn each integer into a physically meaningful variable:\n # For example, the integer for the variable Omega lies between 0 and 1023 (10 bits), \n # but the real Omega must lie between 0 and 200 radians per second:\n omega = integer_temporal_list[0] * 200 / 1023\n \n # in the same way, for R:\n R = 0.1 + integer_temporal_list[1] * 1.9 / 1023 # We obtain a radius between 0.1 and 2 meters\n \n # The number of blades must be an integer, so be careful:\n b = integer_temporal_list[2] + 2 # (between 2 and 5 blades)\n \n # Continue with the rest of the variables you have chosen!\n \n dict_traits = { # Here we store the traits, i.e. the parameters \n 'omega' : omega,\n 'R': R\n } \n individual.traits = dict_traits # Finally, we store the traits inside the individual", "The next step is to use these traits (parameters) to compute the performances (the characteristics) of the rotor. This is where the rotor model itself comes in.", "def calculate_performances (individual):\n dict_traits = individual.traits\n \n # We can impose our circumstances here, or pass them in as function arguments\n h = 2000 # Flight altitude in meters\n vz = 70 # Forward speed in m/s, about 250 km/h\n \n # We extract the traits from the dictionary:\n omega = dict_traits['omega']\n R = dict_traits['R']\n #... etc\n \n T, P, efic, mach_punta = calcular_rotor(omega, vz, R, b, h...) # Fill in the variables you use as parameters.\n # Check the help to make sure you are using the \n # correct call signature!\n dict_perfo = { \n 'T' : T, # Propeller thrust\n 'P' : P, # Power consumed by the propeller\n 'efic': efic, # Propulsive efficiency of the propeller\n 'mach_punta': mach_punta # Mach at the blade tips\n }\n \n individual.performances = dict_perfo", "Let's check that everything works!", "individuo = Individual(generate_genome(dict_genes))\ncalculate_traits(individuo, dict_genes)\ncalculate_performances(individuo)\n\nprint(individuo.traits)\nprint(individuo.performances)", "The last step we have to carry out on the individual is one of the most critical: turning the performances into a single value (fitness) that expresses how good the individual is with respect to the optimization objective. The fitness function may depend on both parameters (traits) and performances, depending on what we want to optimize.\nFor example, if we were after maximum thrust and cared about nothing else, the fitness value would simply equal T:\nfitness = T\n\nIf we want to impose constraints, for example that the power be below 1000 watts, we can add statements such as:\nif P &gt; 1000:\n fitness -= 1000\n\nThe fitness can be made to depend on several parameters in a weighted way:\nfitness = important_parameter * 10 + less_important_parameter * 0.5\n\nDifferent nonlinear functions can also be combined:\nfitness = parameter_1 * parameter_2 - parameter_3 **2 * log(parameter_4)\n\n
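A minimal sketch putting these pieces together (illustrative only, with assumed weights and thresholds, using the performance names defined above):\nT = individual.performances['T']\nP = individual.performances['P']\nefic = individual.performances['efic']\nfitness = efic * 10 - P / 1000\nif T &lt; 30:\n fitness -= 1000\n\n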
Now it's your turn to be creative! Choose the objective you want to optimize the propeller for!\nSome suggested optimization objectives:\n\nSmallest possible radius, while keeping a minimum thrust of 30 Newtons\nLowest possible power and maximum efficiency, with the smallest possible radius as a secondary goal, while keeping a minimum thrust of 40 Newtons and a blade-tip Mach of at most 0.7\nLowest possible power and maximum efficiency when flying at 70 m/s, thrust above 50 Newtons at takeoff (vz = 0), lowest possible weight (computed from the radius, number and width of the blades) (You may have to rewrite the performances function and dictionary!)", "def calculate_fitness (individual):\n \n dict_traits = individual.traits\n dict_performances = individual.performances\n \n fitness = ????? # Be creative!\n \n \n individual.fitness = fitness", "We now have everything we need at the individual level!\nLet the Games begin!\nIt is time to work at the algorithm level and, for that, the first step is to create a society made up of random individuals. Let's define a function for it.", "def immigration (society, target_population, dict_genes):\n \n while len(society) < target_population:\n \n new_individual = Individual (generate_genome (dict_genes)) # We generate a random individual\n calculate_traits (new_individual, dict_genes) # We compute its traits\n calculate_performances (new_individual) # We compute its performances\n calculate_fitness (new_individual) # We compute its fitness\n \n society.append (new_individual) # Our new citizen is ready to join the group! ", "Now we can create our society:", "society = []\nimmigration (society, 12, dict_genes) # 12 as an example, it could be any number\n\n\n# Let's see what the genes of the population look like\nplt.matshow([individual.genome for individual in society], cmap=plt.cm.gray)", "We now have our little society; let's grow it a bit more by crossing the citizens with the best fitness! We are going to extend our population by mixing the genomes of individuals. Individuals with better fitness are more likely to reproduce. In addition, we will introduce slight random mutations in the new individuals.", "#This function was taken from Eli Bendersky's website\n#It returns an index of a list called \"weights\", \n#where the content of each element in \"weights\" is the probability of this index to be returned.\n#For this function to be as fast as possible we need to pass it a list of weights in descending order.\ndef weighted_choice_sub(weights):\n \n rnd = random.random() * sum(weights)\n for i, w in enumerate(weights):\n rnd -= w\n if rnd < 0:\n return i\n\ndef crossover (society, reproduction_rate, mutation_rate):\n \n #First we create a list with the fitness values of every individual in the society\n fitness_list = [individual.fitness for individual in society]\n \n #We sort the individuals in the society in descending order of fitness. \n society_sorted = [x for (y, x) in sorted(zip(fitness_list, society), key=lambda x: x[0], reverse=True)] \n \n #We then create a list of relative probabilities in descending order, \n #so that the fittest individual in the society has N times more chances to reproduce than the least fit,\n #where N is the number of individuals in the society.\n probability = [i for i in reversed(range(1,len(society_sorted)+1))]\n \n #We create a list of weights with the probabilities of non-mutation and mutation\n mutation = [1 - mutation_rate, mutation_rate] \n \n #For every new individual to be created through reproduction:\n for i in range (int(len(society) * reproduction_rate)):\n \n #We select two parents randomly, using the list of probabilities in \"probability\".\n father, mother = society_sorted[weighted_choice_sub(probability)], society_sorted[weighted_choice_sub(probability)]\n \n #We randomly select two cutting points for the genome.\n a, b = random.randrange(0, len(father.genome)), random.randrange(0, len(father.genome))\n \n #And we create the genome of the child putting together the genome slices of the parents in the cutting points.\n child_genome = father.genome[0:min(a,b)]+mother.genome[min(a,b):max(a,b)]+father.genome[max(a,b):]\n \n #For every bit in the not-yet-born child, we generate a list containing \n #1's in the positions where the genome must mutate (i.e. the bit must switch its value)\n #and 0's in the positions where the genome must stay the same.\n n = [weighted_choice_sub(mutation) for ii in range(len(child_genome))]\n \n #This line switches the bits of the genome of the child that must mutate.\n mutant_child_genome = [abs(n[i] - child_genome[i]) for i in range(len(child_genome))]\n \n #We finally append the newborn individual to the society\n newborn = Individual(mutant_child_genome)\n calculate_traits (newborn, dict_genes)\n calculate_performances (newborn)\n calculate_fitness (newborn)\n society.append(newborn) ", "Now that we have a large society, it is time for \"natural\" selection to act: we will remove the individuals with the worst fitness from the society until we reach a target population.", "def tournament(society, target_population):\n \n while len(society) > target_population:\n \n fitness_list = [individual.fitness for individual in society]\n society.pop(fitness_list.index(min(fitness_list)))", "Our algorithm is now practically finished!", "society = []\nfitness_max = []\n\nfor generation in range(30):\n \n immigration (society, 100, dict_genes) # Add random individuals to the society until there are 100\n fitness_max += [max([individual.fitness for individual in society])]\n \n tournament (society, 15) # Make them compete until 15 remain\n crossover(society, 5, 0.05) # The winners reproduce, adding 15 * 5 = 75 new children\n \n \nplt.plot(fitness_max)\nplt.title('Evolution of the fitness value')\n\ntournament (society, 1) # We look for the best of them all\nwinner = society[0]\n\nprint(winner.traits) # We check its characteristics\nprint(winner.performances)\n", "Siro Moreno and Carlos Dorado, Aeropython, 20 November 2015" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
fluffy-hamster/A-Beginners-Guide-to-Python
A Beginners Guide to Python/08. Calling Functions.ipynb
mit
[ "How to Call Functions...\nWhat are functions...\nFunctions are an incredibly useful concept and I will cover them in greater detail later on, I promise. But for now let's just say functions are bits of self-contained code that take some input value(s), perform some process, and return some output. Today we shall be studying how to call functions, for what it's worth you guys have already called functions, you just didn't know that was what you are doing!\nFor the sake of simplicity, we are going to split functions into four basic categories:\n1. Those that take zero arguments as input.\n2. Those that take one argument as input.\n3. Those that take multiple arguments as input.\n4. Those that take optional arguments.\nBefore we begin though I really want to stress that these categories are not real, its an arbitrary distinction that is not founded upon anything of substance.\nIf that’s the case, why do it, then? I here you ask. Well, just because a distinction is not real doesn't mean it is not helpful. Basically, the reason I have made these categories is because I believe it makes the material easier to teach; a pedagogical tool, if you will.\n1. Zero argument functions....\nCalling zero argument functions are super simple, but they also tend to be rather uninteresting as well. The syntax:\n{function name} ()\n\nIn box below I've quickly made such a function and then calling it. Boring stuff ensues.", "# Defining the function...\ndef boring():\n return \"ZZZZZZZZ...\"\n\n# calling said function...\nboring()", "2. One argument functions....\nThe syntax:\n{function name} ({argument})\n\n\"len\" is a Python built-in function. Let's call it on a few strings and see if you can figure out what it does.", "print ( len(\"a\") )\nprint ( len(\"aa\") )\nprint ( len(\"aaa\") )", "'len' is short for length. When you call it returns how big the object is, in the case of strings, that’s how many characters the string contains. Thus len(“12345”) returns 5.\n3. Functions with Multiple Arguments\nFunctions that take multiple arguments can be further defined into those that require exactly n arguments and those functions that can handle an arbitrary number of arguments. We shall briefly cover both. The syntax:\n{function name} ({argument}, {argument2})\n\nSo, to call a function that takes two arguements as imput all we do is type two arguments seperated by a comma. Let's see that in action:", "# defining the function...\ndef multiply(a, b):\n return a * b # remember '*' is multiplication!\n\nmultiply(10, 25) # <-- calling the function with two arguments, 10 and 25", "What happens when you give such a function too few/too many arguments? Well, you get an error:", "multiply(2,3,4)\n\nmultiply(1)", "Take care to note that the error messages you get are actually rather insightful, Python is telling you what the problem is!\nFunctions that take an aribitary number of arguments\nNow, the syntax for a function that takes an arbitrary number of arguments is the same as the above, you just separate the values by a comma and keep on going. As a matter of fact, we have seen such a function several times already. Its called 'print'...", "print(\"hello\") # calling print with one arguement\nprint(\"hello\", \"world\") # two arguments\nprint() # calling print with zero arguments returns an empty line.\nprint(\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\") # seven arguments", "4. 
", "4. Functions with optional arguments...\nIf a particular function argument is optional it will have two parts, a 'keyword' and a 'default value'. The syntax:\n{keyword} = {value}\n\nThe 'default value' is what happens when we don't change anything, and obviously that is the bit we may want to change. \nIf you do give a function an optional argument make sure it is the last argument you pass the function; if you don't, Python will throw an error. Anyway, here's the syntax:\n{function name} ({argument}, {argument2}, {keyword} = {value} )\n\nAs a matter of fact, the print function we have been calling has an optional argument; this argument is called \"sep\" and its default value is a single 'space' character. \nLet's look at the print calls above a bit more closely: did you notice that when we gave print several arguments it separated everything with a space? Well, we can change that behaviour by changing 'sep'. \nFor example...", "print(\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\") # Notice 'sep' is not defined here. Thus the default value \" \" is used. \nprint(\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", sep=\"\") # no spaces 'abc'\nprint(\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", sep=\"__\") # double underscore, 'a__b__c' \nprint(\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", sep=\",\") # comma, 'a,b,c'\nprint(\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", sep=\"\\n\") # \\n = New-line, please see string lecture!\n\nprint(sep=\"\", \"a\", \"b\", \"c\") # Keywords must be at the end! ", "In later lectures I'll show you how to build your own functions (and why you would want to), but the purpose of this lecture was simply to get you used to seeing function calls. Trust me when I say understanding this stuff is a really useful skill to have moving forward.\nHomework\nIn the cell below there is a function called \"call_me\". For homework I want you to call this function with several arguments such that it returns the string:\n\"Dave and Jerry went to the cheese factory.\"", "def call_me(a, f, d, b=\"\", c=\"cheese\", e=False):\n if e == True:\n return \"{} and {} went to the {}{} {}\".format(a, b, c, d, f)\n else:\n return \"YOU SHALL NOT PASS\"\n\n# Your Code goes here... call_me(?, ?, ?, ?, ?)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
junhwanjang/DataSchool
Lecture/18. 문서 전처리/2) NLTK 자연어 처리 패키지 소개.ipynb
mit
[ "NLTK 자연어 처리 패키지 소개\nNLTK(Natural Language Toolkit) 패키지는 교육용으로 개발된 자연어 처리 및 문서 분석용 파이썬 패키지다. 다양한 기능 및 예제를 가지고 있으며 실무 및 연구에서도 많이 사용된다.\nNLTK 패키지가 제공하는 주요 기능은 다음과 같다.\n\n샘플 corpus 및 사전\n토큰 생성(tokenizing)\n형태소 분석(stemming/lemmatizing)\n품사 태깅(part-of-speech tagging)\n구문 분석(syntax parsing)\n\n샘플 corpus\ncorpus는 분석 작업을 위한 샘플 문서 집합을 말한다. 단순히 소설, 신문 등의 문서를 모아놓은 것도 있지만 대부분 품사. 형태소, 등의 보조적 의미를 추가하고 쉬운 분석을 위해 구조적인 형태로 정리해 놓은 것이 많다.\nNLTK 패키지의 corpus 서브패키지에서는 다음과 같은 다양한 연구용 corpus를 제공한다. 이 목록은 전체 corpus의 일부일 뿐이다.\n\naveraged_perceptron_tagger Averaged Perceptron Tagger\nbook_grammars: Grammars from NLTK Book\nbrown: Brown Corpus\nchat80: Chat-80 Data Files\ncity_database: City Database\ncomparative_sentences Comparative Sentence Dataset\ndependency_treebank. Dependency Parsed Treebank\ngutenberg: Project Gutenberg Selections\nhmm_treebank_pos_tagger Treebank Part of Speech Tagger (HMM)\ninaugural: C-Span Inaugural Address Corpus\nlarge_grammars: Large context-free and feature-based grammars for parser comparison\nmac_morpho: MAC-MORPHO: Brazilian Portuguese news text with part-of-speech tags\nmasc_tagged: MASC Tagged Corpus\nmaxent_ne_chunker: ACE Named Entity Chunker (Maximum entropy)\nmaxent_treebank_pos_tagger Treebank Part of Speech Tagger (Maximum entropy)\nmovie_reviews: Sentiment Polarity Dataset Version 2.0\nnames: Names Corpus, Version 1.3 (1994-03-29)\nnps_chat: NPS Chat\nomw: Open Multilingual Wordnet\nopinion_lexicon: Opinion Lexicon\npros_cons: Pros and Cons\nptb: Penn Treebank\npunkt: Punkt Tokenizer Models\nreuters: The Reuters-21578 benchmark corpus, ApteMod version\nsample_grammars: Sample Grammars\nsentence_polarity: Sentence Polarity Dataset v1.0\nsentiwordnet: SentiWordNet\nsnowball_data: Snowball Data\nstopwords: Stopwords Corpus\nsubjectivity: Subjectivity Dataset v1.0\ntagsets: Help on Tagsets\ntreebank: Penn Treebank Sample\ntwitter_samples: Twitter Samples\nunicode_samples: Unicode Samples\nuniversal_tagset: Mappings to the Universal Part-of-Speech Tagset\nuniversal_treebanks_v20 Universal Treebanks Version 2.0\nverbnet: VerbNet Lexicon, Version 2.1\nwebtext: Web Text Corpus\nword2vec_sample: Word2Vec Sample\nwordnet: WordNet\nwords: Word Lists\n\n이러한 corpus 자료는 설치시에 제공되는 것이 아니라 download 명령으로 사용자가 다운로드 받아야 한다.", "nltk.download('averaged_perceptron_tagger')\nnltk.download(\"gutenberg\")\nnltk.download('punkt')\nnltk.download('reuters')\nnltk.download(\"stopwords\")\nnltk.download(\"taggers\")\nnltk.download(\"webtext\")\nnltk.download(\"wordnet\")\n\nnltk.corpus.gutenberg.fileids()\n\nemma_raw = nltk.corpus.gutenberg.raw(\"austen-emma.txt\")\nprint(emma_raw[:1302])", "토큰 생성(tokenizing)\n문서를 분석하기 위해서는 우선 긴 문자열을 분석을 위한 작은 단위로 나누어야 한다. 이 문자열 단위를 토큰(token)이라고 한다.", "from nltk.tokenize import word_tokenize\nword_tokenize(emma_raw[50:100])\n\nfrom nltk.tokenize import RegexpTokenizer\nt = RegexpTokenizer(\"[\\w]+\")\nt.tokenize(emma_raw[50:100])\n\nfrom nltk.tokenize import sent_tokenize\nprint(sent_tokenize(emma_raw[:1000])[3])", "형태소 분석\n형태소 분석이란 어근, 접두사/접미사, 품사(POS, part-of-speech) 등 다양한 언어적 속성의 구조를 파악하는 작업이다. 
", "POS tagging\nPOS (part-of-speech) refers to the word class. \n\nPart-of-Speech Tagset\nhttps://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.htm\nhttp://www.ibm.com/support/knowledgecenter/ko/SS5RWK_3.5.0/com.ibm.discovery.es.ta.doc/iiysspostagset.htm", "from nltk.tag import pos_tag\ntagged_list = pos_tag(word_tokenize(emma_raw[:100]))\ntagged_list\n\nfrom nltk.tag import untag\nuntag(tagged_list)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
MBARIMike/stoqs
stoqs/contrib/notebooks/select_data_in_columns_for_data_science.ipynb
gpl-3.0
[ "Select Data in Columns for Data Science\nPivot the row-based data in a STOQS database to fit into a column-based dataframe\nThis Notebook explores options raised by this GitHub Issue. We want to be able to efficiently consume mass quantities of data from a STOQS database and have it organized for efficient data analysis and visualization using modern data frame orientied tools.\nExecuting this Notebook requires a personal STOQS server. It can be run from either a Docker installation or from a development Vagrant Virtual Machine. \nDocker Instructions\nInstall and start the software as \ndetailed in the README. (Note that on MacOS you will need to modify settings in your docker-compose.yml and .env files &mdash; look for comments referencing 'HOST_UID'.)\nThen, from your $STOQS_HOME/docker directory start the Jupyter Notebook server - you can query from the remote database or from a copy that you've made to your local system: \nOption A: Query from MBARI's master database\nStart the Jupyter Notebook server pointing to MBARI's master STOQS database server. (Note: firewall rules limit unprivileged access to such resources):\ndocker-compose exec \\\n -e DATABASE_URL=postgis://everyone:guest@kraken.shore.mbari.org:5432/stoqs \\\n stoqs stoqs/manage.py shell_plus --notebook\n\nOption B: Query from your local Docker Desktop\nRestore the stoqs_canon_october2020 database from MBARI's server onto your local database and start the Jupyter Notebook server using the default DATABASE_URL, which should be your local system, also make sure that your Docker Desktop has at least 16 GB of RAM allocated to it:\ncd $STOQS_HOME/docker\ndocker-compose exec stoqs createdb -U postgres stoqs_canon_october2020\ncurl -k https://stoqs.shore.mbari.org/media/pg_dumps/stoqs_canon_october2020.pg_dump | \\\n docker exec -i stoqs pg_restore -Fc -U postgres -d stoqs_canon_october2020\ndocker-compose exec stoqs stoqs/manage.py shell_plus --notebook\n\nOpening this Notebook\nFollowing execution of the stoqs/manage.py shell_plus --notebook command a message is displayed giving a URL for you to use in a browser on your host, e.g.:\nhttp://127.0.0.1:8888/?token=&lt;a_token_generated_upon_server_start&gt;\n\nIn the browser window opened to this URL navigate to this file (select_data_in_columns_for_data_science.ipynb) and open it. You will then be able to execute the cells and modify the code to suit your needs.\nThe information in the output cells result from execution on a 2019 MacBook Pro with a 2.4 GHz 8-Core Intel Core i9 processor, 32 GB 2667 MHz DDR4 RAM, running Docker Desktop 3.1.0 with 16 GB with 4 CPUs and 16 GB allocated.", "import os\nimport time\n\n# Prevent SynchronousOnlyOperation exceptions\nos.environ[\"DJANGO_ALLOW_ASYNC_UNSAFE\"] = \"true\"\n\n# Set do_plots to True for visualizations (do not commit with this setting)\ndo_plots = False\nt_start = time.time()\n\n# Use a recent database available at DATABASE_URL\ndb = 'stoqs_canon_october2020'", "0. Perform a straight forward query using the STOQS data model.\nCollect all the sea_water_temperature and sea_water_salinity data into dictionaries keyed by platform name. 
This is to examine the landscape of data we are querying.", "# To make sure we collect temperatures and salinities that are properly associated\n# we will first find all the Platforms that have T & S and then from each Measurement\n# from the Platform collect the temperatures and salinities into lists for plotting.\n# Assume that Platforms that have sea_water_salinity also have sea_water_temperature.\nplatforms = (ActivityParameter.objects.using(db)\n .filter(parameter__standard_name='sea_water_salinity')\n .values_list('activity__platform__name', flat=True)\n .distinct().order_by('activity__platform__name'))\ntemps = {}\nsalts = {}\nfor platform in platforms:\n print(f\"Collecting data for: {platform:23}\", end=' ')\n mps = (MeasuredParameter.objects.using(db)\n .filter(measurement__instantpoint__activity__platform__name=platform))\n \n temps[platform] = (mps.filter(parameter__standard_name='sea_water_temperature')\n .values_list('datavalue', flat=True))\n salts[platform] = (mps.filter(parameter__standard_name='sea_water_salinity')\n .values_list('datavalue', flat=True))\n print(f\"#temps: {len(temps[platform]):6} #salts: {len(salts[platform]):6}\", end='')\n if len(temps[platform]) != len(salts[platform]):\n print(' - not equal')\n else:\n print()\nprint('Done')\n\nif do_plots:\n # Make a T/S plot of data from all the platforms\n import pylab as plt\n plt.rcParams['figure.figsize'] = (18, 6)\n for platform in temps.keys():\n if len(temps[platform]) == len(salts[platform]):\n plt.scatter(salts[platform], temps[platform], s=1, label=platform)\n plt.xlabel('Salinty')\n plt.ylabel('Temperature (DegC)')\n plt.legend();", "Approach 1. Use the same kind of self-join query used for selecting data for Parameter-Parameter plots.\nA sample SQL statement was copied from the STOQS UI and then modified to select sea_water_temperature and sea_water_salinity from all platforms.", "sql_multp = '''SELECT DISTINCT \n stoqs_platform.name,\n stoqs_instantpoint.timevalue,\n stoqs_measurement.depth,\n mp_salt.datavalue AS salt,\n mp_temp.datavalue AS temp\nFROM stoqs_measuredparameter\nINNER JOIN stoqs_measurement ON (stoqs_measuredparameter.measurement_id = stoqs_measurement.id)\nINNER JOIN stoqs_instantpoint ON (stoqs_measurement.instantpoint_id = stoqs_instantpoint.id)\nINNER JOIN stoqs_activity ON (stoqs_instantpoint.activity_id = stoqs_activity.id)\nINNER JOIN stoqs_platform ON (stoqs_activity.platform_id = stoqs_platform.id)\nINNER JOIN stoqs_measurement m_salt ON m_salt.instantpoint_id = stoqs_instantpoint.id\nINNER JOIN stoqs_measuredparameter mp_salt ON mp_salt.measurement_id = m_salt.id\nINNER JOIN stoqs_parameter p_salt ON mp_salt.parameter_id = p_salt.id\nINNER JOIN stoqs_measurement m_temp ON m_temp.instantpoint_id = stoqs_instantpoint.id\nINNER JOIN stoqs_measuredparameter mp_temp ON mp_temp.measurement_id = m_temp.id\nINNER JOIN stoqs_parameter p_temp ON mp_temp.parameter_id = p_temp.id\nWHERE (p_salt.standard_name = 'sea_water_salinity')\n AND (p_temp.standard_name = 'sea_water_temperature')\n AND stoqs_platform.name IN ({})\nORDER BY stoqs_instantpoint.timevalue, stoqs_measurement.depth'''\n\n# Build the SQL with optional selection of platforms to use\ndb = 'stoqs_canon_october2020'\nplatforms = (ActivityParameter.objects.using(db)\n .filter(parameter__standard_name='sea_water_salinity')\n .values_list('activity__platform__name', flat=True)\n .order_by('activity__platform__name').distinct())\nplats = ''\nplat_list = []\nfor platform in platforms:\n if platform == 'M1_Mooring' or platform == 
'makai' or platform == 'pontus':\n # Continue to omit some platforms for shorter execution times\n continue\n plats += f\"'{platform}',\"\n plat_list.append(platform)\nplats = plats[:-2] + \"'\"\nsql = sql_multp.format(plats)\nprint(sql)\n\nimport pandas as pd\nfrom django.db import connections\n\n# It takes about 15 seconds to read about 0.5 million rows from the local STOQS database.\n%time df1 = pd.read_sql_query(sql, connections[db], index_col=['name', 'timevalue', 'depth'])\n##%time df1 = pd.read_sql_query(sql, connections[db])\nprint(df1.shape)\ndf1.head()\n\n# Writing the Parquet file takes about 0.6 seconds\n%time df1.to_parquet('all_plats.parquet')\n\n# Reading the Parquet file takes about 0.4 seconds\n%time df1b = pd.read_parquet('all_plats.parquet')\ndf1b.shape\n\ndf1\n\n# Datashader plots must be left justified on last line, use this variable to do that\nts_points = None\nif do_plots:\n # See: http://holoviews.org/user_guide/Large_Data.html\n # https://stackoverflow.com/a/18835121/1281657\n import colorcet\n import holoviews as hv\n from holoviews.operation.datashader import rasterize\n hv.extension(\"bokeh\")\n ropts = dict(height=380, width=300, colorbar=True, colorbar_position=\"bottom\", cmap=colorcet.fire)\n plots = [(rasterize(hv.Points(df1.iloc[df1.index.get_level_values('name') == p], kdims=['salt', 'temp']))\n .opts(**ropts).relabel(p)) for p in plat_list]\n ts_points = hv.Layout(plots).cols(3)\nts_points", "This approach could be used in a general way to extract all Parameters for each Platform by dynamically generating the SQL (with dozens more self joins) and executing it. We do need more scalable methods than .read_sql_query() and .to_parquet(), which need to read and write all the data into and out of allocated random access memory. This is why at least 16 GB of RAM needs to be allocated to Docker Desktop for this query - larger data requests would require more memory - this is not scalable. \nThis SQL is not forgiving in terms of ad hoc modification; for example, adding a column to the SELECT can increase the volume of results by unexpectedly returning a type of cross join with repeated salt and temp values. \nThis sort of self-join query returns a lot of duplicate records (kind of a cross join) for the 'M1_Mooring' platform which has a 'stationprofile' CF featureType, resulting in a different relational cardinality that would require special treatment.
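\nA sketch of what that dynamic generation might look like (hypothetical helper, not part of STOQS; the fragments would be spliced into the fixed part of the query above):\nparams = {'salt': 'sea_water_salinity', 'temp': 'sea_water_temperature'}\ncols, joins, wheres = [], [], []\nfor a, std in params.items():\n cols.append(f\"mp_{a}.datavalue AS {a}\")\n joins.append(f\"INNER JOIN stoqs_measurement m_{a} ON m_{a}.instantpoint_id = stoqs_instantpoint.id\")\n joins.append(f\"INNER JOIN stoqs_measuredparameter mp_{a} ON mp_{a}.measurement_id = m_{a}.id\")\n joins.append(f\"INNER JOIN stoqs_parameter p_{a} ON mp_{a}.parameter_id = p_{a}.id\")\n wheres.append(f\"(p_{a}.standard_name = '{std}')\")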
\n\nApproach 2. Use Brent's trimSTOQS program to convert the MeasuredParameter Data Access output:", "# It takes about 5 minutes to read in 0.17 million dorado CSV rows and convert using trimSTOQS\n##! time wget https://stoqs.mbari.org/stoqs_canon_october2020/api/measuredparameter.csv?measurement__instantpoint__activity__platform__name=dorado \\\n## -q -O - | /srv/stoqs/contrib/trimSTOQS/trimSTOQS parameter__name --separator=, > october2020_dorado_parms.cvs\n##df2 = pd.read_csv('/srv/stoqs/contrib/trimSTOQS/october2020_dorado_parms.cvs')\n\n# It takes about 40 seconds (on a fast network) to read in just 0.033 million NPS_Glider_29 CSV rows and convert using trimSTOQS\n! time wget http://stoqs.mbari.org/stoqs_canon_october2020/api/measuredparameter.csv?measurement__instantpoint__activity__platform__name=NPS_Glider_29 \\\n -q -O - | /srv/stoqs/contrib/trimSTOQS/trimSTOQS parameter__name --separator=, \\\n > /srv/stoqs/contrib/trimSTOQS/october2020_NPS_Glider_29_parms.cvs\n \ndf2 = pd.read_csv('/srv/stoqs/contrib/trimSTOQS/october2020_NPS_Glider_29_parms.cvs')\nprint(df2.shape)\ndf2.head()", "The advantage of this approach is that all parameters get transformed into the columns we want. The disadvantage is that it takes a long time to extract the data in CSV format. Approach 1 reads at a rate of about 30,000 rows/sec, approach 2 reads at a rate of 1000 rows/sec - more than an order of magnitude slower.\n\nApproach 3. Do a direct PostgreSQL query to transform the data, perhaps using the crosstab() function.\nThis needs to be done on the database first:\n% docker-compose exec postgis psql -U postgres \npostgres=# \\\c stoqs_canon_october2020\nstoqs_canon_october2020=# CREATE EXTENSION IF NOT EXISTS tablefunc;\nCREATE EXTENSION
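\nAn untested sketch of how crosstab() might then be applied (assumed syntax per the tablefunc docs; the output columns must be declared by hand in the AS clause, one per parameter):\nSELECT * FROM crosstab(\n $$SELECT measurement_id, parameter, datavalue FROM ... ORDER BY 1, 2$$,\n $$SELECT DISTINCT parameter FROM ... ORDER BY 1$$\n) AS ct(measurement_id int, temp double precision, salt double precision);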
", "# Base query that's similar to the one behind the api/measuredparameter.csv request\nsql_base = '''SELECT stoqs_platform.name as platform, stoqs_activity.name as activity__name,\n stoqs_instantpoint.timevalue, stoqs_measurement.depth, \n ST_X(stoqs_measurement.geom) as longitude, ST_Y(stoqs_measurement.geom) as latitude,\n stoqs_parameter.name, standard_name, datavalue \nFROM public.stoqs_measuredparameter\nINNER JOIN stoqs_measurement ON (stoqs_measuredparameter.measurement_id = stoqs_measurement.id)\nINNER JOIN stoqs_instantpoint ON (stoqs_measurement.instantpoint_id = stoqs_instantpoint.id)\nINNER JOIN stoqs_activity ON (stoqs_instantpoint.activity_id = stoqs_activity.id)\nINNER JOIN stoqs_platform ON (stoqs_activity.platform_id = stoqs_platform.id)\nINNER JOIN stoqs_parameter ON (stoqs_measuredparameter.parameter_id = stoqs_parameter.id)\nWHERE stoqs_platform.name IN ({})\nORDER BY stoqs_platform.name, stoqs_instantpoint.timevalue, stoqs_measurement.depth, stoqs_parameter.name'''\nsql = sql_base.format(plats)\nprint(sql)\n\n# Identify the columns used as the context (index) for the measurements\ncontext = ['platform', 'timevalue', 'depth', 'latitude', 'longitude']\n\n# It takes about 1 minute to read all the Parameters for the selected platforms - about 13.5 million rows\n%time df3a = pd.read_sql_query(sql, connections[db], index_col=context)\nprint(df3a.shape)\ndf3a.head()\n\ncontext = ['platform', 'activity__name', 'timevalue', 'depth', 'latitude', 'longitude']\n\n# It takes about 1 minute to read all the Parameters for the selected platforms - about 13.5 million rows\n%time df3a = pd.read_sql_query(sql, connections[db], index_col=context)\nprint(df3a.shape)\ndf3a.head()\n\nimport pandas.io.sql as sqlio\nimport psycopg2\n\n# Use psycopg2 for direct from Postgres query - still explodes stoqs container RAM as this Notebook runs there\nconn = psycopg2.connect(\"host='{}' port={} dbname='{}' user={} password={}\".format(\n 'postgis', 5432, 'stoqs_canon_october2020', 'stoqsadm', 'CHANGEME'))\n# Takes about 5 minutes to read 13.5 million rows\n%time df3b = sqlio.read_sql_query(sql, conn, index_col=context)\nprint(df3b.shape)\ndf3b.head()\n\n# Setting chunksize doesn't help reduce memory requirements in the stoqs or stoqs-postgis containers\n# See: https://stackoverflow.com/a/31843091/1281657\n# https://github.com/pandas-dev/pandas/issues/12265#issuecomment-181809005\n# https://github.com/pandas-dev/pandas/issues/35689\ndf3c = pd.DataFrame()\n# Still takes about 2.5 minutes to read 13.5 million rows (chunking happens only on client in Pandas)\n##%time chunker = pd.read_sql_query(sql, connections[db], index_col=context, chunksize=1000000)\n##for chunk in chunker:\n ##print(chunk.shape)\n ##df3c.add(chunk.pivot_table(index=context, columns='name', values='datavalue'))\nprint(\"It would be nice if chunksize helped with memory usage in docker, but it doesn't.\")", "The syntax of crosstab() is arcane and it will take some work to figure out a way to preserve datetime objects as they are read into a DataFrame. It's likely that performing a pivot on the data closer to the database will be more performant than, say, doing it after reading records into a DataFrame.\n\nApproach 4. Use Pandas to do a pivot on data read into a DataFrame\nSimilar to Approach 2, but this may be more efficient as conversion to and from CSV text format is avoided.", "# Identify the columns used as the index for the pivot\ncontext = ['platform', 'timevalue', 'depth', 'latitude', 'longitude']\n\n%time df4 = df3a.pivot_table(index=context, columns='name', values='datavalue')\nprint(df4.shape)\ndf4.head()", "This approach looks promising. Some advantages:\n\nThe SQL query is a simple inner join of the tables - similar to that used for MeasuredParameter Data Access\nNo complicated self joins are needed\nIt will work for any Parameter names from any platforms\nMissing values are preserved as None or NaN\nPandas pivot_table() method is efficient, taking only about 5 seconds\n\nSome disadvantages:\n\nThis uses the stoqs Docker image and the Django api - it could be closer to the database\nMore than 16 GB needs to be allocated to Docker Desktop to read the entire stoqs_canon_october2020 db\nWhen memory is exhausted there is no error message provided; it annoyingly quits silently\n\n\nThese experiments now lead to the script stoqs/contrib/parquet/extract_columns.py which will implement this capability at the command line. This notebook can still serve as a \"playground\" for testing out various ways to get STOQS data into modern data science tools.", "if do_plots:\n # See: https://datashader.org/getting_started/Pipeline.html\n import holoviews as hv\n from holoviews.operation.datashader import datashade\n hv.extension(\"bokeh\")\n pts1 = hv.Points(df1, kdims=['salt', 'temp'])\n pts2 = hv.Points(df2, kdims=['PSAL (0.001)', 'TEMP (Celsius)'])\n pts4a = hv.Points(df4, kdims=['salinity', 'temperature (Celsius)'])\n pts4b = hv.Points(df4, kdims=['PSAL (0.001)', 'TEMP (Celsius)'])\n ts_points = ( datashade(pts1, cmap=colorcet.fire).opts(title='df1')\n + datashade(pts2, cmap=colorcet.fire).opts(title='df2')\n + datashade(pts4a, cmap=colorcet.fire).opts(title='df4a')\n + datashade(pts4b, cmap=colorcet.fire).opts(title='df4b'))\nts_points\n\nif do_plots:\n # See: http://holoviews.org/user_guide/Large_Data.html\n from holoviews.operation.datashader import rasterize\n ##ropts = dict(tools=[\"pan,wheel_zoom,box_zoom\"], height=380, width=330, colorbar=True, colorbar_position=\"bottom\")\n ropts = dict(height=380, width=330, colorbar=True, colorbar_position=\"bottom\")\n\n ts_points = hv.Layout([rasterize(hv.Points(df1.iloc[df1.index.get_level_values('name') == p],kdims=['temp', 'salt'])).opts(**ropts).relabel(p) for p in plat_list])\n\nts_points\n\nprint(f\"Time to execute this notebook: {(time.time() - t_start):.1f} seconds\")", "Monitoring with docker stats shows that executing this notebook required 7.5 GB of memory in the stoqs container." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
pschragger/big-data-python-class
Lectures/Lecture 10 - Graph Algorithms/Python Graphs - networkx intro .ipynb
mit
[ "Using the graph from figure 10.1 from the textbook (http://infolab.stanford.edu/~ullman/mmds/book.pdf) to demonstrate\n<img src=\"images/smallsocialgraph.png\">", "#Example Small social newtork as a connection matrix\nsc1 = ([(0, 1, 1, 0, 0, 0, 0), \n (1, 0, 1, 1, 0, 0, 0),\n (1, 1, 0, 0, 0, 0, 0),\n (0, 1, 0, 0, 1, 1, 1),\n (0, 0, 0, 1, 0, 1, 0),\n (0, 0, 0, 1, 1, 0, 1),\n (0, 0, 0, 1, 0, 1, 0)])", "http://networkx.github.io/documentation/latest/install.html\npip install networkx\nto install networkx into your python modules\npip install networkx --upgrade \nto upgrade you previously installed version\nYou may have to restart your ipython engine to pick up the new module\nTest your installation by running the new box locally\nA full tutorial is available at:\nhttp://networkx.github.io/documentation/latest/tutorial/index.html\nThere are other packages for graph manipulation for python:\npython-igraph, (http://igraph.org/python/#pydoc1)\nGraph-tool, (https://graph-tool.skewed.de)\nI picked networkx because it took little effort to install.", "import networkx as nx\nG1 = nx.Graph()\n\n\nG1.add_nodes_from(['A','B','C','D','E','F','G'])\nG1.nodes()\n\n\nG1.add_edges_from([('A','B'),('A','C')])\nG1.add_edges_from([('B','C'),('B','D')])\nG1.add_edges_from([('D','E'),('D','F'),('D','G')])\nG1.add_edges_from([('E','F')])\nG1.add_edges_from([('F','G')])\n\nG1.number_of_edges()\n\nG1.edges()\n\nG1.neighbors('D')\n\nimport matplotlib.pyplot as plt\n#drawing the graph\n%matplotlib inline \n\n\nnx.draw(G1)\n\npos=nx.spring_layout(G1)\nnx.draw(G1,pos,node_color='y', edge_color='r', node_size=600, width=3.0)\nnx.draw_networkx_labels(G1,pos,color='W',font_size=20,font_family='sans-serif')\n#https://networkx.github.io/documentation/latest/reference/generated/networkx.drawing.nx_pylab.draw_networkx.html\n#Some parameters to play with", "Let's play with some algorithms in class:\nhttps://networkx.github.io/documentation/latest/reference/algorithms.html\nSocial Graph analysis algorithms\nBetweenness:\nhttps://networkx.github.io/documentation/latest/reference/algorithms.centrality.html#betweenness\nEigenVector centrality\nhttps://networkx.github.io/documentation/latest/reference/algorithms.centrality.html#eigenvector\nClustering per node\nhttps://networkx.github.io/documentation/latest/reference/generated/networkx.algorithms.cluster.clustering.html\nAll Shortest Paths\nhttps://networkx.github.io/documentation/latest/reference/generated/networkx.algorithms.shortest_paths.generic.all_shortest_paths.html\nLaplacian matrix\nhttps://networkx.github.io/documentation/latest/reference/generated/networkx.linalg.laplacianmatrix.laplacian_matrix.html#networkx.linalg.laplacianmatrix.laplacian_matrix\nEach of the students pair up and work on demonstrating a networkx algorithm\nCreate a new ipython page and submit it", "#Enumeration of all cliques\nlist(nx.enumerate_all_cliques(G1))\n\nlist(nx.cliques_containing_node(G1,'A'))\n\nlist(nx.cliques_containing_node(G1,'D'))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
ocean-color-ac-challenge/evaluate-pearson
evaluation-participant-a.ipynb
apache-2.0
[ "E-CEO Challenge #3 Evaluation\nWeights\nDefine the weight of each wavelength", "w_412 = 0.56\nw_443 = 0.73\nw_490 = 0.71\nw_510 = 0.36\nw_560 = 0.01", "Run\nProvide the run information:\n* run id\n* run metalink containing the 3 by 3 kernel extractions\n* participant", "run_id = '0000000-150625115710650-oozie-oozi-W'\nrun_meta = 'http://sb-10-16-10-55.dev.terradue.int:50075/streamFile/ciop/run/participant-a/0000000-150625115710650-oozie-oozi-W/results.metalink?'\nparticipant = 'participant-a'", "Define all imports in a single cell", "import glob\nimport pandas as pd\nfrom scipy.stats.stats import pearsonr\nimport numpy\nimport math", "Manage run results\nDownload the results and aggregate them in a single Pandas dataframe", "!curl $run_meta | aria2c -d $participant -M -\n\npath = participant # use your path\n\nallFiles = glob.glob(path + \"/*.txt\")\nframe = pd.DataFrame()\nlist_ = []\nfor file_ in allFiles:\n df = pd.read_csv(file_,index_col=None, header=0)\n list_.append(df)\n frame = pd.concat(list_)\n\nlen(frame.index)", "Number of points extracted from MERIS level 2 products\nCalculate Pearson\nFor all three sites, AAOT, BOUSSOLE and MOBY, calculate the Pearson factor for each band.\n\nNote AAOT does not have measurements for band @510\n\nAAOT site", "insitu_path = './insitu/AAOT.csv'\ninsitu = pd.read_csv(insitu_path)\nframe_full = pd.DataFrame.merge(frame.query('Name == \"AAOT\"'), insitu, how='inner', on = ['Date', 'ORBIT'])\n\nframe_xxx= frame_full[['reflec_1_mean', 'rho_wn_IS_412']].dropna()\nr_aaot_412 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0] \n\nprint(str(len(frame_xxx.index)) + \" observations for band @412\")\n\nframe_xxx= frame_full[['reflec_2_mean', 'rho_wn_IS_443']].dropna()\nr_aaot_443 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0] \n\nprint(str(len(frame_xxx.index)) + \" observations for band @443\")\n\nframe_xxx= frame_full[['reflec_3_mean', 'rho_wn_IS_490']].dropna()\nr_aaot_490 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0] \n\nprint(str(len(frame_xxx.index)) + \" observations for band @490\")\n\nr_aaot_510 = 0\nprint(\"0 observations for band @510\")\n\nframe_xxx= frame_full[['reflec_5_mean', 'rho_wn_IS_560']].dropna()\nr_aaot_560 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0] \n\nprint(str(len(frame_xxx.index)) + \" observations for band @560\")\n\ninsitu_path = './insitu/BOUSS.csv'\ninsitu = pd.read_csv(insitu_path)\nframe_full = pd.DataFrame.merge(frame.query('Name == \"BOUS\"'), insitu, how='inner', on = ['Date', 'ORBIT'])\n\nframe_xxx= frame_full[['reflec_1_mean', 'rho_wn_IS_412']].dropna()\nr_bous_412 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0] \n\nprint(str(len(frame_xxx.index)) + \" observations for band @412\")\n\nframe_xxx= frame_full[['reflec_2_mean', 'rho_wn_IS_443']].dropna()\nr_bous_443 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0] \n\nprint(str(len(frame_xxx.index)) + \" observations for band @443\")\n\nframe_xxx= frame_full[['reflec_3_mean', 'rho_wn_IS_490']].dropna()\nr_bous_490 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0] \n\nprint(str(len(frame_xxx.index)) + \" observations for band @490\")\n\nframe_xxx= frame_full[['reflec_4_mean', 'rho_wn_IS_510']].dropna()\nr_bous_510 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0]\n\nprint(str(len(frame_xxx.index)) + \" observations for band @510\")\n\nframe_xxx= frame_full[['reflec_5_mean', 'rho_wn_IS_560']].dropna()\nr_bous_560 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0]\n\nprint(str(len(frame_xxx.index)) + \" observations for band 
@560\")\n\ninsitu_path = './insitu/MOBY.csv'\ninsitu = pd.read_csv(insitu_path)\nframe_full = pd.DataFrame.merge(frame.query('Name == \"MOBY\"'), insitu, how='inner', on = ['Date', 'ORBIT'])\n\nframe_xxx= frame_full[['reflec_1_mean', 'rho_wn_IS_412']].dropna()\nr_moby_412 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0] \n\nprint(str(len(frame_xxx.index)) + \" observations for band @12\")\n\nframe_xxx= frame_full[['reflec_2_mean', 'rho_wn_IS_443']].dropna()\nr_moby_443 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0] \n\nprint(str(len(frame_xxx.index)) + \" observations for band @443\")\n\nframe_xxx= frame_full[['reflec_3_mean', 'rho_wn_IS_490']].dropna()\nr_moby_490 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0]\n\nprint(str(len(frame_xxx.index)) + \" observations for band @490\")\n\nframe_xxx= frame_full[['reflec_4_mean', 'rho_wn_IS_510']].dropna()\nr_moby_510 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0] \n\nprint(str(len(frame_xxx.index)) + \" observations for band @510\")\n\nframe_xxx= frame_full[['reflec_5_mean', 'rho_wn_IS_560']].dropna()\nr_moby_560 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0] \n\nprint(str(len(frame_xxx.index)) + \" observations for band @560\")\n\n[r_aaot_412, r_aaot_443, r_aaot_490, r_aaot_510, r_aaot_560]\n\n[r_bous_412, r_bous_443, r_bous_490, r_bous_510, r_bous_560]\n\n\n[r_moby_412, r_moby_443, r_moby_490, r_moby_510, r_moby_560]\n\nr_final = (numpy.mean([r_bous_412, r_moby_412, r_aaot_412]) * w_412 \\\n + numpy.mean([r_bous_443, r_moby_443, r_aaot_443]) * w_443 \\\n + numpy.mean([r_bous_490, r_moby_490, r_aaot_490]) * w_490 \\\n + numpy.mean([r_bous_510, r_moby_510, r_aaot_510]) * w_510 \\\n + numpy.mean([r_bous_560, r_moby_560, r_aaot_560]) * w_560) \\\n / (w_412 + w_443 + w_490 + w_510 + w_560)\n\nr_final" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
borja876/Thinkful-DataScience-Borja
Challenge+Boston+marathon.ipynb
mit
[ "%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport scipy\nimport sklearn\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom matplotlib.mlab import PCA as mlabPCA\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA \nfrom sklearn import preprocessing\n\nfrom scipy.spatial.distance import cdist\nfrom sklearn.cluster import MeanShift, estimate_bandwidth\nfrom sklearn.cluster import AffinityPropagation\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.preprocessing import normalize\nfrom sklearn.decomposition import PCA\nfrom sklearn.cluster import MiniBatchKMeans\nfrom sklearn.cluster import SpectralClustering\nfrom sklearn.metrics import silhouette_samples, silhouette_score\n\n# Read and import data\nboston_marathon_results = pd.read_csv('results 2013.csv')\nboston_marathon_results.head()\n\nboston_marathon_results.columns\n\nboston_marathon_scores = boston_marathon_results.drop(['city','country', 'genderdiv', 'bib', 'ctz', 'state', 'name', 'division'], axis = 1)\n\nboston_marathon_scores.replace('-', 0, inplace=True)\nboston_marathon_scores['gender'] = boston_marathon_scores.loc[:, 'gender'].map({'F': 0,'M': 1})\nprint(boston_marathon_scores.columns.unique())\nboston_marathon_scores.head()\n\nboston_marathon_scores = boston_marathon_scores.astype(float)\nboston_marathon_scores.info()\n\n#Make a copy of DF\nX_tr = boston_marathon_scores\n\n#Standardize\nclmns = ['age', 'official','40k', '35k', '30k', '25k', 'half', '20k', '10k', '5k', 'pace']\n\nX_tr_std = normalize(X_tr[clmns])", "Compare Spectral Clustering against kMeans using Similarity\nAs there is no ground truth, the criteria used to evaluate clusters produced using Spectral and kmeans is the silhouette coefficient. From the results obtained, it can be appreaciated that Spectral Clustering requires 6 clusters to have the silhouette score similar to the one obtained with 3 clusters with kmeans.", "#Compare from a silhouette_score perspective kmeans against Spectral Clustering\nrange_n_clusters = np.arange(10)+2\n\nfor n_clusters in range_n_clusters:\n# The silhouette_score gives the average value for all the samples.\n # This gives a perspective into the density and separation of the formed\n # clusters\n\n# Initialize the clusterer with n_clusters value and a random generator\n # seed of 10 for reproducibility.\n spec_clust = SpectralClustering(n_clusters=n_clusters)\n cluster_labels1 = spec_clust.fit_predict(X_tr_std) \n silhouette_avg1 = silhouette_score(X_tr_std, cluster_labels1)\n \n kmeans = KMeans(n_clusters=n_clusters, init='k-means++', n_init=10).fit(X_tr_std)\n cluster_labels2 = kmeans.fit_predict(X_tr_std) \n silhouette_avg2 = silhouette_score(X_tr_std, cluster_labels2)\n \n print(\"For n_clusters =\", n_clusters,\n \"av. sil_score for Spec. clust is :\", silhouette_avg1,\n \"av. sil_score for kmeans is :\",silhouette_avg2 )", "the optimal number of kmeans will be determined using the elbow method. 
Once the number of kmeans clusters is set, the number of spectral clusters will be chosen so that its silhouette score matches the one obtained in the kmeans case.\nK-Means", "# Use the elbow method to determine the number of clusters\ndistortions = []\nK = range(1,10)\nfor k in K:\n    kmeanModel = KMeans(n_clusters=k).fit(X_tr)\n    distortions.append(sum(np.min(cdist(X_tr, kmeanModel.cluster_centers_, 'euclidean'), axis=1)) / X_tr.shape[0])\n\n# Plot the elbow\nplt.plot(K, distortions, 'bx-')\nplt.xlabel('k')\nplt.ylabel('Distortion')\nplt.title('The Elbow Method showing the optimal k')\nplt.show()", "The elbow method shows that the optimal number of clusters to be used in the kmeans method is 3, considering the euclidean distance between cluster centers. From an analytical perspective, the inertia function shows the same result: 3 clusters, where the decrease in inertia becomes clearly smaller when shifting from 3 to 4 clusters.", "# Evaluate the best number of clusters\nfor i in range(1,10):\n    km = KMeans(n_clusters=i, init='k-means++', n_init=10).fit(X_tr_std)\n    print(i, km.inertia_)\n\n# Cluster the data\nkmeans = KMeans(n_clusters=3, init='k-means++', n_init=10).fit(X_tr_std)\nlabels = kmeans.labels_\n\n# Glue back to original data\nX_tr['clusters'] = labels\nX_tr['Gender'] = boston_marathon_scores.gender\nX_tr['Overall'] = boston_marathon_scores.overall\n\n# Add the columns to our list\nclmns.extend(['clusters','Gender','Overall'])\n\n# Let's analyze the clusters\npd.DataFrame(X_tr.groupby(['clusters']).mean())\n\nclusters_summary = X_tr.groupby(['clusters']).describe()\nclusters_summary_transposed = clusters_summary.transpose()\nclusters_summary_transposed\n\n# Reduce it to two components.\nX_pca = PCA(2).fit_transform(X_tr_std)\n\n# Calculate predicted values.\ny_pred = KMeans(n_clusters=3, random_state=42).fit_predict(X_pca)\n\n# Plot the solution.\nplt.scatter(X_pca[:, 0], X_pca[:, 1], c=y_pred)\nplt.show()\n\nGraph_kmeans_official = pd.pivot_table(X_tr, 'official', ['clusters', 'gender'])\nGraph_kmeans_pace = pd.pivot_table(X_tr, 'pace', ['clusters', 'gender'])\nGraph_kmeans_age = pd.pivot_table(X_tr, 'age', ['clusters', 'gender'])\nprint(Graph_kmeans_official, Graph_kmeans_pace, Graph_kmeans_age)", "Spectral Clustering", "# We know we're looking for 6 clusters from the comparison with kmeans.\nn_clusters = 6\n\n# Declare and fit the model.\nsc = SpectralClustering(n_clusters=n_clusters).fit(X_tr_std)\n\n# Extract cluster assignments for each data point.\nlabels = sc.labels_\n\n# Glue back to original data\nX_tr['clusters'] = labels\nX_tr['Gender'] = boston_marathon_scores.gender\nX_tr['Overall'] = boston_marathon_scores.overall\n\n# Add the columns to our list\nclmns.extend(['clusters','Gender','Overall'])\n\n# Let's analyze the clusters\npd.DataFrame(X_tr.groupby(['clusters']).mean())\n\nclusters_summary = X_tr.groupby(['clusters']).describe()\nclusters_summary_transposed = clusters_summary.transpose()\nclusters_summary_transposed\n\n# Reduce it to two components.\nX_pca = PCA(2).fit_transform(X_tr_std)\n\n# Calculate predicted values with the same number of clusters as the fitted model.\ny_pred = SpectralClustering(n_clusters=n_clusters).fit_predict(X_pca)\n\n# Plot the solution.\nplt.scatter(X_pca[:, 0], X_pca[:, 1], c=y_pred)\nplt.show()", "Mean Shift", "# Here we set the bandwidth. 
This function automatically derives a bandwidth\n# number based on an inspection of the distances among points in the data.\nbandwidth = estimate_bandwidth(X_tr_std, quantile=0.9)\n\n# Declare and fit the model.\nms = MeanShift(bandwidth=bandwidth, bin_seeding=True).fit(X_tr_std)\n\n# Extract cluster assignments for each data point.\nlabels = ms.labels_\n\n# Coordinates of the cluster centers.\ncluster_centers = ms.cluster_centers_\n\n# Count our clusters.\nn_clusters_ = len(np.unique(labels))\n\n# Glue back to original data\nX_tr['clusters'] = labels\nX_tr['Gender'] = boston_marathon_scores.gender\nX_tr['Overall'] = boston_marathon_scores.overall\n\n# Add the columns to our list\nclmns.extend(['clusters','Gender','Overall'])\n\n# Let's analyze the clusters\nprint(\"Number of estimated clusters: {}\".format(n_clusters_))\npd.DataFrame(X_tr.groupby(['clusters']).mean())\n\nclusters_summary = X_tr.groupby(['clusters']).describe()\nclusters_summary_transposed = clusters_summary.transpose()\nclusters_summary_transposed\n\n# Reduce it to two components.\nX_pca = PCA(2).fit_transform(X_tr_std)\n\n# Calculate predicted values.\nbandwidth = estimate_bandwidth(X_tr_std, quantile=0.9)\ny_pred = MeanShift(bandwidth=bandwidth, bin_seeding=True).fit_predict(X_pca)\n\n# Plot the solution.\nplt.scatter(X_pca[:, 0], X_pca[:, 1], c=y_pred)\nplt.show()\n\n# Declare the model and fit it in one statement.\n# Note that you can provide arguments to the model, but we didn't.\naf = AffinityPropagation().fit(X_tr_std)\nprint('Done')\n\n# Pull the number of clusters and cluster assignments for each data point.\ncluster_centers_indices = af.cluster_centers_indices_\nn_clusters_ = len(cluster_centers_indices)\nlabels = af.labels_\n\n# Glue back to original data\nX_tr['clusters'] = labels\nX_tr['Gender'] = boston_marathon_scores.gender\nX_tr['Overall'] = boston_marathon_scores.overall\n\n# Add the columns to our list\nclmns.extend(['clusters','Gender','Overall'])\n\n# Let's analyze the clusters\nprint(\"Number of estimated clusters: {}\".format(n_clusters_))\npd.DataFrame(X_tr.groupby(['clusters']).mean())\n\nclusters_summary = X_tr.groupby(['clusters']).describe()\nclusters_summary_transposed = clusters_summary.transpose()\nclusters_summary_transposed", "From all the clustering techniques that have been used (kmeans, spectral, mean shift and affinity), the ones that present more stability in terms of the variance within the clusters are kmeans and spectral clustering. When the bandwidth is close to quantile 1 (0.9), the number of clusters obtained with mean shift is reduced to 18 (from 58 in the case of using quantile 0.25). In this case most of the clusters are nearly empty, which is why this clustering method has been discarded. The same applies to Affinity clustering, as there are 251 clusters with less than 1% of the data in each of them.\nFrom the kmeans and spectral clustering perspective, each cluster contains between 1% and 43% of the datapoints. In this case, the best one from a similarity analysis perspective is kmeans, as it achieves similar silhouette values with fewer clusters.\nFor kmeans, the best solution is 3 clusters from an elbow methodology perspective, although the last cluster contains less than 1% of the data, so 2 clusters should be considered.\nFrom the 3 clusters we can see that in the first cluster men finished the marathon quicker than women, with an average pace of 7.36, the official timings being lower in all cases. Additionally, in cluster one we see that men are younger than women, entering in position 192 on average against 218 for women. In the third cluster men and women have similar ages, and there is also a difference of 20 positions from the first to the last person in this cluster of people aged 48 years old on average." ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
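The discussion above repeatedly leans on the fraction of data points that falls into each cluster (for example, clusters holding less than 1% of the data). A small helper along these lines, a sketch rather than part of the original notebook, makes that check explicit for any of the fitted models:

```python
import numpy as np

def cluster_shares(labels):
    """Return the fraction of data points assigned to each cluster label."""
    labels = np.asarray(labels)
    values, counts = np.unique(labels, return_counts=True)
    return {int(v): c / float(labels.size) for v, c in zip(values, counts)}

# Usage, e.g.: cluster_shares(kmeans.labels_) or cluster_shares(sc.labels_)
```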
geography-munich/sciprog
material/sub/jrjohansson/Lecture-7-Revision-Control-Software.ipynb
apache-2.0
[ "Revision control software\nJ.R. Johansson (jrjohansson at gmail.com)\nThe latest version of this IPython notebook lecture is available at http://github.com/jrjohansson/scientific-python-lectures.\nThe other notebooks in this lecture series are indexed at http://jrjohansson.github.io.", "from IPython.display import Image", "In any software development, one of the most important tools is revision control software (RCS).\nThey are used in virtually all software development and in all environments, by everyone and everywhere (no kidding!)\nRCS can be used on almost any digital content, so it is not only restricted to software development, and is also very useful for manuscript files, figures, data and notebooks!\nThere are two main purposes of RCS systems:\n\nKeep track of changes in the source code.\nAllow reverting back to an older revision if something goes wrong.\nWork on several \"branches\" of the software concurrently.\nTag revisions to keep track of which version of the software was used for what (for example, \"release-1.0\", \"paper-A-final\", ...)\n\n\nMake it possible for several people to collaboratively work on the same code base simultaneously.\nAllow many authors to make changes to the code.\nClearly communicate and visualize changes in the code base to everyone involved.\n\n\n\nBasic principles and terminology for RCS systems\nIn an RCS, the source code or digital content is stored in a repository. \n\n\nThe repository does not only contain the latest version of all files, but the complete history of all changes to the files since they were added to the repository. \n\n\nA user can checkout the repository, and obtain a local working copy of the files. All changes are made to the files in the local working directory, where files can be added, removed and updated. \n\n\nWhen a task has been completed, the changes to the local files are committed (saved to the repository).\n\n\nIf someone else has been making changes to the same files, a conflict can occur. In many cases conflicts can be resolved automatically by the system, but in some cases we might manually have to merge different changes together.\n\n\nIt is often useful to create a new branch in a repository, or a fork or clone of an entire repository, when we are doing larger experimental development. The main branch in a repository is often called master or trunk. When work on a branch or fork is completed, it can be merged into the master branch/repository.\n\n\nWith distributed RCSs such as GIT or Mercurial, we can pull and push changesets between different repositories. 
For example, between a local copy of the repository and a central online repository (for example on a community repository hosting site like github.com).\n\n\nSome good RCS software\n\nGIT (git) : http://git-scm.com/\nMercurial (hg) : http://mercurial.selenic.com/\n\nIn the rest of this lecture we will look at git, although hg is just as good and works in almost exactly the same way.\nInstalling git\nOn Linux:\n$ sudo apt-get install git\n\nOn Mac (with macports):\n$ sudo port install git\n\nThe first time you start to use git, you'll need to configure your author information:\n$ git config --global user.name 'Robert Johansson'\n$ git config --global user.email robert@riken.jp\n\nCreating and cloning a repository\nTo create a brand new empty repository, we can use the command git init repository-name:", "# create a new git repository called gitdemo:\n!git init gitdemo", "If we want to fork or clone an existing repository, we can use the command git clone repository:", "!git clone https://github.com/qutip/qutip", "Git clone can take a URL to a public repository, like above, or a path to a local directory:", "!git clone gitdemo gitdemo2", "We can also clone private repositories over secure protocols such as SSH:\n$ git clone ssh://myserver.com/myrepository\n\nStatus\nUsing the command git status we get a summary of the current status of the working directory. It shows if we have modified, added or removed files.", "!git status", "In this case, only the current ipython notebook has been added. It is listed as an untracked file, and is therefore not in the repository yet.\nAdding files and committing changes\nTo add a new file to the repository, we first create the file and then use the git add filename command:", "%%file README\n\nA file with information about the gitdemo repository.\n\n!git status", "After having created the file README, the command git status lists it as an untracked file.", "!git add README\n\n!git status", "Now that it has been added, it is listed as a new file that has not yet been committed to the repository.", "!git commit -m \"Added a README file\" README\n\n!git add Lecture-7-Revision-Control-Software.ipynb\n\n!git commit -m \"added notebook file\" Lecture-7-Revision-Control-Software.ipynb\n\n!git status ", "After committing the change to the repository from the local working directory, git status again reports that the working directory is clean.\nCommitting changes\nWhen files that are tracked by GIT are changed, they are listed as modified by git status:", "%%file README\n\nA file with information about the gitdemo repository.\n\nA new line.\n\n!git status", "Again, we can commit such changes to the repository using the git commit -m \"message\" command.", "!git commit -m \"added one more line in README\" README\n\n!git status", "Removing files\nTo remove a file that has been added to the repository, use git rm filename, which works similarly to git add filename:", "%%file tmpfile\n\nA short-lived file.", "Add it:", "!git add tmpfile\n\n!git commit -m \"adding file tmpfile\" tmpfile ", "Remove it again:", "!git rm tmpfile\n\n!git commit -m \"remove file tmpfile\" tmpfile ", "Commit logs\nThe messages that are added to the commit command are supposed to give a short (often one-line) description of the changes/additions/deletions in the commit. If the -m \"message\" is omitted when invoking the git commit command, an editor will be opened for you to type a commit message (for example useful when a longer commit message is required). 
\nWe can look at the revision log by using the command git log:", "!git log", "In the commit log, each revision is shown with a timestamp, a unique hash tag, the author information and the commit message.\nDiffs\nEvery commit results in a changeset, which has a \"diff\" describing the changes to the file associated with it. We can use git diff to see what has changed in a file:", "%%file README\n\nA file with information about the gitdemo repository.\n\nREADME files usually contain installation instructions, and information about how to get started using the software (for example).\n\n!git diff README", "That looks quite cryptic but is a standard form for describing changes in files. We can use other tools, like graphical user interfaces or web based systems to get a more easily understandable diff.\nIn github (a web-based GIT repository hosting service) it can look like this:", "Image(filename='images/github-diff.png')", "Discard changes in the working directory\nTo discard a change (revert to the latest version in the repository) we can use the checkout command like this:", "!git checkout -- README\n\n!git status", "Checking out old revisions\nIf we want to get the code for a specific revision, we can use \"git checkout\" and give it the hash code of the revision we are interested in as argument:", "!git log\n\n!git checkout 1f26ad648a791e266fbb951ef5c49b8d990e6461", "Now the content of all the files is as in the revision with the hash code listed above (the first revision).", "!cat README", "We can move back to \"the latest\" (master) with the command:", "!git checkout master \n\n!cat README\n\n!git status", "Tagging and branching\nTags\nTags are named revisions. They are useful for marking particular revisions for later reference. For example, we can tag our code with the tag \"paper-1-final\" when simulations for \"paper-1\" are finished and the paper submitted. Then we can always retrieve exactly the code used for that paper even if we continue to work on and develop the code for future projects and papers.", "!git log\n\n!git tag -a demotag1 -m \"Code used for this and that purpose\" \n\n!git tag -l \n\n!git show demotag1", "To retrieve the code in the state corresponding to a particular tag, we can use the git checkout tagname command:\n$ git checkout demotag1\n\nBranches\nWith branches we can create diverging code bases in the same repository. They are for example useful for experimental development that requires a lot of code changes that could break the functionality in the master branch. Once the development of a branch has reached a stable state it can always be merged back into the trunk. Branching-development-merging is a good development strategy when several people are involved in working on the same code base. 
But even in single author repositories it can often be useful to always keep the master branch in a working state: branch or fork before implementing a new feature, and later merge it back into the main trunk.\nIn GIT, we can create a new branch like this:", "!git branch expr1 ", "We can list the existing branches like this:", "!git branch", "And we can switch between branches using checkout:", "!git checkout expr1", "Make a change in the new branch.", "%%file README\n\nA file with information about the gitdemo repository.\n\nREADME files usually contain installation instructions, and information about how to get started using the software (for example).\n\nExperimental addition.\n\n!git commit -m \"added a line in expr1 branch\" README\n\n!git branch\n\n!git checkout master\n\n!git branch", "We can merge an existing branch and all its changesets into another branch (for example the master branch) like this:\nFirst change to the target branch:", "!git checkout master\n\n!git merge expr1\n\n!git branch ", "We can delete the branch expr1 now that it has been merged into the master:", "!git branch -d expr1\n\n!git branch\n\n!cat README", "Pulling and pushing changesets between repositories\nIf the repository has been cloned from another repository, for example on github.com, it automatically remembers the address of the parent repository (called origin):", "!git remote\n\n!git remote show origin", "pull\nWe can retrieve updates from the origin repository by \"pulling\" changesets from \"origin\" to our repository:", "!git pull origin", "We can register addresses to many different repositories, and pull in different changesets from different sources, but the default source is the origin from where the repository was first cloned (and the word origin could have been omitted from the line above).\npush\nAfter making changes to our local repository, we can push changes to a remote repository using git push. Again, the default target repository is origin, so we can do:", "!git status\n\n!git add Lecture-7-Revision-Control-Software.ipynb\n\n!git commit -m \"added lecture notebook about RCS\" Lecture-7-Revision-Control-Software.ipynb\n\n!git push", "Hosted repositories\nGithub.com is a git repository hosting site that is very popular with both open source projects (for which it is free) and private repositories (for which a subscription might be needed).\nWith a hosted repository it is easy to collaborate with colleagues on the same code base, and you get a graphical user interface where you can browse the code and look at commit logs, track issues etc. \nSome good hosted repositories are:\n\nGithub : http://www.github.com\nBitbucket: http://www.bitbucket.org", "Image(filename='images/github-project-page.png')", "Graphical user interfaces\nThere are also a number of graphical user interfaces for GIT. The available options vary a little bit from platform to platform:\nhttp://git-scm.com/downloads/guis", "Image(filename='images/gitk.png')", "Further reading\n\nhttp://git-scm.com/book\nhttp://www.vogella.com/articles/Git/article.html\nhttp://cheat.errtheblog.com/s/git" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
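A compact way to visualize the branch and merge history discussed in the lecture is git log's graph mode. These are standard git options, added here as a supplementary example in the same notebook shell style:

```python
# One line per commit, with an ASCII graph of branches and merges,
# ref names (branches and tags) shown, and all branches included.
!git log --oneline --graph --decorate --all
```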
quantopian/research_public
notebooks/lectures/Linear_Correlation_Analysis/notebook.ipynb
apache-2.0
[ "The Correlation Coefficient\nBy Evgenia \"Jenny\" Nitishinskaya and Delaney Granizo-Mackenzie with example algorithms by David Edwards\nPart of the Quantopian Lecture Series:\n\nwww.quantopian.com/lectures\ngithub.com/quantopian/research_public\n\n\nThe correlation coefficient measures the extent to which the relationship between two variables is linear. Its value is always between -1 and 1. A positive coefficient indicates that the variables are directly related, i.e. when one increases the other one also increases. A negative coefficient indicates that the variables are inversely related, so that when one increases the other decreases. The closer to 0 the correlation coefficient is, the weaker the relationship between the variables.\nThe correlation coefficient of two series $X$ and $Y$ is defined as\n$$r = \\frac{Cov(X,Y)}{std(X)std(Y)}$$\nwhere $Cov$ is the covariance and $std$ is the standard deviation.\nTwo random sets of data will have a correlation coefficient close to 0:\nCorrelation vs. Covariance\nCorrelation is simply a normalized form of covariance. They are otherwise the same and are often used semi-interchangeably in everyday conversation. It is obviously important to be precise with language when discussing the two, but conceptually they are almost identical.\nCovariance isn't that meaningful by itself\nLet's say we have two variables $X$ and $Y$ and we take the covariance of the two.", "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nX = np.random.rand(50)\nY = 2 * X + np.random.normal(0, 0.1, 50)\n\nnp.cov(X, Y)[0, 1]", "So now what? What does this mean? Correlation uses information about the variance of X and Y to normalize this metric. Once we've normalized the metric to the -1 to 1 scale, we can make meaningful statements and compare correlations.\nTo see how this is done consider the formula.\n$$\\frac{Cov(X, Y)}{std(X)std(Y)}$$\n$$= \\frac{Cov(X, Y)}{\\sqrt{var(X)}\\sqrt{var(Y)}}$$\n$$= \\frac{Cov(X, Y)}{\\sqrt{Cov(X, X)}\\sqrt{Cov(Y, Y)}}$$\nTo demonstrate this let's compare the correlation and covariance of two series.", "X = np.random.rand(50)\nY = 2 * X + 4\n\nprint 'Covariance of X and Y: \\n' + str(np.cov(X, Y))\nprint 'Correlation of X and Y: \\n' + str(np.corrcoef(X, Y))", "Why do both np.cov and np.corrcoef return matrices?\nThe covariance matrix is an important concept in statistics. Often people will refer to the covariance of two variables $X$ and $Y$, but in reality that is just one entry in the covariance matrix of $X$ and $Y$. For each input variable we have one row and one column. The diagonal is just the variance of that variable, or $Cov(X, X)$, entries off the diagonal are covariances between different variables. The matrix is symmetric across the diagonal. 
Let's check that this is true.", "cov_matrix = np.cov(X, Y)\n\n# We need to manually set the degrees of freedom on X to 1, as numpy defaults to 0 for variance\n# This is usually fine, but will result in a slight mismatch as np.cov defaults to 1\nerror = cov_matrix[0, 0] - X.var(ddof=1)\n\nprint 'error: ' + str(error)\n\nX = np.random.rand(50)\nY = np.random.rand(50)\n\nplt.scatter(X,Y)\nplt.xlabel('X Value')\nplt.ylabel('Y Value')\n\n# taking the relevant value from the matrix returned by np.cov\nprint 'Correlation: ' + str(np.cov(X,Y)[0,1]/(np.std(X)*np.std(Y)))\n# Let's also use the builtin correlation function\nprint 'Built-in Correlation: ' + str(np.corrcoef(X, Y)[0, 1])", "Now let's see what two correlated sets of data look like.", "X = np.random.rand(50)\nY = X + np.random.normal(0, 0.1, 50)\n\nplt.scatter(X,Y)\nplt.xlabel('X Value')\nplt.ylabel('Y Value')\n\nprint 'Correlation: ' + str(np.corrcoef(X, Y)[0, 1])", "Let's dial down the relationship by introducing more noise.", "X = np.random.rand(50)\nY = X + np.random.normal(0, .2, 50)\n\nplt.scatter(X,Y)\nplt.xlabel('X Value')\nplt.ylabel('Y Value')\n\nprint 'Correlation: ' + str(np.corrcoef(X, Y)[0, 1])", "Finally, let's see what an inverse relationship looks like.", "X = np.random.rand(50)\nY = -X + np.random.normal(0, .1, 50)\n\nplt.scatter(X,Y)\nplt.xlabel('X Value')\nplt.ylabel('Y Value')\n\nprint 'Correlation: ' + str(np.corrcoef(X, Y)[0, 1])", "We see a little bit of rounding error, but they are clearly the same value.\nHow is this useful in finance?\nDetermining related assets\nOnce we've established that two series are probably related, we can use that in an effort to predict future values of the series. For example, let's look at the price of Apple and a semiconductor equipment manufacturer, Lam Research Corporation.", "# Pull the pricing data for our two stocks and S&P 500\nstart = '2013-01-01'\nend = '2015-01-01'\nbench = get_pricing('SPY', fields='price', start_date=start, end_date=end)\na1 = get_pricing('LRCX', fields='price', start_date=start, end_date=end)\na2 = get_pricing('AAPL', fields='price', start_date=start, end_date=end)\n\nplt.scatter(a1,a2)\nplt.xlabel('LRCX')\nplt.ylabel('AAPL')\nplt.title('Stock prices from ' + start + ' to ' + end)\nprint \"Correlation coefficients\"\nprint \"LRCX and AAPL: \", np.corrcoef(a1,a2)[0,1]\nprint \"LRCX and SPY: \", np.corrcoef(a1,bench)[0,1]\nprint \"AAPL and SPY: \", np.corrcoef(bench,a2)[0,1]", "Constructing a portfolio of uncorrelated assets\nAnother reason that correlation is useful in finance is that uncorrelated assets produce the best portfolios. The intuition for this is that if the assets are uncorrelated, a drawdown in one will not correspond with a drawdown in another. This leads to a very stable return stream when many uncorrelated assets are combined.\nLimitations\nSignificance\nIt's hard to rigorously determine whether or not a correlation is significant, especially when, as here, the variables are not normally distributed. Their correlation coefficient is close to 1, so it's pretty safe to say that the two stock prices are correlated over the time period we use, but is this indicative of future correlation? If we examine the correlation of each of them with the S&P 500, we see that it is also quite high. So, AAPL and LRCX are slightly more correlated with each other than with the average stock.\nOne fundamental problem is that it is easy to datamine correlations by picking the right time period. 
To avoid this, one should compute the correlation of two quantities over many historical time periods and examine the distribution of the correlation coefficient. More details on why single point estimates are bad will be covered in future notebooks.\nAs an example, remember that the correlation of AAPL and LRCX from 2013-1-1 to 2015-1-1 was 0.95. Let's take the rolling 60-day correlation between the two to see how that varies.", "rolling_correlation = pd.rolling_corr(a1, a2, 60)\nplt.plot(rolling_correlation)\nplt.xlabel('Day')\nplt.ylabel('60-day Rolling Correlation')", "Non-Linear Relationships\nThe correlation coefficient can be useful for examining the strength of the relationship between two variables. However, it's important to remember that two variables may be associated in different, predictable ways which this analysis would not pick up. For instance, one variable might precisely follow the behavior of a second, but with a delay. There are techniques for dealing with this lagged correlation. Alternatively, a variable may be related to the rate of change of another. Neither of these relationships is linear, but both can be very useful if detected.\nAdditionally, the correlation coefficient can be very sensitive to outliers. This means that including or excluding even a couple of data points can alter your result, and it is not always clear whether these points contain information or are simply noise.\nAs an example, let's make the noise distribution poisson rather than normal and see what happens.", "X = np.random.rand(100)\nY = X + np.random.poisson(size=100)\n\nplt.scatter(X, Y)\n\nnp.corrcoef(X, Y)[0, 1]", "In conclusion, correlation is a powerful technique, but as always in statistics, one should be careful not to interpret results where there are none.\nThis presentation is for informational purposes only and does not constitute an offer to sell, a solicitation to buy, or a recommendation for any security; nor does it constitute an offer to provide investment advisory or other services by Quantopian, Inc. (\"Quantopian\"). Nothing contained herein constitutes investment advice or offers any opinion with respect to the suitability of any security, and any views expressed herein should not be taken as advice to buy, sell, or hold any security or as an endorsement of any security or company. In preparing the information contained herein, Quantopian, Inc. has not taken into account the investment needs, objectives, and financial circumstances of any particular investor. Any views expressed and data illustrated herein were prepared based upon information, believed to be reliable, available to Quantopian, Inc. at the time of publication. Quantopian makes no guarantees as to their accuracy or completeness. All information is subject to change and may quickly become unreliable for various reasons, including changes in market conditions or economic circumstances." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
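One caveat for current readers: pd.rolling_corr, used in the rolling-correlation cell above, was deprecated and later removed from pandas. A sketch of the modern equivalent, reusing the a1 and a2 price series from the notebook:

```python
# Modern pandas replacement for pd.rolling_corr(a1, a2, 60):
rolling_correlation = a1.rolling(window=60).corr(a2)
rolling_correlation.plot()
```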
mne-tools/mne-tools.github.io
0.17/_downloads/73d54262ed918b5836ccb242d27450bb/plot_read_noise_covariance_matrix.ipynb
bsd-3-clause
[ "%matplotlib inline", "=========================================\nReading/Writing a noise covariance matrix\n=========================================\nPlot a noise covariance matrix.", "# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>\n#\n# License: BSD (3-clause)\n\nfrom os import path as op\nimport mne\nfrom mne.datasets import sample\n\nprint(__doc__)\n\ndata_path = sample.data_path()\nfname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')\nfname_evo = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')\n\ncov = mne.read_cov(fname_cov)\nprint(cov)\nevoked = mne.read_evokeds(fname_evo)[0]", "Show covariance", "cov.plot(evoked.info, exclude='bads', show_svd=False)" ]
[ "code", "markdown", "code", "markdown", "code" ]
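The notebook's title mentions writing a noise covariance matrix as well, but only the reading side is shown. The write side is a single call to mne.write_cov; the output filename below is illustrative, and MNE expects it to end in -cov.fif:

```python
# Write the covariance matrix back out (filename is illustrative).
mne.write_cov('sample_audvis-copy-cov.fif', cov)
```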
GoogleCloudPlatform/mlops-on-gcp
workshops/tfx-caip-tf23/lab-02-tfx-pipeline/solutions/lab-02.ipynb
apache-2.0
[ "Continuous training with TFX and Google Cloud AI Platform\nLearning Objectives\n\nUse the TFX CLI to build a TFX pipeline.\nDeploy a TFX pipeline version without tuning to a hosted AI Platform Pipelines instance.\nCreate and monitor a TFX pipeline run using the TFX CLI.\nDeploy a new TFX pipeline version with tuning enabled to a hosted AI Platform Pipelines instance.\nCreate and monitor another TFX pipeline run directly in the KFP UI.\n\nIn this lab, you will utilize the following tools and services to deploy and run a TFX pipeline on Google Cloud that automates the development and deployment of a TensorFlow 2.3 WideDeep Classifier to predict forest cover from cartographic data:\n\nThe TFX CLI utility to build and deploy a TFX pipeline.\nA hosted AI Platform Pipeline instance (Kubeflow Pipelines) for TFX pipeline orchestration.\nDataflow jobs for scalable, distributed data processing for TFX components.\nAn AI Platform Training job for model training and flock management for parallel tuning trials.\nAI Platform Prediction as a model server destination for blessed pipeline model versions.\nCloudTuner and AI Platform Vizier for advanced model hyperparameter tuning using the Vizier algorithm.\n\nYou will then create and monitor pipeline runs using the TFX CLI as well as the KFP UI.\nSetup\nUpdate lab environment PATH to include TFX CLI and skaffold", "import yaml\n\n# Set `PATH` to include the directory containing TFX CLI and skaffold.\nPATH=%env PATH\n%env PATH=/home/jupyter/.local/bin:{PATH}", "Validate lab package version installation", "!python -c \"import tfx; print('TFX version: {}'.format(tfx.__version__))\"\n!python -c \"import kfp; print('KFP version: {}'.format(kfp.__version__))\"", "Note: this lab was built and tested with the following package versions:\nTFX version: 0.25.0\nKFP version: 1.0.4\n(Optional) If running the above command results in different package versions or you receive an import error, upgrade to the correct versions by running the cell below:", "%pip install --upgrade --user tfx==0.25.0\n%pip install --upgrade --user kfp==1.0.4", "Note: you may need to restart the kernel to pick up the correct package versions.\nValidate creation of AI Platform Pipelines cluster\nNavigate to the AI Platform Pipelines page in the Google Cloud Console.\nNote: you may have already deployed an AI Pipelines instance during the Setup for the lab series. If so, you can proceed using that instance. If not:\n1. Create or select an existing Kubernetes cluster (GKE) and deploy AI Platform. Make sure to select \"Allow access to the following Cloud APIs https://www.googleapis.com/auth/cloud-platform\" to allow for programmatic access to your pipeline by the Kubeflow SDK for the rest of the lab. Also, provide an App instance name such as \"tfx\" or \"mlops\". \nValidate the deployment of your AI Platform Pipelines instance in the console before proceeding.\nReview: example TFX pipeline design pattern for Google Cloud\nThe pipeline source code can be found in the pipeline folder.", "%cd pipeline\n\n!ls -la", "The config.py module configures the default values for the environment specific settings and the default values for the pipeline runtime parameters. \nThe default values can be overwritten at compile time by providing the updated values in a set of environment variables. 
You will set custom environment variables later in this lab.\nThe pipeline.py module contains the TFX DSL defining the workflow implemented by the pipeline.\nThe preprocessing.py module implements the data preprocessing logic for the Transform component.\nThe model.py module implements the training, tuning, and model building logic for the Trainer and Tuner components.\nThe runner.py module configures and executes KubeflowDagRunner. At compile time, the KubeflowDagRunner.run() method converts the TFX DSL into the pipeline package in the argo format for execution on your hosted AI Platform Pipelines instance.\nThe features.py module contains feature definitions common across preprocessing.py and model.py.\nExercise: build your pipeline with the TFX CLI\nYou will use the TFX CLI to compile and deploy the pipeline. As explained in the previous section, the environment specific settings can be provided through a set of environment variables and embedded into the pipeline package at compile time.\nConfigure your environment resource settings\nUpdate the below constants with the settings reflecting your lab environment. \n\nGCP_REGION - the compute region for AI Platform Training, Vizier, and Prediction.\nARTIFACT_STORE - an existing GCS bucket. You can use any bucket or use the GCS bucket created during installation of AI Platform Pipelines. The default bucket name will contain the kubeflowpipelines- prefix.", "# Use the following command to identify the GCS bucket for metadata and pipeline storage.\n!gsutil ls", "CUSTOM_SERVICE_ACCOUNT - in the GCP console, click on the Navigation Menu. Navigate to IAM & Admin, then to Service Accounts and use the service account starting with the prefix 'tfx-tuner-caip-service-account'. This enables CloudTuner and the Google Cloud AI Platform extensions Tuner component to work together and allows for distributed and parallel tuning backed by AI Platform Vizier's hyperparameter search algorithm. Please see the lab setup README for setup instructions.\n\n\nENDPOINT - set the ENDPOINT constant to the endpoint of your AI Platform Pipelines instance. The endpoint to the AI Platform Pipelines instance can be found on the AI Platform Pipelines page in the Google Cloud Console. Open the SETTINGS for your instance and use the value of the host variable in the Connect to this Kubeflow Pipelines instance from a Python client via Kubeflow Pipelines SDK section of the SETTINGS window. The format is '...pipelines.googleusercontent.com'.", "#TODO: Set your environment resource settings here for GCP_REGION, ARTIFACT_STORE_URI, ENDPOINT, and CUSTOM_SERVICE_ACCOUNT.\nGCP_REGION = 'us-central1'\nARTIFACT_STORE_URI = 'gs://dougkelly-sandbox-kubeflowpipelines-default' #Change\nENDPOINT = '6f857f6a72ef2a99-dot-us-central2.pipelines.googleusercontent.com' #Change\nCUSTOM_SERVICE_ACCOUNT = 'tfx-tuner-caip-service-account@dougkelly-sandbox.iam.gserviceaccount.com' #Change\n\nPROJECT_ID = !(gcloud config get-value core/project)\nPROJECT_ID = PROJECT_ID[0]\n\n# Set your resource settings as environment variables. These override the default values in pipeline/config.py.\n%env GCP_REGION={GCP_REGION}\n%env ARTIFACT_STORE_URI={ARTIFACT_STORE_URI}\n%env CUSTOM_SERVICE_ACCOUNT={CUSTOM_SERVICE_ACCOUNT}\n%env PROJECT_ID={PROJECT_ID}", "Set the compile time settings to first create a pipeline version without hyperparameter tuning\nDefault pipeline runtime environment values are configured in the pipeline folder config.py. 
You will set their values directly below:\n\n\nPIPELINE_NAME - the pipeline's globally unique name. For each pipeline update, each pipeline version uploaded to KFP will be reflected on the Pipelines tab in the Pipeline name > Version name dropdown in the format PIPELINE_NAME_datetime.now().\n\n\nMODEL_NAME - the pipeline's unique model output name for AI Platform Prediction. For multiple pipeline runs, each pushed blessed model will create a new version with the format 'v{}'.format(int(time.time())).\n\n\nDATA_ROOT_URI - the URI for the raw lab dataset gs://workshop-datasets/covertype/small.\n\n\nCUSTOM_TFX_IMAGE - the image name of your pipeline container built by skaffold and published by Cloud Build to Cloud Container Registry in the format 'gcr.io/{}/{}'.format(PROJECT_ID, PIPELINE_NAME).\n\n\nRUNTIME_VERSION - the TensorFlow runtime version. This lab was built and tested using TensorFlow 2.3.\n\n\nPYTHON_VERSION - the Python runtime version. This lab was built and tested using Python 3.7.\n\n\nUSE_KFP_SA - the pipeline can run using a security context of the GKE default node pool's service account or the service account defined in the user-gcp-sa secret of the Kubernetes namespace hosting Kubeflow Pipelines. If you want to use the user-gcp-sa service account, change the value of USE_KFP_SA to True. Note that the default AI Platform Pipelines configuration does not define the user-gcp-sa secret.\n\n\nENABLE_TUNING - boolean value indicating whether to add the Tuner component to the pipeline or use hyperparameter defaults. See the model.py and pipeline.py files for details on how this changes the pipeline topology across pipeline versions. You will create pipeline versions without and with tuning enabled in the subsequent lab exercises for comparison.", "PIPELINE_NAME = 'tfx_covertype_continuous_training'\nMODEL_NAME = 'tfx_covertype_classifier'\nDATA_ROOT_URI = 'gs://workshop-datasets/covertype/small'\nCUSTOM_TFX_IMAGE = 'gcr.io/{}/{}'.format(PROJECT_ID, PIPELINE_NAME)\nRUNTIME_VERSION = '2.3'\nPYTHON_VERSION = '3.7'\nUSE_KFP_SA=False\nENABLE_TUNING=False\n\n%env PIPELINE_NAME={PIPELINE_NAME}\n%env MODEL_NAME={MODEL_NAME}\n%env DATA_ROOT_URI={DATA_ROOT_URI}\n%env KUBEFLOW_TFX_IMAGE={CUSTOM_TFX_IMAGE}\n%env RUNTIME_VERSION={RUNTIME_VERSION}\n%env PYTHON_VERSION={PYTHON_VERSION}\n%env USE_KFP_SA={USE_KFP_SA}\n%env ENABLE_TUNING={ENABLE_TUNING}", "Compile your pipeline code\nYou can build and upload the pipeline to the AI Platform Pipelines instance in one step, using the tfx pipeline create command. The tfx pipeline create command goes through the following steps:\n- (Optional) Builds the custom image that provides a runtime environment for TFX components, or uses the latest image of the installed TFX version \n- Compiles the pipeline code into a pipeline package \n- Uploads the pipeline package via the ENDPOINT to the hosted AI Platform instance.\nAs you debug the pipeline DSL, you may prefer to first use the tfx pipeline compile command, which only executes the compilation step. After the DSL compiles successfully you can use tfx pipeline create to go through all steps.", "!tfx pipeline compile --engine kubeflow --pipeline_path runner.py", "Note: you should see a {PIPELINE_NAME}.tar.gz file appear in your current pipeline directory.\nExercise: deploy your pipeline container to AI Platform Pipelines with TFX CLI\nAfter the pipeline code compiles without any errors you can use the tfx pipeline create command to perform the full build and deploy the pipeline. 
You will deploy your compiled pipeline container hosted on Google Container Registry e.g. gcr.io/[PROJECT_ID]/tfx_covertype_continuous_training to run on AI Platform Pipelines with the TFX CLI.", "# TODO: Your code here to use the TFX CLI to deploy your pipeline image to AI Platform Pipelines.\n\n!tfx pipeline create \\\n--pipeline_path=runner.py \\\n--endpoint={ENDPOINT} \\\n--build_target_image={CUSTOM_TFX_IMAGE}", "Hint: review the TFX CLI documentation on the \"pipeline group\" to create your pipeline. You will need to specify the --pipeline_path to point at the pipeline DSL and runner defined locally in runner.py, plus the --endpoint and --build_target_image arguments using the environment variables specified above.\nNote: you should see a build.yaml file in your pipeline folder created by skaffold. The TFX CLI compile triggers a custom container to be built with skaffold using the instructions in the Dockerfile.\nIf you need to redeploy the pipeline you can first delete the previous version using tfx pipeline delete, or you can update the pipeline in-place using tfx pipeline update.\nTo delete the pipeline:\ntfx pipeline delete --pipeline_name {PIPELINE_NAME} --endpoint {ENDPOINT}\nTo update the pipeline:\ntfx pipeline update --pipeline_path runner.py --endpoint {ENDPOINT}\nCreate and monitor a pipeline run with the TFX CLI\nAfter the pipeline has been deployed, you can trigger and monitor pipeline runs using the TFX CLI.\nHint: review the TFX CLI documentation on the \"run group\".", "# TODO: your code here to trigger a pipeline run with the TFX CLI\n\n!tfx run create --pipeline_name={PIPELINE_NAME} --endpoint={ENDPOINT}", "To view the status of existing pipeline runs:", "!tfx run list --pipeline_name {PIPELINE_NAME} --endpoint {ENDPOINT}", "To retrieve the status of a given run:", "RUN_ID='[YOUR RUN ID]'\n\n!tfx run status --pipeline_name {PIPELINE_NAME} --run_id {RUN_ID} --endpoint {ENDPOINT}", "Important\nA full pipeline run without tuning enabled will take about 40 minutes to complete. You can view the run's progress using the TFX CLI commands above or in the Kubeflow Pipelines UI.\nExercise: deploy a pipeline version with tuning enabled\nIncorporating automatic model hyperparameter tuning into a continuous training TFX pipeline workflow enables faster experimentation, development, and deployment of a top performing model.\nThe previous pipeline version read from hyperparameter default values in the search space defined in _get_hyperparameters() in model.py and used these values to build a TensorFlow WideDeep Classifier model.\nLet's now deploy a new pipeline version with the Tuner component added to the pipeline that calls out to the AI Platform Vizier service for distributed and parallelized hyperparameter tuning. The Tuner component \"best_hyperparameters\" artifact will be passed directly to your Trainer component to deploy the top performing model. Review pipeline.py to see how this environment variable changes the pipeline topology. Also, review the tuning function in model.py for configuring CloudTuner.\nNote that you might not want to tune the hyperparameters every time you retrain your model due to the computational cost. 
Once you have used the Tuner to determine a good set of hyperparameters, you can remove the Tuner from your pipeline and use model hyperparameters defined in your model code, or use an ImporterNode to import the Tuner \"best_hyperparameters\" artifact from a previous Tuner run to your model Trainer.", "ENABLE_TUNING=True\n\n%env ENABLE_TUNING={ENABLE_TUNING}", "Compile your pipeline code", "!tfx pipeline compile --engine kubeflow --pipeline_path runner.py", "Deploy your pipeline container to AI Platform Pipelines with the TFX CLI", "#TODO: your code to update your pipeline \n!tfx pipeline update --pipeline_path runner.py --endpoint {ENDPOINT}", "Trigger a pipeline run from the Kubeflow Pipelines UI\nOn the AI Platform Pipelines page, click OPEN PIPELINES DASHBOARD. A new browser tab will open. Select the Pipelines tab to the left, where you see the PIPELINE_NAME pipeline you deployed previously. You should see 2 pipeline versions. \nClick on the most recent pipeline version with tuning enabled, which will open up a window with a graphical display of your TFX pipeline directed graph. \nNext, click the Create a run button. Verify the Pipeline name and Pipeline version are pre-populated and optionally provide a Run name and Experiment to logically group the run metadata under before hitting Start to trigger the pipeline run.\nImportant\nA full pipeline run with tuning enabled will take about 50 minutes and can be executed in parallel while the previous pipeline run without tuning continues running. \nTake the time to review the pipeline metadata artifacts created in the GCS artifact repository for each component, including data splits, your TensorFlow SavedModel, model evaluation results, etc. as the pipeline executes. In the GCP console, you can also view the Dataflow jobs for pipeline data processing as well as the AI Platform Training jobs for model training and tuning.\nWhen your pipeline runs are complete, review your model versions on Cloud AI Platform Prediction and the model evaluation metrics. Did your model performance improve with hyperparameter tuning?\nNext Steps\nIn this lab, you learned how to build and deploy a TFX pipeline with the TFX CLI and then update, build and deploy a new pipeline with automatic hyperparameter tuning. You practiced triggering continuous pipeline runs using the TFX CLI as well as the Kubeflow Pipelines UI.\nIn the next lab, you will construct a Cloud Build CI/CD workflow that further automates the building and deployment of the TensorFlow WideDeep Classifier pipeline code introduced in this lab.\nLicense\n<font size=-1>Licensed under the Apache License, Version 2.0 (the \\\"License\\\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \\\"AS IS\\\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.</font>" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
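Besides the TFX CLI, the deployed pipeline can also be inspected programmatically with the KFP SDK that was validated at the top of the lab. A minimal sketch, assuming the ENDPOINT value defined earlier and the KFP 1.x client API:

```python
import kfp

# Connect to the hosted AI Platform Pipelines instance.
client = kfp.Client(host=ENDPOINT)

# List the deployed pipelines to confirm the upload succeeded.
for p in client.list_pipelines().pipelines or []:
    print(p.id, p.name)
```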
oditorium/blog
iPython/ReportLab1-Intro.ipynb
agpl-3.0
[ "ReportLab\nNote: this code needs a running installation of reportlab as well as of pillow. The latter needs some image processing libraries to function properly. See the associated blogpost for installation instructions.\nThe reportlab user guide is here and the Pillow guide is here\n1. Tools and Assets\nDisplay hook for pillow images\nNormally, pillow images are displayed in IPython only as object description strings. This code inserts a hook that automatically draws them, akin to the matplotlib interactive backends.\nsource", "from io import BytesIO\nfrom IPython.core import display\nfrom PIL import Image\n\n\ndef display_pil_image(im):\n    \"\"\"Displayhook function for PIL Images, rendered as PNG.\"\"\"\n\n    b = BytesIO()\n    im.save(b, format='png')\n    data = b.getvalue()\n\n    ip_img = display.Image(data=data, format='png', embed=True)\n    return ip_img._repr_png_()\n\npng_formatter = get_ipython().display_formatter.formatters['image/png']\ndpi = png_formatter.for_type(Image.Image, display_pil_image)", "Assets\nWe need to get the following assets into the local directory for the code to run (once the assets are there the wget statements can be commented out).", "!wget http://blogs.oditorium.com/technology/wp-content/uploads/2014/08/logo-oditorium-whitebg.jpg\n!wget http://blogs.oditorium.com/technology/wp-content/uploads/2014/08/graph.png", "2. Reportlab - canvas\nWriting text to the canvas\nThis code fragment generates a one-page file test1.pdf with some text lines written onto it using drawString.", "from reportlab.pdfgen import canvas\n\nmsg = \"This is not a message\"\nc = canvas.Canvas(\"test1.pdf\")\nfor i in range(1,10):\n    c.drawString(50*i,50*i,msg)\nc.showPage()\nc.save()\n\n!ls -l test1.pdf", "Writing images from disk to the canvas\nThe function drawImage writes an image from disk to the canvas. An image written that way is stored in the pdf file only once, which makes this important for multiple-use images, eg logos. There is another function drawInlineImage that places an image directly into the pdf flow. This can be slightly faster, but it will duplicate storage needs for repeating images.", "msg = \"This is really not a message\"\nc = canvas.Canvas(\"test2.pdf\")\nfor i in range(1,10):\n    c.drawString(50*i,50*i,msg)\nc.drawImage('graph.png',100,100)\nc.showPage()\nc.save()\n\n!ls -l test2.pdf", "Writing images to the canvas on the fly\nOften we generate images on the fly - eg using matplotlib - and don't want to save them to disk just to include them. The following code shows how this is done. source", "import matplotlib.pyplot as plt\nfrom io import BytesIO\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.units import inch, cm\nfrom reportlab.lib.utils import ImageReader\n\nfig = plt.figure(figsize=(4, 3))\nplt.plot([1,4,3,2])\nplt.ylabel('some numbers')\n\nimgdata = BytesIO()\nfig.savefig(imgdata, format='png')\nimgdata.seek(0)  # rewind the data\n\nImage = ImageReader(imgdata)\n\nc = canvas.Canvas('test3.pdf')\nc.drawImage(Image, cm, cm, inch, inch)\nc.save()\n\n!ls -l test3.pdf", "3. Reportlab - platypus\nPlatypus is reportlab's layout engine (see the user guide from page 59). 
It defines the following hierarchy of templates\n\n\nDocTemplate - a DocTemplate describes the overall document\n\n\nPageTemplate - a PageTemplate describes a page in the document; usually different pages of the same document share the same template, but for example the title page might be different from the rest\n\n\nFrame - a Frame is a layout are within a page; in the most simple case there will be one frame per page, but eg for 2col layouts there will be 2 etc\n\n\nFlowable - a Flowable is something that can flow into a frame, eg text or an image\n\n\nDifferent page templates, different styles\nThe functions myTitlePage and myRegularPage define page templates. The command styles = getSampleStyleSheet() gets a whole list of styles that can be passed into the Paragraph function.", "def lipsum():\n return \"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aliquam ultrices ligula et libero tempus, ac pretium velit ultricies. Pellentesque sit amet vestibulum quam. Maecenas turpis ante, feugiat eu ultricies feugiat, ultricies ac elit. Praesent eleifend, nibh eu tempor consequat, nisi nunc hendrerit mi, at rhoncus massa sem quis nulla. Nunc ullamcorper mi a risus pretium, ac faucibus massa vehicula. Vestibulum venenatis aliquam felis eget hendrerit. Nulla porta massa placerat velit ultrices dictum. Curabitur mattis, lacus in convallis porta, ligula enim dignissim est, vel aliquam elit metus nec dolor. Vestibulum lacinia ac magna adipiscing iaculis. Suspendisse potenti. Nunc adipiscing magna id suscipit viverra. Sed tristique tortor ac erat mattis aliquam. Etiam nunc libero, iaculis non lectus quis, tincidunt adipiscing lacus. Aliquam in auctor dui.\"\n#lipsum()\n\nfrom reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer\nfrom reportlab.lib.styles import getSampleStyleSheet\nfrom reportlab.rl_config import defaultPageSize\nfrom reportlab.lib.units import inch\n\nPAGE_HEIGHT=defaultPageSize[1]; PAGE_WIDTH=defaultPageSize[0]\nstyles = getSampleStyleSheet()\n\nTitle = \"<Document Title>\"\ndef myTitlePage(canvas, doc):\n canvas.saveState()\n canvas.setFont('Times-Bold',48)\n canvas.drawCentredString(PAGE_WIDTH/2.0, PAGE_HEIGHT-108, Title)\n canvas.restoreState()\n\npageinfo = \" - <some document info here>\"\ndef myRegularPage(canvas, doc):\n canvas.saveState()\n canvas.setFont('Times-Roman',9)\n canvas.drawString(inch, 0.75 * inch, \"page %d %s\" % (doc.page, pageinfo))\n canvas.restoreState()\n\n!rm test4.pdf\ndoc = SimpleDocTemplate(\"test4.pdf\")\nStory = [Spacer(1,2*inch)]\nstyle = styles[\"Normal\"]\nstyleH1 = styles[\"Heading1\"]\nstyleH2 = styles[\"Heading2\"]\n\nfor i in range(100): \n if i % 10 == 0: \n p = Paragraph(\"Headline 1\", styleH1)\n Story.append(p)\n \n if i % 3 == 0: \n p = Paragraph(\"Headline 1\", styleH2)\n Story.append(p)\n \n bogustext = lipsum()\n p = Paragraph(bogustext, style)\n Story.append(p)\n Story.append(Spacer(1,0.2*inch))\n\ndoc.build(Story, onFirstPage=myTitlePage, onLaterPages=myRegularPage)\n\n!ls -l test4.pdf", "Flowables and frames\nIt is possible to construct a document solely based on flowables and frames", "!rm test5.pdf\n\nfrom reportlab.pdfgen.canvas import Canvas\nfrom reportlab.lib.styles import getSampleStyleSheet\nfrom reportlab.lib.units import inch\nfrom reportlab.platypus import Paragraph, Frame\nstyles = getSampleStyleSheet()\nstyleN = styles['Normal']\nstyleH = styles['Heading1']\n\nstory = []\nstory.append(Paragraph(\"This is a Heading\",styleH))\nfor i in range(100):\n story.append(Paragraph(lipsum(),styleN))\n 
story.append(Spacer(1,0.2*inch))\nc = Canvas('test5.pdf')\nf = Frame(3*inch, inch, 3*inch, 9*inch, showBoundary=1)\nf.addFromList(story,c)\nc.save()\n\n!ls -l test5.pdf", "Two column layout\nsource", "from reportlab.platypus import BaseDocTemplate, Frame, Paragraph, PageBreak, PageTemplate\nfrom reportlab.lib.styles import getSampleStyleSheet\nimport random\n\nwords = \"lorem ipsum dolor sit amet consetetur sadipscing elitr sed diam nonumy eirmod tempor invidunt ut labore et\".split()\n\nstyles=getSampleStyleSheet()\nstory=[]\n\ndoc = BaseDocTemplate('test6.pdf',showBoundary=1)\n\nframe1 = Frame(doc.leftMargin, doc.bottomMargin, doc.width/2-6, doc.height, id='col1')\nframe2 = Frame(doc.leftMargin+doc.width/2+6, doc.bottomMargin, doc.width/2-6, doc.height, id='col2')\n\nstory.append(Paragraph(\" \".join([random.choice(words) for i in range(1000)]),styles['Normal']))\ndoc.addPageTemplates([PageTemplate(id='TwoCol',frames=[frame1,frame2]), ])\n\ndoc.build(story)\n\n!ls -l test6.pdf", "Mixed layout, one and two cols, title and back\nsource", "from reportlab.platypus import BaseDocTemplate, Frame, Paragraph, NextPageTemplate, PageBreak, PageTemplate, Image\nfrom reportlab.lib.units import inch\nfrom reportlab.lib.styles import getSampleStyleSheet\n\ndoc = BaseDocTemplate('test7.pdf',showBoundary=1)\nstyles=getSampleStyleSheet()\nstory=[]\n\ndef static_title(canvas,doc):\n canvas.saveState()\n canvas.drawImage('logo-oditorium-whitebg.jpg',doc.width-2.5*inch,doc.height, width=4*inch, preserveAspectRatio=True)\n canvas.setFont('Times-Roman',48)\n canvas.drawString(inch, doc.height - 1*inch, \"TITLE\")\n canvas.restoreState()\n \ndef static_back(canvas,doc):\n canvas.saveState()\n canvas.setFont('Times-Roman',9)\n canvas.drawString(inch, 0.75 * inch, \"Back\")\n canvas.restoreState()\n\ndef static_1col(canvas,doc):\n canvas.saveState()\n canvas.drawImage('logo-oditorium-whitebg.jpg',doc.width+0.5*inch,doc.height+0.5*inch, width=1*inch, preserveAspectRatio=True)\n canvas.setFont('Times-Roman',9)\n canvas.drawString(inch, 0.75 * inch, \"One Col - Page %d\" % doc.page)\n canvas.restoreState()\n\ndef static_2col(canvas,doc):\n canvas.saveState()\n canvas.drawImage('logo-oditorium-whitebg.jpg',doc.width+0.5*inch,doc.height+0.5*inch, width=1*inch, preserveAspectRatio=True)\n canvas.setFont('Times-Roman',9)\n canvas.drawString(inch, 0.75 * inch, \"Two Col - Page %d\" % doc.page)\n canvas.restoreState()\n\nframe_title = Frame(doc.leftMargin, doc.bottomMargin, doc.width, doc.height - 5*inch, id='normal')\nframe_back = Frame(doc.leftMargin, doc.bottomMargin, doc.width, doc.height - 8*inch, id='normal')\n\nframe_1col = Frame(doc.leftMargin, doc.bottomMargin, doc.width, doc.height, id='normal')\n\nframe1_2col = Frame(doc.leftMargin, doc.bottomMargin, doc.width/2-6, doc.height, id='col1')\nframe2_2col = Frame(doc.leftMargin+doc.width/2+6, doc.bottomMargin, doc.width/2-6,\n doc.height, id='col2')\n\ndoc.addPageTemplates([\n PageTemplate(id='Title',frames=frame_title,onPage=static_title), \n PageTemplate(id='Back',frames=frame_back,onPage=static_back), \n PageTemplate(id='OneCol',frames=frame_1col,onPage=static_1col), \n PageTemplate(id='TwoCol',frames=[frame1_2col,frame2_2col],onPage=static_2col),\n ])\n\n\nstory.append(Paragraph(\"Title, \"*100,styles['Normal']))\nstory.append(NextPageTemplate('OneCol'))\nstory.append(PageBreak())\nfor i in range(0,15): \n story.append(Paragraph(lipsum(),styles['Normal']))\n 
story.append(Spacer(1,0.2*inch))\nstory.append(NextPageTemplate('TwoCol'))\nstory.append(PageBreak())\nfor i in range(0,15): \n story.append(Paragraph(lipsum(),styles['Normal']))\n story.append(Spacer(1,0.2*inch))\nstory.append(NextPageTemplate('OneCol'))\nstory.append(PageBreak())\nfor i in range(0,5): \n story.append(Paragraph(lipsum(),styles['Normal']))\n story.append(Spacer(1,0.2*inch))\nstory.append(Image('graph.png'))\nfor i in range(0,5): \n story.append(Paragraph(lipsum(),styles['Normal']))\n story.append(Spacer(1,0.2*inch))\nstory.append(NextPageTemplate('Back'))\nstory.append(PageBreak())\nstory.append(Paragraph(\"Back, \"*100,styles['Normal']))\n\ndoc.build(story)\n\n!ls -l test7.pdf", "Landscape", "from reportlab.platypus import BaseDocTemplate, Frame, Paragraph, NextPageTemplate, PageBreak, FrameBreak, PageTemplate, Image, Spacer\nfrom reportlab.lib.units import inch\nfrom reportlab.lib.pagesizes import A4, A5, landscape, portrait\nfrom reportlab.lib.styles import getSampleStyleSheet\n\ndoc = BaseDocTemplate('test8.pdf',showBoundary=1, pagesize=landscape(A4))\nstyles=getSampleStyleSheet()\nstory=[]\n\ndef static_title(canvas,doc):\n canvas.saveState()\n canvas.setFont('Times-Roman',48)\n canvas.drawString(inch, doc.height - 1*inch, \"TITLE\")\n canvas.restoreState()\n \ndef static_page(canvas,doc):\n canvas.saveState()\n canvas.setFont('Times-Roman',9)\n canvas.drawString(inch, 0.75 * inch, \"Page - Page %d\" % doc.page)\n canvas.restoreState()\n\nframe_title = Frame(doc.leftMargin, doc.bottomMargin, doc.width, doc.height - 5*inch, id='normal')\n\nframe1_page = Frame(doc.leftMargin, doc.bottomMargin, doc.width/2-6, doc.height, id='left')\nframe2_page = Frame(doc.leftMargin+doc.width/2+6, doc.bottomMargin, doc.width/2-6, doc.height, id='right')\n\ndoc.addPageTemplates([\n PageTemplate(id='Title',frames=frame_title,onPage=static_title), \n PageTemplate(id='Page',frames=[frame1_page, frame2_page],onPage=static_page), \n ])\n\n\nstory.append(Paragraph(\"Title, \"*100,styles['Normal']))\nstory.append(NextPageTemplate('Page'))\nstory.append(PageBreak())\nstory.append(Image('graph.png', width=doc.width/3))\nstory.append(FrameBreak())\nfor i in range(0,2): \n story.append(Paragraph(lipsum(),styles['Normal']))\n story.append(Spacer(1,0.2*inch))\nstory.append(PageBreak())\nfor i in range(0,2): \n story.append(Paragraph(lipsum(),styles['Normal']))\n story.append(Spacer(1,0.2*inch))\nstory.append(FrameBreak())\nstory.append(Image('graph.png'))\n\ndoc.build(story)\n\n!ls -l test8.pdf", "HTTPServer\nTo serve the whole current directory, uncomment the line below and point a browser at this server's IP address on port 8080. Note that the command blocks and needs to be interrupted via the Kernel menu above; this is a feature, not a bug, because the server should only run when needed. (On Python 3, the equivalent command is python -m http.server 8080.)", "#!python -m SimpleHTTPServer 8080\n#!netstat -tulpn\n#!kill 25239 " ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
cosmoscalibur/herramientas_computacionales
Presentaciones/Notas/06_Versionamiento_git.ipynb
mit
[ "Git es un sistema de manejo de versiones distribuido creado por Linus Torvalds. \nLos sistemas de manejo de versiones permiten tener un control detallado sobre el historial de cambios de un archivo o conjunto de archivos creando instantaneas de estos conjuntos cada que se presenta un cambio. Estos sistemas de control de versiones pueden ser centralizados (ejemplo subversion) o distribuidos (ejemplo git), acorde a si existe un repositorio principal donde se maneja el historial y desde el cual todos los demas repositorios extraen las versiones requeridas, o si todo el historial se distribuye en cada repositorio. \nPara el desarrollo de esta sesión se requiere la creación de cuenta en github o bitbucket. \nInstalación\nPara la instalación de git use el siguiente procedimiento.\nWindows\nSi usa sistema operativo windows puede descargar los binarios del cliente de git for windows y durante la instalación usar la configuración recomendada. \nUbuntu Linux\nEn su sistema linux, muy probablemente encontrará el cliente de comandos para instalar desde el repositorio de su distribución. \nsudo apt install git\n\nSi desea realizar la autenticación por medio de SSH requiere instalar los paquetes adicionales a SSH, sin embargo se recomienda la autenticación por medio de HTTP para reducir el número de pasos requeridos, y cuando se desea trabajar en equipos de uso no habitual o que siendo de uso frecuente podría ser usado por un tercero. Igualmente, al usar autenticación por HTTP es posible conectarse a pesar de los bloqueos que por seguridad establecen las politicas de muchas entidades. \nMac\nEn mac, puede usar igualmente a linux la instalación por binarios disponibles en la página oficial, git. \nAlternativas\nPuede usar un cliente multiplataforma y gráfico de fácil instalación como gitkraken o github desktop (este último solo para windows o mac), sin embargo para fines de este curso se exigirá el manejo directo del cliente de comandos de git. \nUso\nUna vez instalado nuestro cliente git podemos consultar sus opciones y argumentos.", "%%bash\ngit --help", "Configuración\nPara una adecuada identificación de nuestros aportes en los repositorios, es necesario configurar nuestro nombre y correo en git. \ngit config --global user.name \"YOUR NAME\"\ngit config --global user.email \"YOUR EMAIL ADDRESS\"\n\nCreación de repositorios\nUna vez creada la cuenta github, debe crear un repositorio git en ella y copiar la dirección de clonado, la cual se usa con el fin de obtener una copia del repositorio localmente.", "%%bash\ngit clone https://github.com/cosmoscalibur/herramientas_computacionales.git herramientas", "De esta forma crearemos una copia del repositorio git herramientas_computacionales en el directorio ./herramientas. Si no se indica el directorio, se crea un directorio nuevo en la posición actual. \nTras el clonado, tenemos a disposición todo el directorio del repositorio, incluido el directorio .git que almacena la información del historial y repositorios (para este momento, heredada del repositorio clonado).", "%%bash\ncd herramientas\nls -oha", "Se evidencia que tras el clonado, tenemos configurados los repositorios remotos por defecto.", "%%bash\ncd herramientas\ngit remote -v", "En caso de ser un repositorio nuevo o desear reinicializar (aceptar nuevas plantillas o mover el directorio), debemos crear el directorio .git y agregar los elementos que teniamos en nuestro directorio. 
Whenever files are added (or any update is made to the files and directories), it is suggested to make a commit to record the action in our history.", "%%bash\ncd herramientas\ngit init", "Remote repositories\nOnce the repository is initialized or cloned, we can add our remote repository, which was previously created on github or bitbucket. If only local use is intended, or if it is exactly the cloned repository, this will not be necessary.", "%%bash\ncd herramientas\ngit remote add pruebas https://github.com/cosmoscalibur/pruebas.git\ngit remote -v", "It is also possible to remove access to the remote server, with git remote rm.", "%%bash\ncd herramientas\ngit remote rm origin\ngit remote -v", "Objects and references\nNext, and to avoid possible file conflicts, it is advisable to start with a fetch or pull to obtain the objects and references of the remote repository (this applies if we created the repository with the license and README.md files that the git service invites us to create), and to merge the content existing on the remote with the local content, on the local side. \ngit pull pruebas master\n\nThe previous step is avoided if we only use the service to create an empty remote repository instead of populating it with the license-file and description-file wizards. If, in the previous step, files in the remote and local repositories have the same names but differ, there may be conflicts that have to be resolved manually. \nWe will create test files to add to the repository.", "%%bash\ncd herramientas\necho \"linea 1\" > probar_1\nprintf \"s\\n 5\" > probar_2", "Although in this case it is clear that our files have not been added to the repository, we will check the status of the files against their record in the repository.", "%%bash\ncd herramientas\ngit status", "Now we add the files of our local repository to the index.", "%%bash\ncd herramientas\ngit add . # The \".\" adds everything.\ngit commit -m \"Added test files 1 and 2.\"", "We will make a modification in one of the files and stage it directly.", "%%bash\ncd herramientas\necho \"linea 2\" >> probar_1\nprintf \"otro\\narchivo\\npara probar.\" > probar_3\ngit add probar_1 probar_3\ngit commit -m \"Updated test 1 and added test 3.\"", "It is possible to tell git directly to delete files with\ngit rm archivo\n\nor also, when the file is deleted directly, it is picked up by git with\ngit add -A\n\nwhich updates the file tree and tracks even the deleted files. However, it is advisable to make file additions and removals as specific as possible, in order to keep a descriptive record that helps with proper troubleshooting or with describing the changes from one version of the code to another. Keep in mind that in the case of directories, the recursive argument must be given for deletion. \nIf you want to remove a file from the repository but not from disk, it must be removed from the repository cache. \ngit rm --cached archivo\n\nAfter this, we make the associated commit. \nUpdating to/from remote repositories\nNow we want to update our remote repository with our local repository, for which we must do a push with the name of the remote repository and the name of the branch, which by default, if there is only one, is master.
It is possible to configure push so that it sends all the branches of the repository, but finer-grained control is preferable in order to reduce data traffic on the network.", "%%bash\ncd herramientas\ngit push pruebas master", "While the push runs we will be asked for the username and password of our account on the git service. Using the https protocol, by default we get 3600 seconds after each authentication, or, if we prefer, we can use ssh and create an authentication key in order to have permanent access on our private machine without requiring continuous authentication. \nTo update our local repository from the remote repository we perform a pull, just as indicated in the previous subsection for updating the local repository with the files that may have been created with the git service's wizard. \nOther actions\nReading about other important git actions is recommended, namely: branch and merge, to experiment with modifications of a code base and later merge them back into the original, or log and show, to view the history of modifications.\nIf we specifically want to see the state of the repository at a given commit, we use checkout followed by the commit. This action does not modify the repository in any way, and to continue from the latest state we only need to return to our current state by doing a checkout of master. If we want to keep the inspected state, we revert to the given commit with revert.\nIf the previous action is performed on a file, the changes must be undone with a checkout of HEAD followed by the file; otherwise, the state of the inspected file will show up as a modification.\nThere are also actions of interest when using the git services, such as forking, which makes use of pull requests (actions not included in git itself, but usable with clients based on the git services' APIs).\nIt is possible to have submodules, for which git offers the submodule utility for their management.", "%%bash\ncd herramientas\ngit log --oneline\n\n%%bash\ncd herramientas\ngit show d193ae6\n\n%%bash\ncd herramientas\necho \"Actual\"\nls\ngit checkout e4a4be5\necho \"Anterior\"\nls\ngit checkout master\n\n%%bash\ncd herramientas\necho \"Actual\"\ncat probar_1\ngit checkout d193ae6 probar_1\necho \"Anterior\"\ncat probar_1\ngit checkout HEAD probar_1", "References\n\nGit Downloads. Git. Accessed August 19, 2016. \nBootCamp. Github. Accessed August 20, 2016. \nGit Doc. Git. Accessed August 20, 2016. \nGit Tutorials. Atlassian. Accessed August 20, 2016. \nVersion control with git. Software Carpentry. Accessed August 20, 2016. \nManaging remotes. Github. Accessed August 21, 2016. \nBasic Git Commands. Atlassian. Accessed August 22, 2016." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
jss367/assemble
exploratory_notebooks/4chan_sample_thread_exploration.ipynb
mit
[ "4chan Sample Thread Exploration\nThis notebook contains the cleaning and exploration of the chan_example csv which is hosted on the far-right s3 bucket. It contains cleaning out the html links from the text of the messages with beautiful soup, grouping the messages into their threads, and an exploratory sentiment analysis.\nFurther work could be to get the topic modelling for messages working and perhaps look at sentiment regarding different topics.", "import boto3\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport numpy as np\n\n%matplotlib inline\nimport matplotlib.pyplot as plt\n\nsession = boto3.Session(profile_name='default')\ns3 = session.resource('s3')\nbucket = s3.Bucket(\"far-right\")\nsession.available_profiles\n\n# print all objects in bucket\nfor obj in bucket.objects.all():\n if \"chan\" in obj.key:\n #print(obj.key)\n pass\n\nbucket.download_file('fourchan/chan_example.csv', 'chan_example.csv')\n\nchan = pd.read_csv(\"chan_example.csv\")\n# remove the newline tags. They're not useful for our analysis and just clutter the text.\nchan.com = chan.com.astype(str).apply(lambda x: x.replace(\"<br>\", \" \"))\n\nbucket.download_file('info-source/daily/20170228/fourchan/fourchan_1204.json', '2017-02-28-1204.json')\nchan2 = pd.read_json(\"2017-02-28-1204.json\")\n\nsoup = BeautifulSoup(chan.com[19], \"lxml\")\nquotes = soup.find(\"span\")\nfor quote in quotes.contents:\n print(quote.replace(\">>\", \"\"))\nparent = soup.find(\"a\")\nprint(parent.contents[0].replace(\">>\", \"\"))\n\nprint(chan.com[19])\n\n# If there's a quote and then the text, this would work. \nprint(chan.com[19].split(\"</span>\")[-1])\n\ndef split_comment(comment):\n \"\"\"Splits up a comment into parent, quotes, and text\"\"\"\n \n # I used lxml to \n soup = BeautifulSoup(comment, \"lxml\")\n quotes, quotelink, text = None, None, None\n try:\n quotes = soup.find(\"span\")\n quotes = [quote.replace(\">>\", \"\") for quote in quotes.contents]\n except:\n pass\n try:\n quotelink = soup.find(\"a\").contents[0].replace(\">>\", \"\")\n except: \n pass\n # no quote or parent\n if quotes is None and quotelink is None:\n text = comment\n # Parent but no quote\n if quotelink is not None and quotes is None:\n text = comment.split(\"a>\")[-1]\n # There is a quote\n if quotes is not None:\n text = comment.split(\"</span>\")[-1]\n return {'quotes':quotes, 'quotelink': quotelink, 'text': text}\n\ndf = pd.DataFrame({'quotes':[], 'quotelink':[], 'text':[]})\nfor comment in chan['com']:\n df = df.append(split_comment(comment), ignore_index = True)\n \nfull = pd.merge(chan, df, left_index = True, right_index = True)\n\nquotes = pd.Series()\nquotelinks = pd.Series()\ntexts = pd.Series()\nfor comment in chan['com']:\n parse = split_comment(comment)\n quotes.append(pd.Series(parse['quotes']))\n quotelinks.append(pd.Series(parse['quotelink']))\n texts.append(pd.Series(parse['text']))\nchan['quotes'] = quotes\nchan['quotelinks'] = quotelinks\nchan['text'] = texts", "Message Threads\nForchan messages are all part of a message thread, which can be reassembled by following the parents for each post and chaining them back together. This code creates a thread ID and maps that thread ID to the corresponding messages. \nI don't know currently whether or not messages are linear, or if they can be a tree structure. This section of code simply tries to find which messages belong to which threads\nLooks like a thread is all just grouped by the parent comment. 
Doh\nHere I'll group the threads into a paragraph-like structure and store them in a dictionary with the key being the parent chan_id.", "threads = full['parent'].unique()\nfull_text = {}\nfor thread in threads:\n full_text[int(thread)] = \". \".join(full[full['parent'] == thread]['text'])", "Now we can do some topic modeling on the different threads\nFollowing along with the topic modelling tweet exploration, we're going to tokenize our messages and then build a corpus from them. We'll then use the gensim library to run our topic model over the tokenized messages.", "import gensim\nimport pyLDAvis.gensim as gensimvis\nimport pyLDAvis\nimport spacy\n\n# spaCy pipeline for tokenization/lemmatization (this import and load were\n# missing from the original cell, which used `nlp` without defining it)\nnlp = spacy.load('en')\n\ntokenized_messages = []\nfor msg in nlp.pipe(full['text'], n_threads = 100, batch_size = 100):\n ents = msg.ents\n msg = [token.lemma_ for token in msg if token.is_alpha and not token.is_stop]\n tokenized_messages.append(msg)\n\n# Build the corpus using gensim \ndictionary = gensim.corpora.Dictionary(tokenized_messages)\nmsg_corpus = [dictionary.doc2bow(x) for x in tokenized_messages]\nmsg_dictionary = gensim.corpora.Dictionary([])\n \n# gensim.corpora.MmCorpus.serialize(tweets_corpus_filepath, tweets_corpus)", "Creating an Emotion Sentiment Classifier\nLabeled dataset provided by @crowdflower hosted on data.world. The dataset contains 40,000 tweets which are labeled as one of 13 emotions. Here I looked at the top 6 emotions, since the bottom few had very few tweets by comparison, so it would be hard to get a properly split dataset for training/testing. The one I'd most want to include that isn't here yet is anger, but neutral, worry, happiness, sadness, love, and surprise are a pretty good starting point for emotion classification regarding news tweets.\nhttps://data.world/crowdflower/sentiment-analysis-in-text", "import nltk\nfrom nltk.classify import NaiveBayesClassifier\nfrom nltk.classify import accuracy\nfrom nltk import WordNetLemmatizer\nlemma = nltk.WordNetLemmatizer()\ndf = pd.read_csv('https://query.data.world/s/8c7bwy8c55zx1t0c4yyrnjyax')\n\nemotions = list(df.groupby(\"sentiment\").agg(\"count\").sort_values(by = \"content\", ascending = False).head(6).index)\nprint(emotions)\nemotion_subset = df[df['sentiment'].isin(emotions)]\n\ndef format_sentence(sent):\n ex = [i.lower() for i in sent.split()]\n lemmas = [lemma.lemmatize(i) for i in ex]\n \n return {word: True for word in nltk.word_tokenize(\" \".join(lemmas))}\n\n\ndef create_train_vector(row):\n \"\"\"\n Formats a row when used in df.apply to create a train vector to be used by a \n Naive Bayes Classifier from the nltk library.\n \"\"\"\n sentiment = row[1]\n text = row[3]\n return [format_sentence(text), sentiment]\n\ntrain = emotion_subset.apply(create_train_vector, axis = 1)\n# Split off 10% of our train vector to be for test.\n\ntest = train[:int(0.1*len(train))]\ntrain = train[int(0.1*len(train)):] # keep the remaining 90% for training\n\nemotion_classifier = NaiveBayesClassifier.train(train)\n\nprint(accuracy(emotion_classifier, test))", "64% test accuracy on the test set is nothing to phone home about.
It's also likely to be a lot less accurate on our data from the 4chan messages, since those use very different language than the messages in our training set.", "emotion_classifier.show_most_informative_features()\n\nfor comment in full['text'].head(10):\n print(emotion_classifier.classify(format_sentence(comment)), \": \", comment)", "Looking at this sample of 10 posts, I'm not convinced of the accuracy of this classifier on the far-right data, but out of curiosity, what did it classify the rest of the messages as?", "full['emotion'] = full['text'].apply(lambda x: emotion_classifier.classify(format_sentence(x)))\n\ngrouped_emotion_messages = full.groupby('emotion').count()[[2]]\ngrouped_emotion_messages.columns = [\"count\"]\ngrouped_emotion_messages\n\ngrouped_emotion_messages.plot.bar()", "Considering the dataset is extremely out of sample with regard to the training data, there's no way this emotion classifier is accurate.\nThese results do seem semi-logical though, based on some knowledge of the group. Online trolls are well known for their anger and rudeness, which could seemingly be classified as surprise and worry on a more standard data set." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
yashdeeph709/Algorithms
PythonBootCamp/Complete-Python-Bootcamp-master/List Comprehensions.ipynb
apache-2.0
[ "Comprehensions\nIn addition to sequence operations and list methods, Python includes a more advanced operation called a list comprehension.\nList comprehensions allow us to build out lists using a different notation. You can think of it as essentially a one line for loop built inside of brackets. For a simple example:\nExample 1", "# Grab every letter in string\nlst = [x for x in 'word']\n\n# Check\nlst", "This is the basic idea of a list comprehension. If you're familiar with mathematical notation this format should feel familiar for example: x^2 : x in { 0,1,2...10} \nLets see a few more example of list comprehensions in Python:\nExample 2", "# Square numbers in range and turn into list\nlst = [x**2 for x in range(0,11)]\n\nlst", "Example 3\nLets see how to add in if statements:", "# Check for even numbers in a range\nlst = [x for x in range(11) if x % 2 == 0]\n\nlst", "Example 4\nCan also do more complicated arithmetic:", "# Convert Celsius to Fahrenheit\ncelsius = [0,10,20.1,34.5]\n\nfahrenheit = [ ((float(9)/5)*temp + 32) for temp in Celsius ]\n\nfahrenheit", "Example 5\nWe can also perform nested list comprehensions, for example:", "lst = [ x**2 for x in [x**2 for x in range(11)]]\nlst", "Later on in the course we will learn about generator comprehensions. After this lecture you should feel comfortable reading and writing basic list comprehensions." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
junhwanjang/DataSchool
Lecture/14. 선형 회귀 분석/8) patsy 패키지 소개.ipynb
mit
[ "patsy 패키지 소개\n\n회귀 분석 전처리 패키지\nencoding/transform/design matrix 기능\nR-style formula 문자열 지원\n\ndesign matrix\n\ndmatrix(fomula[, data])\nR-style formula 문자열을 받아서 X matrix 생성\n자동으로 intercept (bias) column 생성\nlocal namespace에서 변수를 찾음\ndata parameter에 pandas DataFrame을 주면 column lable에서 변수를 찾음", "from patsy import dmatrix, dmatrices\n\nnp.random.seed(0)\nx1 = np.random.rand(5) + 10\nx2 = np.random.rand(5) * 10\nx1, x2\n\ndmatrix(\"x1\")", "R-style formula\n| 기호 | 설명 |\n|-|-|\n|+| 설명 변수 추가 |\n|-| 설명 변수 제거 |\n|1, 0| intercept. (제거시 사용) |\n|:| interaction (곱) |\n|*| a*b = a + b + a:b |\n|/| a/b = a + a:b |\n|~| 종속 - 독립 관계 |", "dmatrix(\"x1 - 1\")\n\ndmatrix(\"x1 + 0\")\n\ndmatrix(\"x1 + x2\")\n\ndmatrix(\"x1 + x2 - 1\")\n\ndf = pd.DataFrame(np.array([x1, x2]).T, columns=[\"x1\", \"x2\"])\ndf\n\ndmatrix(\"x1 + x2 - 1\", data=df)", "변환(Transform)\n\nnumpy 함수 이름 사용 가능\n사용자 정의 함수 사용 가능\npatsy 전용 함수 이름 사용 가능\ncenter(x)\nstandardize(x)\nscale(x)", "dmatrix(\"x1 + np.log(np.abs(x2))\", data=df)\n\ndef doubleit(x):\n return 2 * x\n\ndmatrix(\"doubleit(x1)\", data=df)\n\ndmatrix(\"center(x1) + standardize(x2)\", data=df)", "변수 보호 I()\n\n다른 formula 기호로부터 보호", "dmatrix(\"x1 + x2\", data=df)\n\ndmatrix(\"I(x1 + x2)\", data=df)", "다항 선형 회귀", "dmatrix(\"x1 + I(x1**2) + I(x1**3) + I(x1**4)\", data=df)", "카테고리 변수", "df[\"a1\"] = pd.Series([\"a1\", \"a1\", \"a2\", \"a2\", \"a3\", \"a5\"])\ndf[\"a2\"] = pd.Series([1, 4, 5, 6, 8, 9])\ndf\n\ndmatrix(\"a1\", data=df)\n\ndmatrix(\"a2\", data=df)\n\ndmatrix(\"C(a2)\", data=df)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
tBuLi/symfit
docs/examples/ex_tikhonov.ipynb
mit
[ "Example: Matrix Equations using Tikhonov Regularization\nThis is an example of the use of matrix expressions in symfit models. This is illustrated by performing an inverse Laplace transform using Tikhonov regularization, but this could be adapted to other problems involving matrix quantities.", "from symfit import (\n\tvariables, parameters, Model, Fit, exp, laplace_transform, symbols, \n\tMatrixSymbol, sqrt, Inverse, CallableModel\n) \nimport numpy as np\nimport matplotlib.pyplot as plt\n", "Say $f(t) = t * exp(- t)$, and $F(s)$ is the Laplace transform of $f(t)$. Let us first evaluate this transform using sympy.", "t, f, s, F = variables('t, f, s, F')\nmodel = Model({f: t * exp(- t)})\nlaplace_model = Model(\n\t{F: laplace_transform(model[f], t, s, noconds=True)}\n)\nprint(laplace_model)\n", "Suppose we are confronted with a dataset $F(s)$, but we need to know $f(t)$. This means an inverse Laplace transform has to be performed. However, numerically this operation is ill-defined. In order to solve this, Tikhonov regularization can be performed.\nTo demonstrate this, we first generate mock data corresponding to $F(s)$ and will then try to find (our secretly known) $f(t)$.", "epsilon = 0.01 # 1 percent noise\ns_data = np.linspace(0, 10, 101)\nF_data = laplace_model(s=s_data).F\nF_sigma = epsilon * F_data\nnp.random.seed(2)\nF_data = np.random.normal(F_data, F_sigma)\n\nplt.errorbar(s_data, F_data, yerr=F_sigma, fmt='none', label=r'$\\mathcal{L}[f] = F(s)$')\nplt.xlabel(r'$s_i$')\nplt.ylabel(r'$F(s_i)$')\nplt.xlim(0, None)\nplt.legend()\n", "We will now invert this data, using the procedure outlined in \\cite{}.", "N_s = symbols('N_s', integer=True) # Number of s_i points\n\nM = MatrixSymbol('M', N_s, N_s)\nW = MatrixSymbol('W', N_s, N_s)\nFs = MatrixSymbol('Fs', N_s, 1)\nc = MatrixSymbol('c', N_s, 1)\nd = MatrixSymbol('d', 1, 1)\nI = MatrixSymbol('I', N_s, N_s)\na, = parameters('a')\n\nmodel_dict = {\n W: Inverse(I + M / a**2),\n c: - W * Fs,\n d: sqrt(c.T * c),\n}\ntikhonov_model = CallableModel(model_dict)\nprint(tikhonov_model)", "A CallableModel is needed because derivatives of matrix expressions sometimes cause problems.\nBuild required matrices, ignore s=0 because it causes a singularity.", "I_mat = np.eye(len(s_data[1:]))\ns_i, s_j = np.meshgrid(s_data[1:], s_data[1:])\nM_mat = 1 / (s_i + s_j)\ndelta = np.atleast_2d(np.linalg.norm(F_sigma))\nprint('d', delta)\n", "Perform the fit", "model_data = {\n\tI.name: I_mat,\n\tM.name: M_mat,\n\tFs.name: F_data[1:],\n} \nall_data = dict(**model_data, **{d.name: delta})\n\n\nfit = Fit(tikhonov_model, **all_data)\nfit_result = fit.execute()\nprint(fit_result)\n", "Check the quality of the reconstruction", "ans = tikhonov_model(**model_data, **fit_result.params)\nF_re = - M_mat.dot(ans.c) / fit_result.value(a)**2\nprint(ans.c.shape, F_re.shape)\n\nplt.errorbar(s_data, F_data, yerr=F_sigma, label=r'$F(s)$', fmt='none')\nplt.plot(s_data[1:], F_re, label=r'$F_{re}(s)$')\nplt.xlabel(r'$x$')\nplt.xlabel(r'$F(s)$')\nplt.xlim(0, None)\nplt.legend()", "Reconstruct $f(t)$ and compare with the known original.", "t_data = np.linspace(0, 10, 101)\nf_data = model(t=t_data).f\nf_re_func = lambda x: - np.exp(- x * s_data[1:]).dot(ans.c) / fit_result.value(a)**2\nf_re = [f_re_func(t_i) for t_i in t_data]\n\nplt.axhline(0, color='black')\nplt.axvline(0, color='black')\nplt.plot(t_data, f_data, label=r'$f(t)$')\nplt.plot(t_data, f_re, label=r'$f_{re}(t)$')\nplt.xlabel(r'$t$')\nplt.xlabel(r'$f(t)$')\nplt.legend()", "Not bad, for an ill-defined problem." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
variani/study
02-intro-python/projects/pandas/babynames.ipynb
cc0-1.0
[ "About\nR data-munging idioms and their equvalents in pandas/python:\n\nSubset with multiple-choise %in%:\nR: `subset(df, name %in% c(\"Andrew\", \"Andre\"))\npython: df.query('name in [\"Andrew\", \"Andre\"]') via link\n\n\n\nSet up", "%qtconsole\n\n%matplotlib inline", "Imports", "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom ggplot import *", "Read data", "df = pd.read_csv(\"data/babynames.csv\")", "Basic statistics\nLooking at the first samples:", "df.head()", "How many unique names are collected?", "(df['name'].nunique(), df['name'].size)\n\ndf['name'].nunique() / float(df['name'].size)", "We might thhink that approx. 20 entries per name are collected. Should it be equal to the length of the years period?", "df['year'].max() - df['year'].min()", "Not really. That means there are many zero entries in n column for many names.\nFilter\nBy a single name", "df['name'].isin(['Andrew']).value_counts()\n\ndf.query('name == \"Andrew\"').shape\n\nggplot(df.query('name == \"Joe\"'), aes(x = 'year', y = 'n')) + geom_point() + ggtitle(\"name: Joe\")", "Don't forget the names are given for two genders.", "ggplot(df.query('name == \"Joe\"'), aes(x = 'year', y = 'n', color = 'sex')) +\\\n geom_point(size = 10) + geom_smooth(span = 0.1) + ggtitle(\"name: Joe\")", "Joe as a name for girls seems to be OK. What about Mary?", "ggplot(df.query('name == \"Mary\"'), aes(x = 'year', y = 'n', color = 'sex')) +\\\n geom_point(size = 10) + geom_smooth(span = 0.1) + ggtitle(\"name: Mary\")", "Name Mary for boys? Let's do a bit more subsetting by n &lt; 500 filter.", "ggplot(df.query('name == \"Mary\" & n < 500'), aes(x = 'year', y = 'n', color = 'sex')) +\\\n geom_point(size = 10) + geom_smooth(span = 0.1) + ggtitle(\"name: Mary, n < 500\")", "Now let's get a record, where the number of female names Mary was the maximum.", "ind = np.argmax(df.query('name == \"Mary\" & sex == \"M\"')['n'])\nind\n\ndf.query('index == @ind')", "By multiple names", "df.query('name in [\"Andrew\", \"Andrey\"]').shape\n\nanames = ['Andrew', 'Andrey', 'Andres', 'Andre', 'And']\n\ndf.query('name in @anames').shape\n\nggplot(df.query('name in @anames'), aes(x = 'year', y = 'n', color = 'name')) +\\\n geom_point(size = 10) + geom_smooth(span = 0.1) + facet_wrap(\"sex\")\n\nggplot(df.query('name in @anames & sex == \"M\"'), aes(x = 'year', y = 'n', color = 'name')) +\\\n geom_point(size = 10) + geom_smooth(span = 0.1, se = False) + scale_y_log(10) +\\\n ggtitle('Andre* male names ')\n\n(df.query('name in @anames')\n .groupby(['name', 'sex'])\n [['n']].sum())\n\n(df.query('name in @anames & sex == \"M\"')\n .groupby(['name'])\n [['n']].sum())\n\nsf = (df.query('name in @anames & sex == \"M\"')\n .groupby(['name'])\n .agg({'n': {'total': sum, 'max': lambda x: x.max()},\n 'prop': {'max': max}}))\nsf\n\nsf = (df.query('name in @anames & sex == \"M\"')\n .groupby(['name'])\n .apply(lambda x: sum(x.n)))\nsf\n\nsf = (df.query('name in @anames & sex == \"M\"')\n .groupby(['name'])\n .apply(lambda x: pd.DataFrame({\n 'min': min(x.n), \n 'total': sum(x.n)}, index = x.index)))\nsf" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
statkraft/shyft-doc
notebooks/nea-example/simulation-configured.ipynb
lgpl-3.0
[ "Configured Shyft simulations\nIntroduction\nShyft provides a toolbox for running hydrologic simulations. As it was designed to work in an operational environment, we've provided several different workflows for running a model simulation. The main concept to be aware of is that while we demonstrate and build on the use of a 'configuration', nearly all simulation functionality is also accessible with pure python through access to the API. This is the encouraged approach to simulation. The use of configurations is intended to be a mechanism of running repeated operational simulations when one is interested in archiving and storing (potentially to a database) the specifics of the simulation.\nBelow we start with a high level description using a configuration object, and in Part II of the simulation notebooks we describe the approach using the lower level APIs. It is recommended, if you intend to use Shyft for any kind of hydrologic exploration, to become familiar with the API functionality.\nThis notebook briefly runs through the simulation process for a pre-configured catchment. The following steps are described:\n\nLoading required python modules and setting path to Shyft installation\nConfiguration of a Shyft simulation\nRunning a Shyft simulation\nPost-processing: Fetching simulation results from the simulator-object.\n\n1. Loading required python modules and setting path to SHyFT installation\nShyft requires a number of different modules to be loaded as part of the package. Below, we describe the required steps for loading the modules, and note that some steps are only required for the use of the jupyter notebook.", "# Pure python modules and jupyter notebook functionality\n# first you should import the third-party python modules which you'll use later on\n# the first line enables that figures are shown inline, directly in the notebook\n%pylab inline\nimport os\nimport datetime as dt\nfrom os import path\nimport sys\nfrom matplotlib import pyplot as plt", "The Shyft Environment\nThis next step is highly specific on how and where you have installed Shyft. If you have followed the guidelines at github, and cloned the three shyft repositories: i) shyft, ii) shyft-data, and iii) shyft-doc, then you may need to tell jupyter notebooks where to find shyft. Uncomment the relevant lines below.\nIf you have a 'system' shyft, or used conda install -s sigbjorn shyft to install shyft, then you probably will want to make sure you have set the SHYFTDATA directory correctly, as otherwise, Shyft will assume the above structure and fail. This has to be done before import shyft. 
In that case, uncomment the relevant lines below.\nnote: it is most likely that you'll need to do one or the other.", "# try to auto-configure the path, -will work in all cases where doc and data\n# are checked out at same level\nshyft_data_path = path.abspath(\"../../../shyft-data\")\nif path.exists(shyft_data_path) and 'SHYFT_DATA' not in os.environ:\n os.environ['SHYFT_DATA']=shyft_data_path\n \n# shyft should be available either by its install in python\n# or by PYTHONPATH set by user prior to starting notebook.\n# This is equivalent to the two lines below\n# shyft_path=path.abspath('../../../shyft')\n# sys.path.insert(0,shyft_path)\n\n# once the shyft_path is set correctly, you should be able to import shyft modules\n\nimport shyft\n\n# if you have problems here, it may be related to having your LD_LIBRARY_PATH\n# pointing to the appropriate libboost_python libraries (.so files)\nfrom shyft import api\nfrom shyft.repository.default_state_repository import DefaultStateRepository\nfrom shyft.orchestration.configuration.yaml_configs import YAMLSimConfig\nfrom shyft.orchestration.simulators.config_simulator import ConfigSimulator", "2. Configuration of a SHyFT simulation\nThe following shows how to set up a Shyft simulation using the yaml_configs.YAMLSimConfig class. Note that this is a high level approach, providing a working example for a simple simulation. More advanced users will want to eventually make use of direct API calls, as outlined in Part II.\nAt this point, you may want to have a look at the configuration file used in this example.\n```\nneanidelva:\n region_config_file: neanidelva_region.yaml\n model_config_file: neanidelva_model_calibrated.yaml\n datasets_config_file: neanidelva_datasets.yaml\n interpolation_config_file: neanidelva_interpolation.yaml\n start_datetime: 2013-09-01T00:00:00\n run_time_step: 86400 # 1 day time step\n number_of_steps: 365 # 1 year\n region_model_id: 'neanidelva-ptgsk'\n #interpolation_id: 2 # this is optional (default 0)\n initial_state:\n repository:\n class: !!python/name:shyft.repository.generated_state_repository.GeneratedStateRepository\n params:\n model: !!python/name:shyft.api.pt_gs_k.PTGSKModel\n tags: []\n...\n```\nThe file is structured as follows:\nneanidelva is the name of the simulation. Your configuration file may contain multiple \"stanzas\" or blocks of simulation configurations. You'll see below that we use the name to instantiate a configuration object.\nregion_config_file points to another yaml file that contains basic information about the region of the simulation. You can explore that file here\nmodel_config_file contains the model parameters. Note that when you are calibrating the model, this is the file that you would put your optimized parameters into once you have completed a calibration.\ndatasets_config_file contains details regarding the input datasets and the repositories they are contained in. You can see this file here\ninterpolation_config_file provides details regarding how the observational data in your catchment or region will be interpolated to the domain of the simulation. If you are using a repository with distributed data, the interpolation is still used. See this file for more details.\nThe following:\nstart_datetime: 2013-09-01T00:00:00\n run_time_step: 86400 # 1 day time step\n number_of_steps: 365 # 1 year\n region_model_id: 'neanidelva-ptgsk'\nare considered self-explanatory. Note that region_model_id is simply a string name, but it should be unique.
We will explain the details regarding initial_state later on in this tutorial.", "# set up configuration using *.yaml configuration files\n# here is the *.yaml file that configures the simulation:\nconfig_file_path = os.path.abspath(\"./nea-config/neanidelva_simulation.yaml\")\n \n# and here we pass it to the configurator, together with the name of the region \n# stated in the simulation.yaml file (here: \"neanidelva\") which we would like to run\ncfg = YAMLSimConfig(config_file_path, \"neanidelva\")\n\nprint(cfg.datasets_config)\n\n\n# Once we have all the configuration in place (read in from the .yaml files)\n# we can start to do the simulation. Here we use the ConfigSimulator class \n# to initialize a simulator-object. Shyft has several ways to achieve this\n# but the .yaml configs are the most straightforward\n\nsimulator = ConfigSimulator(cfg) \n# Now the simulator is ready to run!", "The simulator and the region_model\nIt is important to note that the simulator provides a wrapping of underlying API functionality. It is designed to provide a quick and simple interface for conducting runs based on a configuration saved in a .yaml file, or otherwise. Core functionality is contained in the region_model which is just an instance of a model stack, or what is referred to as a model in the api intro notebook. This is an important concept in Shyft. To understand the framework, one should be familiar with this class. \nBefore we begin the simulation, one should explore the simulator object with tab completion. As an example, you can see here how to get the number of cells in the region that was set up. This is used later for extracting the data.\nMost importantly, understand that the simulator has an attribute called region_model. Most of the underlying functionality of the simulator methods are actually making calls to the region_model, which is an instantiation of one of Shyft's \"model stack\" classes. To conduct more advanced simulations one would use this object directly.", "#simulator. #try tab completion\nn_cells = simulator.region_model.size()\nprint(n_cells)", "3. Running a SHyFT simulation\nOkay, so thus far we have set up our cfg object which contains most of the information required to run the simulation. We can simply run the simulation using the run method.", "simulator.run()", "But this may be too simple. Let's explore the simulator.run method a bit further:", "help(simulator.run)", "Note that you can pass two parameters to run. To run a simulation, we need a time_axis (length of the simulation), and an initial state. Initially we got both of these from the cfg object (which takes them from the .yaml files). However, in some cases you will likely want to change these and conduct simulations for different periods, or starting from different states. We explore this further in Part II: advanced simulation\n4. Post processing and data extraction\nYou have now completed a simple simulation. You are probably interested in exploring some of the output from the simulation and visualizing the quality of the results. Let's first explore how to access the underlying data produced from the simulation.\nVisualizing the discharge for each [sub-]catchment\nRecall that we earlier referred to the importance of understanding the region_model.
You'll see now that this is where information from the simulation is actually contained, and that the simulator object is more or less a convenience wrapper.", "# Here we are going to extract data from the simulator object.\n# let's work directly with the `region_model`\nregion_model = simulator.region_model\n\nprint(region_model.catchment_ids)", "We see here that each sub-catchment in our simulation is associated with a unique ID. These are user-defined IDs. In the case of the nea-nidelva simulation, they are taken from the GIS database used to create the example configuration files.\nTo get data out of the region_model you need to specify which catchments you are interested in evaluating. In the following example we are going to extract the data for each catchment and make a simple plot.\nNote that Shyft uses many specialized C++ types. Many of these have methods to convert to the more familiar numpy objects. An example may be the discharge timeseries for a catchment.", "q_1228_ts = simulator.region_model.statistics.discharge([1228])\nq_1228_np = simulator.region_model.statistics.discharge([1228]).values\nprint(type(q_1228_ts))\nprint(type(q_1228_np))", "Look at the discharge timeseries\nAs mentioned above, Shyft has its own Timeseries class. This class is quite powerful, and the api-timeseries notebook shows more of the functionality. For now, let's look at some key aspects, and how to create a quick plot of the individual catchment discharge.", "# We can make a quick plot of the data of each sub-catchment\nfig, ax = plt.subplots(figsize=(20,15))\n\n# plot each catchment discharge in the catchment_ids\nfor i,cid in enumerate(region_model.catchment_ids):\n # a ts.time_axis can be enumerated to its UtcPeriod, \n # that will have a .start and .end of type utctimestamp\n # to use matplotlib support for datetime-axis, we convert it to datetime (as above)\n ts_timestamps = [dt.datetime.utcfromtimestamp(p.start) for p in region_model.time_axis]\n data = region_model.statistics.discharge([int(cid)]).values\n\n ax.plot(ts_timestamps,data, label = \"{}\".format(region_model.catchment_ids[i]))\n\nfig.autofmt_xdate()\nax.legend(title=\"Catch. ID\")\nax.set_ylabel(\"discharge [m3 s-1]\")", "Visualizing the distributed catchment data\nAn important but difficult concept to remember when working with Shyft is that internally there is no 'grid' to speak of. The simulation is vectorized, and each 'cell' represents a spatial area with its own area and geolocation information. Therefore, we cannot just load a datacube of data, as some may be used to.\nVisualization of this data is a bit more complex, because each individual cell is in practice an individual polygon. Depending on how the data has been configured for Shyft (see region_model), the cells may, in fact, be simple squares or more complex shapes. For the visualization below, we simply treat them as uniform in size, and plot them with the scatter function in matplotlib.\nExtract data for individual simulation cells\nWe'll start by looking at values of individual cells, rather than at the catchment level.
Since Shyft does not have an underlying 'raster' model, you need to fetch all cells directly from the underlying region_model.", "cells = region_model.get_cells()\n\n# Once we have the cells, we can get their coordinate information\n# and fetch the x- and y-location of the cells\nx = np.array([cell.geo.mid_point().x for cell in cells])\ny = np.array([cell.geo.mid_point().y for cell in cells])", "We also will need to get a 'z' value to make things interesting. Since this is the first time we've visualized our catchment, let's make a map of the sub-catchments. To do this, the first thing we need to do is get the membership of each cell. That is, to which catchment does it belong. We do this by extracting the catchment_id of each cell -- and this is what we'll map. The result will be a map of the sub-catchments.\nRecall from above we extracted the catchment_id_map from the region_model:\n# mapping of internal catch ID to catchment\ncatchment_id_map = simulator.region_model.catchment_id_map\n\nWe could just use the catchment_id as the 'z' value, but since this could be a string, we'll take a different approach. We'll assign a unique integer to each catchment_id and plot those (it is also easier for the color bar scaling).", "# let's create the mapping of catchment_id to an integer:\ncatchment_ids = region_model.catchment_ids\ncid_z_map = dict([ (catchment_ids[i],i) for i in range(len(catchment_ids))])\n\n# then create an array the same length as our 'x' and 'y', which holds the\n# integer value that we'll use for the 'z' value\ncatch_ids = np.array([cid_z_map[cell.geo.catchment_id()] for cell in cells])\n\n# and make a quick catchment map...\n# using a scatter plot of the cells\nfig, ax = plt.subplots(figsize=(15,5))\ncm = plt.cm.get_cmap('rainbow')\nplot = ax.scatter(x, y, c=catch_ids, marker='.', s=40, lw=0, cmap=cm)\nplt.colorbar(plot).set_label('zero-based mapping(proper map tbd)')", "Visualizing the Snow Cover Area of all cells at a certain point in time\nHere we'll do some more work to look at a snapshot value of the data in each of the cells. This example collects the response variable (here the Snow Cover Area (SCA)) for each of the cells at a certain point in time.\nThe \"response collector\" is another concept within Shyft that is important to keep in mind. We don't collect and store responses for every variable, in order to keep the simulation memory use lean. Therefore, depending on your application, it may be required to explicitly enable this. The relevant code is found in region_model.h in the C++ core source code.\nFor the ConfigSimulator class, which we used to instantiate the simulator, a standard collector is used that will provide access to the most relevant variables.\nFor a model run during calibration, we use a collector that just does the required minimum for the calibration. And it is still configurable: we can turn on/off the snow-collection, so if we don't calibrate for snow, the snow responses are not collected. More on calibration is shown in the tutorial: Calibration with Shyft\nThe state collector used for the 'highspeed' calibration models (C++) is a null-collector, so no memory is allocated, and no cpu-time is used.", "#first, set a date: year, month, day, (hour of day if hourly time step)\noslo = api.Calendar('Europe/Oslo') # specifying input calendar in Oslo tz-id\ntime_x = oslo.time(2014,5,15) # the oslo calendar(incl dst) converts calendar coordinates Y,M,D..
to its utc-time\n\n# we need to get the index of the time_axis for the time\ntry:\n idx = simulator.region_model.time_axis.index_of(time_x) # index of time x on time-axis\nexcept:\n print(\"Date out of range, setting index to 0\")\n idx = 0\n\n# fetching SCA (the response variable is named \"snow_sca\")\n# You can use tab-completion to explore the `rc`, short for \"response collector\"\n# object of the cell, to see further response variables available.\n# specifying empty list [] indicates all catchments, otherwise pass catchment_id\nsca = simulator.region_model.gamma_snow_response.sca([],idx)", "Let's take a closer look at this... \nsimulator.region_model.time_axis.index_of(time_x)\n\nSimply provided an index value that we can use to index the cells for the time we're interested in looking at.\nNext we use:\nsimulator.region_model.gamma_snow_response\n\nWhat is this? This is a collector from the simulation. In this case, for the gamma_snow routine. It contains a convenient method to access the response variables from the simulation on a per catchment level. Each response variable (outflow, sca, swe) can be called with two arguments. The first a list of the catchments, and the second an index to the time, as shown above. Note, this will return the values for each cell in the sub-catchment. Maybe one is only interested in the total outflow or total swe for the region. In this case you can use: .outflow_value which will return a single value.\nThere is also a response collector for the state variables: .gamma_snow_state. \nExplore both of these further with tab completion or help. As well as the full region_model to see what other algorithm collectors are available as this example is configured.", "# for attr in dir(simulator.region_model):\n# if attr[0] is not '_': #ignore privates\n# print(attr)\n# # and don't forget:\n# help(simulator.region_model.gamma_snow_state)", "We are now ready to explore some of the variables from the simulation. We'll continue on with SCA.", "# We can make a simple scatter plot again for quick visualization\nfig, ax = plt.subplots(figsize=(15,5))\ncm = plt.cm.get_cmap('winter')\nplot = ax.scatter(x, y, c=sca, \n vmin=0, vmax=1, \n marker='s', s=40, lw=0, \n cmap=cm)\nplt.colorbar(plot)\nplt.title('Snow Covered area of {0} on {1}'.format(cfg.region_model_id, oslo.to_string(time_x)))", "A note about the geometry of the region\nAgain, keep in mind that while we have created a variable that contains the values for sca in each cell, this is only an iterable object. The only reason we know where each value is located is because we have corresponding x and y values for each cell. It is not an array.\nWe can calculate some statistics directly out of sca:", "# look at the catchment-wide average:\nnea_avg_sca = np.average(sca)\nprint(\"Average SCA for Nea Nidelva: {0}\".format(nea_avg_sca))\n\n# And let's compute histogram of the snow covered area as well\nfig, ax = plt.subplots()\nax.hist(sca, bins=20, range=(0,1), color='y', alpha=0.5)\nax.set_xlabel(\"SCA of grid cell\")\nax.set_ylabel(\"frequency\")" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
Neuroglycerin/neukrill-net-work
notebooks/tutorials/Training a Maxout CIFAR10-based model.ipynb
mit
[ "This notebook describes setting up a model inspired by the Maxout Network (Goodfellow et al.) which they ran out the CIFAR-10 dataset.\nThe yaml file was modified as little as possible, substituting variables for settings and dataset paths, getting rid of their data pre-processing, and changing the number of channels.", "!obj:pylearn2.train.Train {\n dataset: &train !obj:neukrill_net.dense_dataset.DensePNGDataset {\n settings_path: %(settings_path)s,\n run_settings: %(run_settings_path)s,\n training_set_mode: \"train\"\n },\n model: !obj:pylearn2.models.mlp.MLP {\n batch_size: &batch_size 128,\n layers: [\n !obj:pylearn2.models.maxout.MaxoutConvC01B {\n layer_name: 'h0',\n pad: 4,\n tied_b: 1,\n W_lr_scale: .05,\n b_lr_scale: .05,\n num_channels: 96,\n num_pieces: 2,\n kernel_shape: [8, 8],\n pool_shape: [4, 4],\n pool_stride: [2, 2],\n irange: .005,\n max_kernel_norm: .9,\n partial_sum: 33,\n },\n !obj:pylearn2.models.maxout.MaxoutConvC01B {\n layer_name: 'h1',\n pad: 3,\n tied_b: 1,\n W_lr_scale: .05,\n b_lr_scale: .05,\n num_channels: 192,\n num_pieces: 2,\n kernel_shape: [8, 8],\n pool_shape: [4, 4],\n pool_stride: [2, 2],\n irange: .005,\n max_kernel_norm: 1.9365,\n partial_sum: 15,\n },\n !obj:pylearn2.models.maxout.MaxoutConvC01B {\n pad: 3,\n layer_name: 'h2',\n tied_b: 1,\n W_lr_scale: .05,\n b_lr_scale: .05,\n num_channels: 192,\n num_pieces: 2,\n kernel_shape: [5, 5],\n pool_shape: [2, 2],\n pool_stride: [2, 2],\n irange: .005,\n max_kernel_norm: 1.9365,\n },\n !obj:pylearn2.models.maxout.Maxout {\n layer_name: 'h3',\n irange: .005,\n num_units: 500,\n num_pieces: 5,\n max_col_norm: 1.9\n },\n !obj:pylearn2.models.mlp.Softmax {\n max_col_norm: 1.9365,\n layer_name: 'y',\n n_classes: %(n_classes)i,\n irange: .005\n }\n ],\n input_space: !obj:pylearn2.space.Conv2DSpace {\n shape: &window_shape [32, 32],\n num_channels: 3,\n axes: ['c', 0, 1, 'b'],\n },\n },\n algorithm: !obj:pylearn2.training_algorithms.sgd.SGD {\n learning_rate: .17,\n learning_rule: !obj:pylearn2.training_algorithms.learning_rule.Momentum {\n init_momentum: .5\n },\n train_iteration_mode: 'even_shuffled_sequential',\n monitor_iteration_mode: 'even_sequential',\n monitoring_dataset:\n {\n 'test' : !obj:neukrill_net.dense_dataset.DensePNGDataset {\n settings_path: %(settings_path)s,\n run_settings: %(run_settings_path)s,\n training_set_mode: \"test\"\n },\n },\n cost: !obj:pylearn2.costs.mlp.dropout.Dropout {\n input_include_probs: { 'h0' : .8 },\n input_scales: { 'h0' : 1. 
}\n },\n termination_criterion: !obj:pylearn2.termination_criteria.EpochCounter {\n max_epochs: 474 \n },\n },\n extensions: [\n !obj:pylearn2.training_algorithms.learning_rule.MomentumAdjustor {\n start: 1,\n saturate: 250,\n final_momentum: .65\n },\n !obj:pylearn2.training_algorithms.sgd.LinearDecayOverEpoch {\n start: 1,\n saturate: 500,\n decay_factor: .01\n },\n !obj:pylearn2.train_extensions.best_params.MonitorBasedSaveBest {\n channel_name: test_y_misclass,\n save_path: '%(save_path)s'\n },\n ],\n}", "Before we can start training the model, we need to create a dictionary with all preprocessing settings and point to the yaml file corresponding to the model: cifar10.yaml.", "run_settings = {\n \"model type\":\"pylearn2\",\n \"yaml file\": \"cifar10.yaml\",\n \"preprocessing\":{\"resize\":[48,48]},\n \"final_shape\":[48,48],\n \"augmentation_factor\":1,\n \"train_split\": 0.8\n}", "To set up the path for the settings, utils and os must be imported.", "import neukrill_net.utils\nimport os\nreload(neukrill_net.utils)\n\ncd ..\n\nrun_settings[\"run_settings_path\"] = os.path.abspath(\"run_settings/cifar10_based.json\")\n\nrun_settings", "Now the settings can be saved.", "neukrill_net.utils.save_run_settings(run_settings)\n\n!cat run_settings/cifar10_based.json", "Now we can start training the model with:", "python train.py run_settings/cifar10_based.json", "After trying to run the model, it broke with an error that partialSum does not divide numModules. It turns out that partialSum is a parameter of a convolutional layer that affects the performance of the weight gradient computation, and it has to divide the area of the output grid in this layer, which is given by numModules. Conveniently, the error gave the values of numModules (which are not specified in the yaml file) so we just changed partialSum in each layer to a factor of the corresponding numModules.", "!obj:pylearn2.train.Train {\n dataset: &train !obj:neukrill_net.dense_dataset.DensePNGDataset {\n settings_path: %(settings_path)s,\n run_settings: %(run_settings_path)s,\n training_set_mode: \"train\"\n },\n model: !obj:pylearn2.models.mlp.MLP {\n batch_size: &batch_size 128,\n layers: [\n !obj:pylearn2.models.maxout.MaxoutConvC01B {\n layer_name: 'h0',\n pad: 4,\n tied_b: 1,\n W_lr_scale: .05,\n b_lr_scale: .05,\n num_channels: 96,\n num_pieces: 2,\n kernel_shape: [8, 8],\n pool_shape: [4, 4],\n pool_stride: [2, 2],\n irange: .005,\n max_kernel_norm: .9,\n partial_sum: 49,\n },\n !obj:pylearn2.models.maxout.MaxoutConvC01B {\n layer_name: 'h1',\n pad: 3,\n tied_b: 1,\n W_lr_scale: .05,\n b_lr_scale: .05,\n num_channels: 192,\n num_pieces: 2,\n kernel_shape: [8, 8],\n pool_shape: [4, 4],\n pool_stride: [2, 2],\n irange: .005,\n max_kernel_norm: 1.9365,\n partial_sum: 23,\n },\n !obj:pylearn2.models.maxout.MaxoutConvC01B {\n pad: 3,\n layer_name: 'h2',\n tied_b: 1,\n W_lr_scale: .05,\n b_lr_scale: .05,\n num_channels: 192,\n num_pieces: 2,\n kernel_shape: [5, 5],\n pool_shape: [2, 2],\n pool_stride: [2, 2],\n irange: .005,\n max_kernel_norm: 1.9365,\n },\n !obj:pylearn2.models.maxout.Maxout {\n layer_name: 'h3',\n irange: .005,\n num_units: 500,\n num_pieces: 5,\n max_col_norm: 1.9\n },\n !obj:pylearn2.models.mlp.Softmax {\n max_col_norm: 1.9365,\n layer_name: 'y',\n n_classes: %(n_classes)i,\n irange: .005\n }\n ],\n input_space: !obj:pylearn2.space.Conv2DSpace {\n shape: &window_shape [32, 32],\n num_channels: 3,\n axes: ['c', 0, 1, 'b'],\n },\n },\n algorithm: !obj:pylearn2.training_algorithms.sgd.SGD {\n learning_rate: .17,\n
learning_rule: !obj:pylearn2.training_algorithms.learning_rule.Momentum {\n init_momentum: .5\n },\n train_iteration_mode: 'even_shuffled_sequential',\n monitor_iteration_mode: 'even_sequential',\n monitoring_dataset:\n {\n 'test' : !obj:neukrill_net.dense_dataset.DensePNGDataset {\n settings_path: %(settings_path)s,\n run_settings: %(run_settings_path)s,\n training_set_mode: \"test\"\n },\n },\n cost: !obj:pylearn2.costs.mlp.dropout.Dropout {\n input_include_probs: { 'h0' : .8 },\n input_scales: { 'h0' : 1. }\n },\n termination_criterion: !obj:pylearn2.termination_criteria.EpochCounter {\n max_epochs: 474 \n },\n },\n extensions: [\n !obj:pylearn2.training_algorithms.learning_rule.MomentumAdjustor {\n start: 1,\n saturate: 250,\n final_momentum: .65\n },\n !obj:pylearn2.training_algorithms.sgd.LinearDecayOverEpoch {\n start: 1,\n saturate: 500,\n decay_factor: .01\n },\n !obj:pylearn2.train_extensions.best_params.MonitorBasedSaveBest {\n channel_name: test_y_misclass,\n save_path: '%(save_path)s'\n },\n ],\n}", "The results were not very good (nll = ~3) so we are not going to continue working on this model." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
Bio204-class/bio204-notebooks
2016-04-25-Parallels-Regression-and-ANOVA.ipynb
cc0-1.0
[ "%matplotlib inline\n\nimport numpy as np\nimport scipy.stats as stats\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sbn\n\n# the formula API allow us to write R-like formulas for regression models\nimport statsmodels as sm\nimport statsmodels.stats.anova as anova\nimport statsmodels.formula.api as smf \n\nnp.random.seed(20160425)\nsbn.set_style(\"white\")", "Regression as sum-of-squares decomposition\nExample data sets\nWe setup two synthetic data sets -- one where $Y$ is independent of $X$, and a second where $Y$ is depedendent on $X$.", "n = 25\nx = np.linspace(-5, 5, n) + stats.norm.rvs(loc=0, scale=1, size=n)\n\na, b = 1, 0.75\n\n# I've chosen values to make yind and ydep have about the same variance\nyind = a + stats.norm.rvs(loc=0, scale=np.sqrt(8), size=n) \nydep = a + b*x + stats.norm.rvs(loc=0, scale=1, size=n)\n\n# create two different data frames for ease of use with statsmodels\ndata_ind = pd.DataFrame(dict(x = x, y = yind))\ndata_dep = pd.DataFrame(dict(x = x, y = ydep))", "And we plot the data sets", "fig, (ax1, ax2) = plt.subplots(1,2, figsize=(10,4), sharex=True, sharey=True)\nax1.scatter(x, yind, s=60, alpha=0.75, color='steelblue')\nax2.scatter(x, ydep, s=60, alpha=0.75, color='steelblue')\n\nax1.set_xlabel(\"X\",fontsize=15)\nax1.set_ylabel(\"Y\",fontsize=15)\nax1.set_title(\"Y independent of X\", fontsize=18)\n\nax2.set_xlabel(\"X\",fontsize=15)\nax2.set_title(\"Y dependent on X\", fontsize=18)\n\npass", "Fit the regressions with statsmodels", "fit_ind = smf.ols('y ~ x', data_ind).fit()\nfit_dep = smf.ols('y ~ x', data_dep).fit()", "Plot the regressions using info return by statsmodels", "fig, (ax1, ax2) = plt.subplots(1,2, figsize=(10,4), sharex=True, sharey=True)\n\nax1.scatter(x, yind, s=60, alpha=0.75, color='steelblue')\nax1.plot(x, fit_ind.predict(), color='firebrick', alpha=0.5)\n\nax2.scatter(x, ydep, s=60, alpha=0.75, color='steelblue')\nax2.plot(x, fit_dep.predict(), color='firebrick', alpha=0.5)\n\nax1.set_xlabel(\"X\",fontsize=15)\nax1.set_ylabel(\"Y\",fontsize=15)\nax1.set_title(\"Y independent of X\", fontsize=18)\n\nax2.set_xlabel(\"X\",fontsize=15)\nax2.set_title(\"Y dependent on X\", fontsize=18)\n\npass", "Functions to decompose sums of squares", "def sum_squares(x):\n x = np.asarray(x)\n return np.sum((x - np.mean(x))**2)\n\n\ndef bivariate_regression_table(fit):\n \"\"\" A function to create an ANOVA-like table for a bivariate regression\"\"\"\n df_model = fit.df_model\n df_resid = fit.df_resid\n df_total = df_model + df_resid\n\n SStotal = sum_squares(fit.model.endog)\n SSmodel = sum_squares(fit.predict())\n SSresid = sum_squares(fit.resid)\n\n MSmodel = SSmodel / df_model\n MSresid = SSresid / df_resid\n\n Fstat = MSmodel / MSresid\n\n Pval = stats.f.sf(Fstat, df_model, df_resid)\n\n Ftable = pd.DataFrame(index=[\"Model\",\"Residuals\",\"Total\"],\n columns=[\"df\", \"SS\", \"MS\", \"F\", \"Pval\"],\n data = dict(df = [df_model, df_resid, df_total],\n SS = [SSmodel, SSresid, SStotal],\n MS = [MSmodel, MSresid, \"\"],\n F = [Fstat, \"\", \"\"],\n Pval = [Pval, \"\", \"\"])) \n return Ftable", "ANOVA like tables for each regression", "Ftable_ind = bivariate_regression_table(fit_ind)\nFtable_ind\n\nFtable_dep = bivariate_regression_table(fit_dep)\nFtable_dep", "Two-group one-way ANOVA as a bivariate regression\nTo setup ANOVA for two-groups as a regression problem, we use \"dummy coding\" where we incorporate group information into a predictor variable.", "Y1 = stats.norm.rvs(loc=0, scale=1, size=10)\nY2 = 
stats.norm.rvs(loc=1, scale=1, size=10)\nY = np.concatenate([Y1,Y2])\ngroups = [-1]*10 + [1]*10 # setup dummy variable to represent grouping\ndata = pd.DataFrame(dict(Y = Y,\n group = groups))\n\ndata.head(3)\n\ndata.tail(3)\n\ndata.corr()\n\nsbn.stripplot(x=\"group\", y=\"Y\", hue=\"group\", data=data,s=10)\npass", "Fit regression model", "fit_data = smf.ols('Y ~ group', data).fit()", "Plot regression", "plt.scatter(data.group[data.group == -1], data.Y[data.group == -1], s=60, alpha=0.75, color='steelblue')\nplt.scatter(data.group[data.group == 1], data.Y[data.group == 1], s=60, alpha=0.75, color='forestgreen')\ngroups = [-1,1]\npredicted = fit_data.predict(dict(group=groups))\nplt.plot(groups, predicted, color='firebrick', alpha=0.75)\nplt.xticks([-1,1])\nplt.xlabel(\"Group\",fontsize=15)\nplt.ylabel(\"Y\", fontsize=15)\npass", "Compare regression F-statistic and corresponding p-value to that from ANOVA", "fit_data.fvalue, fit_data.f_pvalue\n\nstats.f_oneway(data.Y[data.group == -1], data.Y[data.group == 1])", "Multi-group one-way ANOVA as a multiple regression\nWhen we want to consider multiple groups, we have to extend the idea of dummy coding to allow for more groups. We can setup simple dummy variables for $g-1$ groups, or we can use a slight variant called \"effect coding\". The results when using effect coding are usually easier to interpret, and that's what we'll use this here.\nThe two links below contrast dummy and effect coding:\n\nDummy coding: http://www.ats.ucla.edu/stat/mult_pkg/faq/general/dummy.htm\nEffect coding: http://www.ats.ucla.edu/stat/mult_pkg/faq/general/effect.htm", "iris = pd.read_csv(\"http://roybatty.org/iris.csv\")\niris.Species.unique()\n\niris.columns = iris.columns.str.replace('.','')\niris.columns\n\nsbn.violinplot(x=\"Species\", y=\"SepalLength\", data=iris)\n\neffect1 = []\neffect2 = []\nfor s in iris.Species:\n if s == 'setosa':\n effect1.append(1)\n effect2.append(0)\n elif s == 'versicolor':\n effect1.append(0)\n effect2.append(1)\n else:\n effect1.append(-1)\n effect2.append(-1)\n \n\nprint(effect1)\n\nprint(effect2)\n\n# add effect variables to iris data frame\niris.effect1 = effect1\niris.effect2 = effect2\n\niris_fit = smf.ols(\"SepalLength ~ effect1 + effect2\", iris).fit()\n\niris_fit.fvalue, iris_fit.f_pvalue\n\nstats.f_oneway(iris.SepalLength[iris.Species == \"setosa\"], \n iris.SepalLength[iris.Species == \"versicolor\"],\n iris.SepalLength[iris.Species == \"virginica\"])", "If using statsmodels you don't need to explicit create the effect coding variables like we did above. You can specify that a variable is a categorical variables in the formula itself.", "iris_fit2 = smf.ols('SepalLength ~ C(Species)', iris).fit()", "You can then pass the model fit results to statsmodels.stats.anova.anova_lm to get an appropriate ANOVA table.", "anova.anova_lm(iris_fit2)" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
adityaka/misc_scripts
python-scripts/data_analytics_learn/link_pandas/Ex_Files_Pandas_Data/Exercise Files/02_06/Final/.ipynb_checkpoints/60 Merge-checkpoint.ipynb
bsd-3-clause
[ "Merge\n\nConcat \nJoin \nAppend", "import pandas as pd\nimport numpy as np\n\nstarting_date = '20160701'\nsample_numpy_data = np.array(np.arange(24)).reshape((6,4))\ndates_index = pd.date_range(starting_date, periods=6)\nsample_df = pd.DataFrame(sample_numpy_data, index=dates_index, columns=list('ABCD'))\n\nsample_df_2 = sample_df.copy()\nsample_df_2['Fruits'] = ['apple', 'orange','banana','strawberry','blueberry','pineapple']\n\nsample_series = pd.Series([1,2,3,4,5,6], index=pd.date_range(starting_date, periods=6))\nsample_df_2['Extra Data'] = sample_series *3 +1\n\nsecond_numpy_array = np.array(np.arange(len(sample_df_2))) *100 + 7\nsample_df_2['G'] = second_numpy_array\n\nsample_df_2", "concat()\ndocumentation: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.concat.html\nseparate data frame into a list with 3 elements", "pieces = [sample_df_2[:2], sample_df_2[2:4], sample_df_2[4:]]\npieces", "concatenate first and last elements", "new_list = pieces[0], pieces[2]\npd.concat(new_list)", "append()\ndocumentation: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.append.html", "new_last_row = sample_df_2.iloc[2]\n\nsample_df_2.append(new_last_row)\n\nsample_df_2", "merge()\ndocumentation: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.merge.html\nMerge DataFrame objects by performing a database-style join operation by columns or indexes.\nIf joining columns on columns, the DataFrame indexes will be ignored. Otherwise if joining indexes on indexes or indexes on a column or columns, the index will be passed on.", "left = pd.DataFrame({'my_key': ['K0', 'K1', 'K2', 'K3'],\n 'A': ['A0', 'A1', 'A2', 'A3'],\n 'B': ['B0', 'B1', 'B2', 'B3']})\nright = pd.DataFrame({'my_key': ['K0', 'K1', 'K2', 'K3'],\n 'C': ['C0', 'C1', 'C2', 'C3'],\n 'D': ['D0', 'D1', 'D2', 'D3']})\n\nresult = pd.merge(left, right, on='my_key')\nresult" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
kcyu1993/ML_course_kyu
projects/project1/scripts/project1.ipynb
mit
[ "# Useful starting lines\n%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\n%load_ext autoreload\n%autoreload 2", "Load the training data into feature matrix, class labels, and event ids:", "from proj1_helpers import *\nDATA_TRAIN_PATH = '' # TODO: download train data and supply path here \ny, tX, ids = load_csv_data(DATA_TRAIN_PATH)", "Do your thing crazy machine learning thing here :) ...\nGenerate predictions and save ouput in csv format for submission:", "DATA_TEST_PATH = '' # TODO: download train data and supply path here \n_, tX_test, ids_test = load_csv_data(DATA_TEST_PATH)\n\nOUTPUT_PATH = '' # TODO: fill in desired name of output file for submission\ny_pred = predict_labels(weights, tX_test)\ncreate_csv_submission(ids_test, y_pred, OUTPUT_PATH)\n\nfrom helpers import sample_data, load_data, standardize\n\n# load data.\nheight, weight, gender = load_data()\n\n# build sampled x and y.\nseed = 1\ny = np.expand_dims(gender, axis=1)\nX = np.c_[height.reshape(-1), weight.reshape(-1)]\ny, X = sample_data(y, X, seed, size_samples=200)\nx, mean_x, std_x = standardize(X)\n\n" ]
[ "code", "markdown", "code", "markdown", "code" ]
grfiv/MNIST
svm.scikit/svm_rbf.scikit_random_gridsearch.ipynb
mit
[ "MNIST digit recognition using SVC with RBF in scikit-learn\n> Using RANDOMIZED grid search, find optimal parameters\nSee Comparing randomized search and grid search for hyperparameter estimation for a discussion of using a randomized grid search rather than an exhaustive one. The statement is made The result in parameter settings is quite similar, while the run time for randomized search is dramatically lower. The performance is slightly worse for the randomized search, though this is most likely a noise effect and would not carry over to a held-out test set. \nMy process was to iteratively narrow the bounds of the grid search so that fewer duds showed up in the random search. Narrowing the end points and increasing the density can improve precision but I'm not sure at what point greater precision no longer matters in a stochastic domain nor am I certain that the C/gamma tradeoff is strictly monotone linear.", "from __future__ import division\nimport os, time, math, csv\nimport cPickle as pickle\n\nimport matplotlib.pyplot as plt\n\nimport numpy as np\n\nfrom print_imgs import print_imgs # my own function to print a grid of square images\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.utils import shuffle\n\nfrom sklearn.svm import SVC\n\nfrom sklearn.cross_validation import StratifiedKFold\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.grid_search import RandomizedSearchCV\n\nfrom sklearn.metrics import classification_report, confusion_matrix\n\nnp.random.seed(seed=1009)\n\n%matplotlib inline\n\n#%qtconsole", "Where's the data?", "file_path = '../data/'\n\nDESKEWED = True\nif DESKEWED:\n train_img_filename = 'train-images_deskewed.csv'\n test_img_filename = 't10k-images_deskewed.csv'\nelse:\n train_img_filename = 'train-images.csv'\n test_img_filename = 't10k-images.csv'\n \ntrain_label_filename = 'train-labels.csv'\ntest_label_filename = 't10k-labels.csv'", "How much of the data will we use?", "portion = 1.0 # set to 1.0 for all of it, less than 1.0 for less", "Read the training images and labels", "# read trainX\nwith open(file_path + train_img_filename,'r') as f:\n data_iter = csv.reader(f, delimiter = ',')\n data = [data for data in data_iter]\ntrainX = np.ascontiguousarray(data, dtype = np.float64) \n\nif portion < 1.0:\n trainX = trainX[:portion*trainX.shape[0]]\n\n# scale trainX\nscaler = StandardScaler()\nscaler.fit(trainX) # find mean/std for trainX\ntrainX = scaler.transform(trainX) # scale trainX with trainX mean/std\n\n# read trainY\nwith open(file_path + train_label_filename,'r') as f:\n data_iter = csv.reader(f, delimiter = ',')\n data = [data for data in data_iter]\ntrainY = np.ascontiguousarray(data, dtype = np.int8) \n\nif portion < 1.0:\n trainY = trainY[:portion*trainY.shape[0]].ravel()\n \n# shuffle trainX & trainY\ntrainX, trainY = shuffle(trainX, trainY, random_state=0)\n\nprint(\"trainX shape: {0}\".format(trainX.shape))\nprint(\"trainY shape: {0}\\n\".format(trainY.shape))\n\nprint(trainX.flags)", "Read the test images and labels", "# read testX\nwith open(file_path + test_img_filename,'r') as f:\n data_iter = csv.reader(f, delimiter = ',')\n data = [data for data in data_iter]\ntestX = np.ascontiguousarray(data, dtype = np.float64) \n\nif portion < 1.0:\n testX = testX[:portion*testX.shape[0]]\n\n# scale testX\ntestX = scaler.transform(testX) # scale testX with trainX mean/std\n\n\n# read testY\nwith open(file_path + test_label_filename,'r') as f:\n data_iter = csv.reader(f, delimiter = ',')\n data = [data for data in 
data_iter]\ntestY = np.ascontiguousarray(data, dtype = np.int8)\n\nif portion < 1.0:\n testY = testY[:portion*testY.shape[0]].ravel()\n\n# shuffle testX, testY\ntestX, testY = shuffle(testX, testY, random_state=0)\n\nprint(\"testX shape: {0}\".format(testX.shape))\nprint(\"testY shape: {0}\".format(testY.shape))", "Use the smaller, fewer images for testing\nPrint a sample", "print_imgs(images = trainX, \n actual_labels = trainY.ravel(), \n predicted_labels = trainY.ravel(),\n starting_index = np.random.randint(0, high=trainY.shape[0]-36, size=1)[0],\n size = 6)", "SVC Default Parameter Settings", "# default parameters for SVC\n# ==========================\ndefault_svc_params = {}\n\ndefault_svc_params['C'] = 1.0 # penalty\ndefault_svc_params['class_weight'] = None # Set the parameter C of class i to class_weight[i]*C\n # set to 'auto' for unbalanced classes\ndefault_svc_params['gamma'] = 0.0 # Kernel coefficient for 'rbf', 'poly' and 'sigmoid'\n\ndefault_svc_params['kernel'] = 'rbf' # 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or a callable \ndefault_svc_params['shrinking'] = True # Whether to use the shrinking heuristic. \ndefault_svc_params['probability'] = False # Whether to enable probability estimates. \ndefault_svc_params['tol'] = 0.001 # Tolerance for stopping criterion. \ndefault_svc_params['cache_size'] = 200 # size of the kernel cache (in MB).\n\ndefault_svc_params['max_iter'] = -1 # limit on iterations within solver, or -1 for no limit. \n\ndefault_svc_params['random_state'] = 1009 \ndefault_svc_params['verbose'] = False \ndefault_svc_params['degree'] = 3 # 'poly' only\ndefault_svc_params['coef0'] = 0.0 # 'poly' and 'sigmoid' only\n\n# set parameters for the estimator\nsvc_params = dict(default_svc_params)\n\nsvc_params['cache_size'] = 2000\n\n# the classifier\nsvc_clf = SVC(**svc_params)", "RANDOMIZED grid search", "t0 = time.time()\n\n# search grid\n# ===========\nsearch_grid = dict(C = np.logspace( 0, 3, 50),\n gamma = np.logspace(-5, -3, 50))\n\n# stratified K-Fold indices\n# =========================\nSKFolds = StratifiedKFold(y = trainY.ravel(), \n n_folds = 3, \n indices = None, \n shuffle = True, \n random_state = 1009)\n\n# default parameters for RandomizedSearchCV\n# =========================================\ndefault_random_params = {}\ndefault_random_params['scoring'] = None \ndefault_random_params['fit_params'] = None # dict of parameters to pass to the fit method\ndefault_random_params['n_jobs'] = 1 \ndefault_random_params['pre_dispatch'] = '2*n_jobs' # memory is copied this many times\n # reduce if you're running into memory problems\n \ndefault_random_params['iid'] = True # assume the folds are iid \ndefault_random_params['refit'] = True # Refit the best estimator with the entire dataset \ndefault_random_params['cv'] = None \ndefault_random_params['verbose'] = 0 \ndefault_random_params['random_state'] = None\ndefault_random_params['n_iter'] = 10\n\n# set parameters for the randomized grid search\n# =============================================\nrandom_params = dict(default_random_params)\n\nrandom_params['verbose'] = 1\nrandom_params['random_state'] = 1009\nrandom_params['cv'] = SKFolds \nrandom_params['n_jobs'] = -1 # -1 => use all available cores\n # one core per fold\n # for each point in the grid\n\nrandom_params['n_iter'] = 100 # choose this many random combinations of parameters\n # from the 'search_grid'\n\n\n# perform the randomized parameter grid search\n# ============================================\nrandom_search = RandomizedSearchCV(estimator = 
svc_clf, \n param_distributions = search_grid, \n **random_params)\n\nrandom_search.fit(trainX, trainY.ravel())\n\nprint(random_search)\n\nprint(\"\\ntime in minutes {0:.2f}\".format((time.time()-t0)/60))", "Analyze the results of the parameter pairs randomly selected", "from operator import itemgetter\n\n# how many duds?\nmean_score_list = [score.mean_validation_score for score in random_search.grid_scores_]\nprint(\"\\nProportion of random scores below 98%: {0:.2f}\\n\".format(sum(np.array(mean_score_list)<0.98)/len(mean_score_list)))\n \n# what do the best ones look like?\nfor score in sorted(random_search.grid_scores_, key=itemgetter(1), reverse=True)[:10]:\n print score", "Heatmap of the accuracy of the C and gamma pairs chosen in the grid search\nsee http://scikit-learn.org/stable/auto_examples/svm/plot_rbf_parameters.html\nThis script was extensively modified to work with the score results from RandomizedSearchCV", "from matplotlib.colors import Normalize\n\nclass MidpointNormalize(Normalize):\n \"\"\"Utility function to move the midpoint of a colormap to be around the values of interest.\"\"\"\n\n def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):\n self.midpoint = midpoint\n Normalize.__init__(self, vmin, vmax, clip)\n\n def __call__(self, value, clip=None):\n x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]\n return np.ma.masked_array(np.interp(value, x, y))\n \n# --------------------------------------------------------------------------------\n\n# skip this many parameter values on the display axes\ntick_step_size_C = math.ceil(len(search_grid['C']) / 15) \ntick_step_size_gamma = math.ceil(len(search_grid['gamma']) / 15)\n \n# create 'heatmap'\n# ================\n\n# a C x gamma matrix; initially all zeros (black)\nheatmap = np.zeros((len(search_grid['C']), len(search_grid['gamma'])))\n\n# for each score, find the index in 'heatmap' of the 'C' and 'gamma' values\n# at that index intersection put the mean score\nfor score in random_search.grid_scores_:\n # index of C and gamma in 'search_grid'\n ceeinx = search_grid['C'].tolist().index(score[0]['C'])\n gaminx = search_grid['gamma'].tolist().index(score[0]['gamma'])\n heatmap[ceeinx, gaminx] = score[1]\n\n\n# display the heatmap\n# ===================\nplt.figure(figsize=(10, 8))\nplt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)\n\nplt.imshow(heatmap, interpolation='nearest', cmap=plt.cm.hot,\n norm=MidpointNormalize(vmin=0.2, midpoint=0.92))\nplt.xlabel('gamma')\nplt.ylabel('C')\n\nplt.colorbar()\n\n# label the axes\nplt.xticks(np.arange(0, len(search_grid['gamma']), tick_step_size_gamma), \n search_grid['gamma'][::tick_step_size_gamma], \n rotation=45)\n\nplt.yticks(np.arange(0, len(search_grid['C']), tick_step_size_C), \n search_grid['C'][::tick_step_size_C])\n\n# cross hairs\nceeinx = search_grid['C'].tolist().index(random_search.best_params_['C'])\nplt.axhline(y=ceeinx)\ngaminx = search_grid['gamma'].tolist().index(random_search.best_params_['gamma'])\nplt.axvline(x=gaminx)\n\nplt.title('Parameter-pair accuracy')\nplt.show()\n\nprint(\"\\nThe best parameters are %s\\nwith a score of %0.2f, misclass of %0.4f\"\n % (random_search.best_params_, random_search.best_score_, 1-random_search.best_score_))", "Predict the test set and analyze the result", "target_names = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]\n\npredicted_values = random_search.predict(testX)\ny_true, y_pred = testY.ravel(), predicted_values\n\nprint(classification_report(y_true, y_pred, 
target_names=target_names))\n\ndef plot_confusion_matrix(cm, \n target_names,\n title='Proportional Confusion matrix', \n cmap=plt.cm.Paired): \n \"\"\"\n given a confusion matrix (cm), make a nice plot\n see the skikit-learn documentation for the original done for the iris dataset\n \"\"\"\n plt.figure(figsize=(8, 6))\n plt.imshow((cm/cm.sum(axis=1)), interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(target_names))\n plt.xticks(tick_marks, target_names, rotation=45)\n plt.yticks(tick_marks, target_names)\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n \n# --------------------------------------------------------------------------------------------\n \ncm = confusion_matrix(y_true, y_pred) \n\nprint(cm)\nmodel_accuracy = sum(cm.diagonal())/len(testY)\nmodel_misclass = 1 - model_accuracy\nprint(\"\\nModel accuracy: {0}, model misclass rate: {1}\".format(model_accuracy, model_misclass))\n\nplot_confusion_matrix(cm, target_names)", "Learning Curves\nsee http://scikit-learn.org/stable/auto_examples/model_selection/plot_learning_curve.html\nThe red line shows how well we fit the training data. The larger the score, the lower the bias. We expect the red line to start very near to 1.0 since we ought to be able to fit just a few points very well. We expect the red line to decline slightly since more points to fit requires a more complex model.\nThe green line shows the accuracy of the predictions of the test set. We expect it to start much lower than the red line but to increase continuously as the amount of training data used to create the model grows. An appropriate algorithm, correctly parameterized should push the green line higher and higher as we train with more training data. The best case is for the red line to decline only very slightly from 1.0 and for the green line to rise to intersect the red line.\nA red line that starts below 1.0 and/or declines steeply indicates bias, a model that does not even fit the data it already knows the answer for. 
In addition to reviewing whether the algorithm is appropriate and whether it is optimally parameterized you may consider ways to increase the number of useful predictor variables.\nA red line that hugs the top but for which the green line does not rise to meet it indicates overfitting.", "t0 = time.time()\n\nfrom sklearn.learning_curve import learning_curve\nfrom sklearn.cross_validation import ShuffleSplit\n\n\ndef plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):\n \"\"\"\n Generate a simple plot of the test and training learning curve.\n\n Parameters\n ----------\n estimator : object type that implements the \"fit\" and \"predict\" methods\n An object of that type which is cloned for each validation.\n\n title : string\n Title for the chart.\n\n X : array-like, shape (n_samples, n_features)\n Training vector, where n_samples is the number of samples and\n n_features is the number of features.\n\n y : array-like, shape (n_samples) or (n_samples, n_features), optional\n Target relative to X for classification or regression;\n None for unsupervised learning.\n\n ylim : tuple, shape (ymin, ymax), optional\n Defines minimum and maximum yvalues plotted.\n\n cv : integer, cross-validation generator, optional\n If an integer is passed, it is the number of folds (defaults to 3).\n Specific cross-validation objects can be passed, see\n sklearn.cross_validation module for the list of possible objects\n\n n_jobs : integer, optional\n Number of jobs to run in parallel (default 1).\n \"\"\"\n plt.figure(figsize=(8, 6))\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n \n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n \n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n \n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n return plt\n\n# --------------------------------------------------------------------------------\n\nC_gamma = \"C=\" + str(np.round(random_search.best_params_['C'],4)) + \\\n \", gamma=\" + str(np.round(random_search.best_params_['gamma'],6))\n\nplot_learning_curve(estimator = random_search.best_estimator_, \n title = \"Learning Curves (SVM, RBF, \" + C_gamma + \")\", \n X = trainX, \n y = trainY.ravel(), \n ylim = (0.85, 1.01), \n cv = ShuffleSplit(n = trainX.shape[0], \n n_iter = 5, \n test_size = 0.2, \n random_state = 0), \n n_jobs = 8)\n\nplt.show()\n\nprint(\"\\ntime in minutes {0:.2f}\".format((time.time()-t0)/60))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
GoogleCloudPlatform/vertex-ai-samples
notebooks/community/gapic/automl/showcase_automl_tabular_classification_online_explain.ipynb
apache-2.0
[ "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "Vertex client library: AutoML tabular classification model for online prediction with explanation\n<table align=\"left\">\n <td>\n <a href=\"https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/gapic/automl/showcase_automl_tabular_classification_online_explain.ipynb\">\n <img src=\"https://cloud.google.com/ml-engine/images/colab-logo-32px.png\" alt=\"Colab logo\"> Run in Colab\n </a>\n </td>\n <td>\n <a href=\"https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/gapic/automl/showcase_automl_tabular_classification_online_explain.ipynb\">\n <img src=\"https://cloud.google.com/ml-engine/images/github-logo-32px.png\" alt=\"GitHub logo\">\n View on GitHub\n </a>\n </td>\n</table>\n<br/><br/><br/>\nOverview\nThis tutorial demonstrates how to use the Vertex client library for Python to create tabular classification models and do online prediction with explanation using Google Cloud's AutoML.\nDataset\nThe dataset used for this tutorial is the Iris dataset from TensorFlow Datasets. This dataset does not require any feature engineering. The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket. The trained model predicts the type of Iris flower species from a class of three species: setosa, virginica, or versicolor.\nObjective\nIn this tutorial, you create an AutoML tabular classification model and deploy for online prediction with explainability from a Python script using the Vertex client library. You can alternatively create and deploy models using the gcloud command-line tool or online using the Google Cloud Console.\nThe steps performed include:\n\nCreate a Vertex Dataset resource.\nTrain the model.\nView the model evaluation.\nDeploy the Model resource to a serving Endpoint resource.\nMake a prediction with explainability.\nUndeploy the Model.\n\nCosts\nThis tutorial uses billable components of Google Cloud (GCP):\n\nVertex AI\nCloud Storage\n\nLearn about Vertex AI\npricing and Cloud Storage\npricing, and use the Pricing\nCalculator\nto generate a cost estimate based on your projected usage.\nInstallation\nInstall the latest version of Vertex client library.", "import os\nimport sys\n\n# Google Cloud Notebook\nif os.path.exists(\"/opt/deeplearning/metadata/env_version\"):\n USER_FLAG = \"--user\"\nelse:\n USER_FLAG = \"\"\n\n! pip3 install -U google-cloud-aiplatform $USER_FLAG", "Install the latest GA version of google-cloud-storage library as well.", "! 
pip3 install -U google-cloud-storage $USER_FLAG", "Restart the kernel\nOnce you've installed the Vertex client library and Google cloud-storage, you need to restart the notebook kernel so it can find the packages.", "if not os.getenv(\"IS_TESTING\"):\n # Automatically restart kernel after installs\n import IPython\n\n app = IPython.Application.instance()\n app.kernel.do_shutdown(True)", "Before you begin\nGPU runtime\nMake sure you're running this notebook in a GPU runtime if you have that option. In Colab, select Runtime > Change Runtime Type > GPU\nSet up your Google Cloud project\nThe following steps are required, regardless of your notebook environment.\n\n\nSelect or create a Google Cloud project. When you first create an account, you get a $300 free credit towards your compute/storage costs.\n\n\nMake sure that billing is enabled for your project.\n\n\nEnable the Vertex APIs and Compute Engine APIs.\n\n\nThe Google Cloud SDK is already installed in Google Cloud Notebook.\n\n\nEnter your project ID in the cell below. Then run the cell to make sure the\nCloud SDK uses the right project for all the commands in this notebook.\n\n\nNote: Jupyter runs lines prefixed with ! as shell commands, and it interpolates Python variables prefixed with $ into these commands.", "PROJECT_ID = \"[your-project-id]\" # @param {type:\"string\"}\n\nif PROJECT_ID == \"\" or PROJECT_ID is None or PROJECT_ID == \"[your-project-id]\":\n # Get your GCP project id from gcloud\n shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null\n PROJECT_ID = shell_output[0]\n print(\"Project ID:\", PROJECT_ID)\n\n! gcloud config set project $PROJECT_ID", "Region\nYou can also change the REGION variable, which is used for operations\nthroughout the rest of this notebook. Below are regions supported for Vertex. We recommend that you choose the region closest to you.\n\nAmericas: us-central1\nEurope: europe-west4\nAsia Pacific: asia-east1\n\nYou may not use a multi-regional bucket for training with Vertex. Not all regions provide support for all Vertex services. For the latest support per region, see the Vertex locations documentation", "REGION = \"us-central1\" # @param {type: \"string\"}", "Timestamp\nIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial.", "from datetime import datetime\n\nTIMESTAMP = datetime.now().strftime(\"%Y%m%d%H%M%S\")", "Authenticate your Google Cloud account\nIf you are using Google Cloud Notebook, your environment is already authenticated. Skip this step.\nIf you are using Colab, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.\nOtherwise, follow these steps:\nIn the Cloud Console, go to the Create service account key page.\nClick Create service account.\nIn the Service account name field, enter a name, and click Create.\nIn the Grant this service account access to project section, click the Role drop-down list. Type \"Vertex\" into the filter box, and select Vertex Administrator. Type \"Storage Object Admin\" into the filter box, and select Storage Object Admin.\nClick Create. 
A JSON file that contains your key downloads to your local environment.\nEnter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.", "# If you are running this notebook in Colab, run this cell and follow the\n# instructions to authenticate your GCP account. This provides access to your\n# Cloud Storage bucket and lets you submit training jobs and prediction\n# requests.\n\n# If on Google Cloud Notebook, then don't execute this code\nif not os.path.exists(\"/opt/deeplearning/metadata/env_version\"):\n if \"google.colab\" in sys.modules:\n from google.colab import auth as google_auth\n\n google_auth.authenticate_user()\n\n # If you are running this notebook locally, replace the string below with the\n # path to your service account key and run this cell to authenticate your GCP\n # account.\n elif not os.getenv(\"IS_TESTING\"):\n %env GOOGLE_APPLICATION_CREDENTIALS ''", "Set up variables\nNext, set up some variables used throughout the tutorial.\nImport libraries and define constants\nImport Vertex client library\nImport the Vertex client library into our Python environment.", "import time\n\nimport google.cloud.aiplatform_v1beta1 as aip\nfrom google.protobuf import json_format\nfrom google.protobuf.json_format import MessageToJson, ParseDict\nfrom google.protobuf.struct_pb2 import Struct, Value", "Vertex constants\nSetup up the following constants for Vertex:\n\nAPI_ENDPOINT: The Vertex API service endpoint for dataset, model, job, pipeline and endpoint services.\nPARENT: The Vertex location root path for dataset, model, job, pipeline and endpoint resources.", "# API service endpoint\nAPI_ENDPOINT = \"{}-aiplatform.googleapis.com\".format(REGION)\n\n# Vertex location root path for your dataset, model and endpoint resources\nPARENT = \"projects/\" + PROJECT_ID + \"/locations/\" + REGION", "AutoML constants\nSet constants unique to AutoML datasets and training:\n\nDataset Schemas: Tells the Dataset resource service which type of dataset it is.\nData Labeling (Annotations) Schemas: Tells the Dataset resource service how the data is labeled (annotated).\nDataset Training Schemas: Tells the Pipeline resource service the task (e.g., classification) to train the model for.", "# Tabular Dataset type\nDATA_SCHEMA = \"gs://google-cloud-aiplatform/schema/dataset/metadata/tables_1.0.0.yaml\"\n# Tabular Labeling type\nLABEL_SCHEMA = (\n \"gs://google-cloud-aiplatform/schema/dataset/ioformat/table_io_format_1.0.0.yaml\"\n)\n# Tabular Training task\nTRAINING_SCHEMA = \"gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_tables_1.0.0.yaml\"", "Hardware Accelerators\nSet the hardware accelerators (e.g., GPU), if any, for prediction.\nSet the variable DEPLOY_GPU/DEPLOY_NGPU to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. 
For example, to use a GPU container image with 4 Nvidia Telsa K80 GPUs allocated to each VM, you would specify:\n(aip.AcceleratorType.NVIDIA_TESLA_K80, 4)\n\nFor GPU, available accelerators include:\n - aip.AcceleratorType.NVIDIA_TESLA_K80\n - aip.AcceleratorType.NVIDIA_TESLA_P100\n - aip.AcceleratorType.NVIDIA_TESLA_P4\n - aip.AcceleratorType.NVIDIA_TESLA_T4\n - aip.AcceleratorType.NVIDIA_TESLA_V100\nOtherwise specify (None, None) to use a container image to run on a CPU.", "if os.getenv(\"IS_TESTING_DEPOLY_GPU\"):\n DEPLOY_GPU, DEPLOY_NGPU = (\n aip.AcceleratorType.NVIDIA_TESLA_K80,\n int(os.getenv(\"IS_TESTING_DEPOLY_GPU\")),\n )\nelse:\n DEPLOY_GPU, DEPLOY_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1)", "Container (Docker) image\nFor AutoML batch prediction, the container image for the serving binary is pre-determined by the Vertex prediction service. More specifically, the service will pick the appropriate container for the model depending on the hardware accelerator you selected.\nMachine Type\nNext, set the machine type to use for prediction.\n\nSet the variable DEPLOY_COMPUTE to configure the compute resources for the VM you will use for prediction.\nmachine type\nn1-standard: 3.75GB of memory per vCPU.\nn1-highmem: 6.5GB of memory per vCPU\nn1-highcpu: 0.9 GB of memory per vCPU\n\n\nvCPUs: number of [2, 4, 8, 16, 32, 64, 96 ]\n\nNote: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs", "if os.getenv(\"IS_TESTING_DEPLOY_MACHINE\"):\n MACHINE_TYPE = os.getenv(\"IS_TESTING_DEPLOY_MACHINE\")\nelse:\n MACHINE_TYPE = \"n1-standard\"\n\nVCPU = \"4\"\nDEPLOY_COMPUTE = MACHINE_TYPE + \"-\" + VCPU\nprint(\"Deploy machine type\", DEPLOY_COMPUTE)", "Tutorial\nNow you are ready to start creating your own AutoML tabular classification model.\nSet up clients\nThe Vertex client library works as a client/server model. On your side (the Python script) you will create a client that sends requests and receives responses from the Vertex server.\nYou will use different clients in this tutorial for different steps in the workflow. So set them all up upfront.\n\nDataset Service for Dataset resources.\nModel Service for Model resources.\nPipeline Service for training.\nEndpoint Service for deployment.\nPrediction Service for serving.", "# client options same for all services\nclient_options = {\"api_endpoint\": API_ENDPOINT}\n\n\ndef create_dataset_client():\n client = aip.DatasetServiceClient(client_options=client_options)\n return client\n\n\ndef create_model_client():\n client = aip.ModelServiceClient(client_options=client_options)\n return client\n\n\ndef create_pipeline_client():\n client = aip.PipelineServiceClient(client_options=client_options)\n return client\n\n\ndef create_endpoint_client():\n client = aip.EndpointServiceClient(client_options=client_options)\n return client\n\n\ndef create_prediction_client():\n client = aip.PredictionServiceClient(client_options=client_options)\n return client\n\n\nclients = {}\nclients[\"dataset\"] = create_dataset_client()\nclients[\"model\"] = create_model_client()\nclients[\"pipeline\"] = create_pipeline_client()\nclients[\"endpoint\"] = create_endpoint_client()\nclients[\"prediction\"] = create_prediction_client()\n\nfor client in clients.items():\n print(client)", "Dataset\nNow that your clients are ready, your first step is to create a Dataset resource instance. This step differs from Vision, Video and Language. 
For those products, after the Dataset resource is created, one then separately imports the data, using the import_data method.\nFor tabular, importing of the data is deferred until the training pipeline starts training the model. What do we do different? Well, first you won't be calling the import_data method. Instead, when you create the dataset instance you specify the Cloud Storage location of the CSV file or BigQuery location of the data table, which contains your tabular data as part of the Dataset resource's metadata.\nCloud Storage\nmetadata = {\"input_config\": {\"gcs_source\": {\"uri\": [gcs_uri]}}}\nThe format for a Cloud Storage path is:\ngs://[bucket_name]/[folder(s)/[file]\n\nBigQuery\nmetadata = {\"input_config\": {\"bigquery_source\": {\"uri\": [gcs_uri]}}}\nThe format for a BigQuery path is:\nbq://[collection].[dataset].[table]\n\nNote that the uri field is a list, whereby you can input multiple CSV files or BigQuery tables when your data is split across files.\nData preparation\nThe Vertex Dataset resource for tabular has a couple of requirements for your tabular data.\n\nMust be in a CSV file or a BigQuery query.\n\nCSV\nFor tabular classification, the CSV file has a few requirements:\n\nThe first row must be the heading -- note how this is different from Vision, Video and Language where the requirement is no heading.\nAll but one column are features.\nOne column is the label, which you will specify when you subsequently create the training pipeline.\n\nLocation of Cloud Storage training data.\nNow set the variable IMPORT_FILE to the location of the CSV index file in Cloud Storage.", "IMPORT_FILE = \"gs://cloud-samples-data/tables/iris_1000.csv\"", "Quick peek at your data\nYou will use a version of the Iris dataset that is stored in a public Cloud Storage bucket, using a CSV index file.\nStart by doing a quick peek at the data. You count the number of examples by counting the number of rows in the CSV index file (wc -l) and then peek at the first few rows.\nYou also need for training to know the heading name of the label column, which is save as label_column. For this dataset, it is the last column in the CSV file.", "count = ! gsutil cat $IMPORT_FILE | wc -l\nprint(\"Number of Examples\", int(count[0]))\n\nprint(\"First 10 rows\")\n! gsutil cat $IMPORT_FILE | head\n\nheading = ! gsutil cat $IMPORT_FILE | head -n1\nlabel_column = str(heading).split(\",\")[-1].split(\"'\")[0]\nprint(\"Label Column Name\", label_column)\nif label_column is None:\n raise Exception(\"label column missing\")", "Dataset\nNow that your clients are ready, your first step in training a model is to create a managed dataset instance, and then upload your labeled data to it.\nCreate Dataset resource instance\nUse the helper function create_dataset to create the instance of a Dataset resource. This function does the following:\n\nUses the dataset client service.\nCreates an Vertex Dataset resource (aip.Dataset), with the following parameters:\ndisplay_name: The human-readable name you choose to give it.\nmetadata_schema_uri: The schema for the dataset type.\nmetadata: The Cloud Storage or BigQuery location of the tabular data.\nCalls the client dataset service method create_dataset, with the following parameters:\nparent: The Vertex location root path for your Database, Model and Endpoint resources.\ndataset: The Vertex dataset object instance you created.\nThe method returns an operation object.\n\nAn operation object is how Vertex handles asynchronous calls for long running operations. 
While this step usually goes fast, when you first use it in your project, there is a longer delay due to provisioning.\nYou can use the operation object to get status on the operation (e.g., create Dataset resource) or to cancel the operation, by invoking an operation method:\n| Method | Description |\n| ----------- | ----------- |\n| result() | Waits for the operation to complete and returns a result object in JSON format. |\n| running() | Returns True/False on whether the operation is still running. |\n| done() | Returns True/False on whether the operation is completed. |\n| canceled() | Returns True/False on whether the operation was canceled. |\n| cancel() | Cancels the operation (this may take up to 30 seconds). |", "TIMEOUT = 90\n\n\ndef create_dataset(name, schema, src_uri=None, labels=None, timeout=TIMEOUT):\n start_time = time.time()\n try:\n if src_uri.startswith(\"gs://\"):\n metadata = {\"input_config\": {\"gcs_source\": {\"uri\": [src_uri]}}}\n elif src_uri.startswith(\"bq://\"):\n metadata = {\"input_config\": {\"bigquery_source\": {\"uri\": [src_uri]}}}\n dataset = aip.Dataset(\n display_name=name,\n metadata_schema_uri=schema,\n labels=labels,\n metadata=json_format.ParseDict(metadata, Value()),\n )\n\n operation = clients[\"dataset\"].create_dataset(parent=PARENT, dataset=dataset)\n print(\"Long running operation:\", operation.operation.name)\n result = operation.result(timeout=TIMEOUT)\n print(\"time:\", time.time() - start_time)\n print(\"response\")\n print(\" name:\", result.name)\n print(\" display_name:\", result.display_name)\n print(\" metadata_schema_uri:\", result.metadata_schema_uri)\n print(\" metadata:\", dict(result.metadata))\n print(\" create_time:\", result.create_time)\n print(\" update_time:\", result.update_time)\n print(\" etag:\", result.etag)\n print(\" labels:\", dict(result.labels))\n return result\n except Exception as e:\n print(\"exception:\", e)\n return None\n\n\nresult = create_dataset(\"iris-\" + TIMESTAMP, DATA_SCHEMA, src_uri=IMPORT_FILE)", "Now save the unique dataset identifier for the Dataset resource instance you created.", "# The full unique ID for the dataset\ndataset_id = result.name\n# The short numeric ID for the dataset\ndataset_short_id = dataset_id.split(\"/\")[-1]\n\nprint(dataset_id)", "Train the model\nNow train an AutoML tabular classification model using your Vertex Dataset resource. To train the model, do the following steps:\n\nCreate an Vertex training pipeline for the Dataset resource.\nExecute the pipeline to start the training.\n\nCreate a training pipeline\nYou may ask, what do we use a pipeline for? You typically use pipelines when the job (such as training) has multiple steps, generally in sequential order: do step A, do step B, etc. 
By putting the steps into a pipeline, we gain the benefits of:\n\nBeing reusable for subsequent training jobs.\nCan be containerized and ran as a batch job.\nCan be distributed.\nAll the steps are associated with the same pipeline job for tracking progress.\n\nUse this helper function create_pipeline, which takes the following parameters:\n\npipeline_name: A human readable name for the pipeline job.\nmodel_name: A human readable name for the model.\ndataset: The Vertex fully qualified dataset identifier.\nschema: The dataset labeling (annotation) training schema.\ntask: A dictionary describing the requirements for the training job.\n\nThe helper function calls the Pipeline client service'smethod create_pipeline, which takes the following parameters:\n\nparent: The Vertex location root path for your Dataset, Model and Endpoint resources.\ntraining_pipeline: the full specification for the pipeline training job.\n\nLet's look now deeper into the minimal requirements for constructing a training_pipeline specification:\n\ndisplay_name: A human readable name for the pipeline job.\ntraining_task_definition: The dataset labeling (annotation) training schema.\ntraining_task_inputs: A dictionary describing the requirements for the training job.\nmodel_to_upload: A human readable name for the model.\ninput_data_config: The dataset specification.\ndataset_id: The Vertex dataset identifier only (non-fully qualified) -- this is the last part of the fully-qualified identifier.\nfraction_split: If specified, the percentages of the dataset to use for training, test and validation. Otherwise, the percentages are automatically selected by AutoML.", "def create_pipeline(pipeline_name, model_name, dataset, schema, task):\n\n dataset_id = dataset.split(\"/\")[-1]\n\n input_config = {\n \"dataset_id\": dataset_id,\n \"fraction_split\": {\n \"training_fraction\": 0.8,\n \"validation_fraction\": 0.1,\n \"test_fraction\": 0.1,\n },\n }\n\n training_pipeline = {\n \"display_name\": pipeline_name,\n \"training_task_definition\": schema,\n \"training_task_inputs\": task,\n \"input_data_config\": input_config,\n \"model_to_upload\": {\"display_name\": model_name},\n }\n\n try:\n pipeline = clients[\"pipeline\"].create_training_pipeline(\n parent=PARENT, training_pipeline=training_pipeline\n )\n print(pipeline)\n except Exception as e:\n print(\"exception:\", e)\n return None\n return pipeline", "Construct the task requirements\nNext, construct the task requirements. Unlike other parameters which take a Python (JSON-like) dictionary, the task field takes a Google protobuf Struct, which is very similar to a Python dictionary. Use the json_format.ParseDict method for the conversion.\nThe minimal fields you need to specify are:\n\nprediction_type: Whether we are doing \"classification\" or \"regression\".\ntarget_column: The CSV heading column name for the column we want to predict (i.e., the label).\ntrain_budget_milli_node_hours: The maximum time to budget (billed) for training the model, where 1000 = 1 hour.\ndisable_early_stopping: Whether True/False to let AutoML use its judgement to stop training early or train for the entire budget.\ntransformations: Specifies the feature engineering for each feature column.\n\nFor transformations, the list must have an entry for each column. The outer key field indicates the type of feature engineering for the corresponding column. 
In this tutorial, you set it to \"auto\" to tell AutoML to automatically determine it.\nFinally, create the pipeline by calling the helper function create_pipeline, which returns an instance of a training pipeline object.", "TRANSFORMATIONS = [\n {\"auto\": {\"column_name\": \"sepal_width\"}},\n {\"auto\": {\"column_name\": \"sepal_length\"}},\n {\"auto\": {\"column_name\": \"petal_length\"}},\n {\"auto\": {\"column_name\": \"petal_width\"}},\n]\n\nPIPE_NAME = \"iris_pipe-\" + TIMESTAMP\nMODEL_NAME = \"iris_model-\" + TIMESTAMP\n\ntask = Value(\n struct_value=Struct(\n fields={\n \"target_column\": Value(string_value=label_column),\n \"prediction_type\": Value(string_value=\"classification\"),\n \"train_budget_milli_node_hours\": Value(number_value=1000),\n \"disable_early_stopping\": Value(bool_value=False),\n \"transformations\": json_format.ParseDict(TRANSFORMATIONS, Value()),\n }\n )\n)\n\nresponse = create_pipeline(PIPE_NAME, MODEL_NAME, dataset_id, TRAINING_SCHEMA, task)", "Now save the unique identifier of the training pipeline you created.", "# The full unique ID for the pipeline\npipeline_id = response.name\n# The short numeric ID for the pipeline\npipeline_short_id = pipeline_id.split(\"/\")[-1]\n\nprint(pipeline_id)", "Get information on a training pipeline\nNow get pipeline information for just this training pipeline instance. The helper function gets the job information for just this job by calling the the job client service's get_training_pipeline method, with the following parameter:\n\nname: The Vertex fully qualified pipeline identifier.\n\nWhen the model is done training, the pipeline state will be PIPELINE_STATE_SUCCEEDED.", "def get_training_pipeline(name, silent=False):\n response = clients[\"pipeline\"].get_training_pipeline(name=name)\n if silent:\n return response\n\n print(\"pipeline\")\n print(\" name:\", response.name)\n print(\" display_name:\", response.display_name)\n print(\" state:\", response.state)\n print(\" training_task_definition:\", response.training_task_definition)\n print(\" training_task_inputs:\", dict(response.training_task_inputs))\n print(\" create_time:\", response.create_time)\n print(\" start_time:\", response.start_time)\n print(\" end_time:\", response.end_time)\n print(\" update_time:\", response.update_time)\n print(\" labels:\", dict(response.labels))\n return response\n\n\nresponse = get_training_pipeline(pipeline_id)", "Deployment\nTraining the above model may take upwards of 30 minutes time.\nOnce your model is done training, you can calculate the actual time it took to train the model by subtracting end_time from start_time. For your model, you will need to know the fully qualified Vertex Model resource identifier, which the pipeline service assigned to it. 
You can get this from the returned pipeline instance as the field model_to_deploy.name.", "while True:\n response = get_training_pipeline(pipeline_id, True)\n if response.state != aip.PipelineState.PIPELINE_STATE_SUCCEEDED:\n print(\"Training job has not completed:\", response.state)\n model_to_deploy_id = None\n if response.state == aip.PipelineState.PIPELINE_STATE_FAILED:\n raise Exception(\"Training Job Failed\")\n else:\n model_to_deploy = response.model_to_upload\n model_to_deploy_id = model_to_deploy.name\n print(\"Training Time:\", response.end_time - response.start_time)\n break\n time.sleep(60)\n\nprint(\"model to deploy:\", model_to_deploy_id)", "Model information\nNow that your model is trained, you can get some information on your model.\nEvaluate the Model resource\nNow find out how good the model service believes your model is. As part of training, some portion of the dataset was set aside as the test (holdout) data, which is used by the pipeline service to evaluate the model.\nList evaluations for all slices\nUse this helper function list_model_evaluations, which takes the following parameter:\n\nname: The Vertex fully qualified model identifier for the Model resource.\n\nThis helper function uses the model client service's list_model_evaluations method, which takes the same parameter. The response object from the call is a list, where each element is an evaluation metric.\nFor each evaluation (you probably only have one) we then print all the key names for each metric in the evaluation, and for a small set (logLoss and auPrc) you will print the result.", "def list_model_evaluations(name):\n response = clients[\"model\"].list_model_evaluations(parent=name)\n for evaluation in response:\n print(\"model_evaluation\")\n print(\" name:\", evaluation.name)\n print(\" metrics_schema_uri:\", evaluation.metrics_schema_uri)\n metrics = json_format.MessageToDict(evaluation._pb.metrics)\n for metric in metrics.keys():\n print(metric)\n print(\"logloss\", metrics[\"logLoss\"])\n print(\"auPrc\", metrics[\"auPrc\"])\n\n return evaluation.name\n\n\nlast_evaluation = list_model_evaluations(model_to_deploy_id)", "Deploy the Model resource\nNow deploy the trained Vertex Model resource you created with AutoML. This requires two steps:\n\n\nCreate an Endpoint resource for deploying the Model resource to.\n\n\nDeploy the Model resource to the Endpoint resource.\n\n\nCreate an Endpoint resource\nUse this helper function create_endpoint to create an endpoint to deploy the model to for serving predictions, with the following parameter:\n\ndisplay_name: A human readable name for the Endpoint resource.\n\nThe helper function uses the endpoint client service's create_endpoint method, which takes the following parameter:\n\ndisplay_name: A human readable name for the Endpoint resource.\n\nCreating an Endpoint resource returns a long running operation, since it may take a few moments to provision the Endpoint resource for serving. You call response.result(), which is a synchronous call and will return when the Endpoint resource is ready. 
The helper function returns the Vertex fully qualified identifier for the Endpoint resource: response.name.", "ENDPOINT_NAME = \"iris_endpoint-\" + TIMESTAMP\n\n\ndef create_endpoint(display_name):\n endpoint = {\"display_name\": display_name}\n response = clients[\"endpoint\"].create_endpoint(parent=PARENT, endpoint=endpoint)\n print(\"Long running operation:\", response.operation.name)\n\n result = response.result(timeout=300)\n print(\"result\")\n print(\" name:\", result.name)\n print(\" display_name:\", result.display_name)\n print(\" description:\", result.description)\n print(\" labels:\", result.labels)\n print(\" create_time:\", result.create_time)\n print(\" update_time:\", result.update_time)\n return result\n\n\nresult = create_endpoint(ENDPOINT_NAME)", "Now get the unique identifier for the Endpoint resource you created.", "# The full unique ID for the endpoint\nendpoint_id = result.name\n# The short numeric ID for the endpoint\nendpoint_short_id = endpoint_id.split(\"/\")[-1]\n\nprint(endpoint_id)", "Compute instance scaling\nYou have several choices on scaling the compute instances for handling your online prediction requests:\n\nSingle Instance: The online prediction requests are processed on a single compute instance.\n\nSet the minimum (MIN_NODES) and maximum (MAX_NODES) number of compute instances to one.\n\n\nManual Scaling: The online prediction requests are split across a fixed number of compute instances that you manually specified.\n\n\nSet the minimum (MIN_NODES) and maximum (MAX_NODES) number of compute instances to the same number of nodes. When a model is first deployed to the instance, the fixed number of compute instances are provisioned and online prediction requests are evenly distributed across them.\n\n\nAuto Scaling: The online prediction requests are split across a scaleable number of compute instances.\n\nSet the minimum (MIN_NODES) number of compute instances to provision when a model is first deployed and to de-provision, and set the maximum (`MAX_NODES) number of compute instances to provision, depending on load conditions.\n\nThe minimum number of compute instances corresponds to the field min_replica_count and the maximum number of compute instances corresponds to the field max_replica_count, in your subsequent deployment request.", "MIN_NODES = 1\nMAX_NODES = 1", "Deploy Model resource to the Endpoint resource\nUse this helper function deploy_model to deploy the model to the endpoint you created for serving predictions, with the following parameters:\n\nmodel: The Vertex fully qualified identifier of the Model resource to upload (deploy) from the training pipeline.\ndeploy_model_display_name: A human readable name for the deployed model.\nendpoint: The Vertex fully qualified Endpoint resource identifier to deploy the Model resource to.\n\nThe helper function calls the Endpoint client service's method deploy_model, which takes the following parameters:\n\nendpoint: The Vertex fully qualified Endpoint resource identifier to deploy the Model resource to to.\ndeployed_model: The requirements for deploying the model.\ntraffic_split: Percent of traffic at endpoint that goes to this model, which is specified as a dictioney of one or more key/value pairs.\nIf only one model, then specify as { \"0\": 100 }, where \"0\" refers to this model being uploaded and 100 means 100% of the traffic.\nIf there are existing models on the endpoint, for which the traffic will be split, then specify as, where model_id is the model id of an existing model to the deployed 
Let's now dive deeper into the deployed_model parameter. This parameter is specified as a Python dictionary with the minimum required fields:\n\nmodel: The Vertex fully qualified identifier of the (upload) Model resource to deploy.\ndisplay_name: A human-readable name for the deployed model.\ndedicated_resources: This refers to how many compute instances (replicas) are scaled for serving prediction requests.\nmachine_spec: The compute instance to provision. Use the variable you set earlier DEPLOY_GPU != None to use a GPU; otherwise only a CPU is allocated.\nmin_replica_count: The number of compute instances to initially provision, which you set earlier as the variable MIN_NODES.\nmax_replica_count: The maximum number of compute instances to scale to, which you set earlier as the variable MAX_NODES.\nenable_container_logging: This enables logging of container events, such as execution failures (by default, container logging is disabled). Container logging is typically enabled when debugging the deployment and then disabled when deployed for production.\n\nTraffic Split\nLet's now dive deeper into the traffic_split parameter. This parameter is specified as a Python dictionary. This might at first be a bit confusing. Let me explain: you can deploy more than one instance of your model to an endpoint, and then set how much (percent) of the traffic goes to each instance.\nWhy would you do that? Perhaps you already have a previous version deployed in production -- let's call that v1. You got better model evaluation on v2, but you don't know for certain that it is really better until you deploy it to production. So in the case of a traffic split, you might want to deploy v2 to the same endpoint as v1, but have it only get, say, 10% of the traffic. That way, you can monitor how well it does without disrupting the majority of users -- until you make a final decision.\nResponse\nThe method returns a long running operation response. We will wait synchronously for the operation to complete by calling response.result(), which will block until the model is deployed. If this is the first time a model is deployed to the endpoint, it may take a few additional minutes to complete provisioning of resources.
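Once deploy_model (below) returns, one rough way to confirm what is now serving on the endpoint -- a sketch that assumes only the endpoint client already stored in the clients dictionary -- is:\n\n```\n# Hedged sketch: list the models deployed on the endpoint.\nendpoint = clients[\"endpoint\"].get_endpoint(name=endpoint_id)\nfor dm in endpoint.deployed_models:\n    print(dm.id, dm.display_name)\n```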
", "DEPLOYED_NAME = \"iris_deployed-\" + TIMESTAMP\n\n\ndef deploy_model(\n    model, deployed_model_display_name, endpoint, traffic_split={\"0\": 100}\n):\n\n    if DEPLOY_GPU:\n        machine_spec = {\n            \"machine_type\": DEPLOY_COMPUTE,\n            \"accelerator_type\": DEPLOY_GPU,\n            \"accelerator_count\": DEPLOY_NGPU,\n        }\n    else:\n        machine_spec = {\n            \"machine_type\": DEPLOY_COMPUTE,\n            \"accelerator_count\": 0,\n        }\n\n    deployed_model = {\n        \"model\": model,\n        \"display_name\": deployed_model_display_name,\n        \"dedicated_resources\": {\n            \"min_replica_count\": MIN_NODES,\n            \"max_replica_count\": MAX_NODES,\n            \"machine_spec\": machine_spec,\n        },\n        \"enable_container_logging\": False,\n    }\n\n    response = clients[\"endpoint\"].deploy_model(\n        endpoint=endpoint, deployed_model=deployed_model, traffic_split=traffic_split\n    )\n\n    print(\"Long running operation:\", response.operation.name)\n    result = response.result()\n    print(\"result\")\n    deployed_model = result.deployed_model\n    print(\" deployed_model\")\n    print(\" id:\", deployed_model.id)\n    print(\" model:\", deployed_model.model)\n    print(\" display_name:\", deployed_model.display_name)\n    print(\" create_time:\", deployed_model.create_time)\n\n    return deployed_model.id\n\n\ndeployed_model_id = deploy_model(model_to_deploy_id, DEPLOYED_NAME, endpoint_id)", "Make an online prediction request with explainability\nNow make an online prediction with explainability against your deployed model. With this method, the predicted response will include an explanation of how the features contributed to the prediction.\nMake test item\nYou will use synthetic data as a test data item. Don't be concerned that we are using synthetic data -- we just want to demonstrate how to make a prediction.", "INSTANCE = {\n    \"petal_length\": \"1.4\",\n    \"petal_width\": \"1.3\",\n    \"sepal_length\": \"5.1\",\n    \"sepal_width\": \"2.8\",\n}", "Make a prediction with explanation\nOk, now you have a test item. Use this helper function explain_item, which takes the following parameters:\n\ndata_items: The test tabular data items.\nendpoint: The Vertex fully qualified identifier for the Endpoint resource where the Model resource was deployed.\nparameters_dict: Additional filtering parameters for serving prediction results -- in your case you will pass None.\n\nThis function uses the prediction client service and calls the explain method with the following parameters:\n\nendpoint: The Vertex fully qualified identifier for the Endpoint resource where the Model resource was deployed.\ninstances: A list of instances (data items) to predict.\nparameters: Additional filtering parameters for serving prediction results. Note, image segmentation models do not support additional parameters.\ndeployed_model_id: The Vertex fully qualified identifier for the deployed model, when more than one model is deployed at the endpoint. Otherwise, if only one model is deployed, this can be set to None.\n\nRequest\nThe format of each instance is a dictionary of feature name/value pairs:\n{ 'feature_name': feature_value, ... }\n\nSince the explain() method can take multiple items (instances), you send your single test item as a list of one test item. As a final step, you package the instances list into Google's protobuf format -- which is what we pass to the explain() method.\nResponse\nThe response object returns a list, where each element in the list corresponds to the corresponding instance in the request.
You will see in the output for each prediction -- in this case there is just one:\n\ndeployed_model_id -- The Vertex fully qualified identifier for the Model resource that did the prediction/explanation.\npredictions -- The predicted class and confidence level between 0 and 1.\nconfidences: Confidence level in the prediction.\ndisplayNames: The predicted label.\nexplanations -- How each feature contributed to the prediction.", "def explain_item(\n    data_items, endpoint, parameters_dict, deployed_model_id, silent=False\n):\n    parameters = json_format.ParseDict(parameters_dict, Value())\n\n    # The format of each instance should conform to the deployed model's prediction input schema.\n    instances = [json_format.ParseDict(s, Value()) for s in data_items]\n\n    response = clients[\"prediction\"].explain(\n        endpoint=endpoint,\n        instances=instances,\n        parameters=parameters,\n        deployed_model_id=deployed_model_id,\n    )\n    if silent:\n        return response\n\n    print(\"response\")\n    print(\" deployed_model_id:\", response.deployed_model_id)\n    try:\n        predictions = response.predictions\n        print(\"predictions\")\n        for prediction in predictions:\n            print(\" prediction:\", dict(prediction))\n    except:\n        pass\n\n    explanations = response.explanations\n    print(\"explanations\")\n    for explanation in explanations:\n        print(explanation)\n    return response\n\n\nresponse = explain_item([INSTANCE], endpoint_id, None, None)", "Understanding the explanations response\nFirst, you will look at what your model predicted and compare it to the actual value.", "import numpy as np\n\ntry:\n    predictions = response.predictions\n    label = np.argmax(predictions[0][\"scores\"])\n    cls = predictions[0][\"classes\"][label]\n    print(\"Predicted Value:\", cls, predictions[0][\"scores\"][label])\nexcept:\n    pass", "Examine feature attributions\nNext, you will look at the feature attributions for this particular example. Positive attribution values mean a particular feature pushed your model prediction up by that amount, and vice versa for negative attribution values.", "from tabulate import tabulate\n\nfeature_names = [\"petal_length\", \"petal_width\", \"sepal_length\", \"sepal_width\"]\nattributions = response.explanations[0].attributions[0].feature_attributions\n\nrows = []\nfor i, val in enumerate(feature_names):\n    rows.append([val, INSTANCE[val], attributions[val]])\nprint(tabulate(rows, headers=[\"Feature name\", \"Feature value\", \"Attribution value\"]))", "Check your explanations and baselines\nTo better make sense of the feature attributions you're getting, you should compare them with your model's baseline. In most cases, the sum of your attribution values + the baseline should be very close to your model's predicted value for each input. Also note that for regression models, the baseline_score returned from AI Explanations will be the same for each example sent to your model. For classification models, each class will have its own baseline.\nIn this section you'll send 10 test examples to your model for prediction in order to compare the feature attributions with the baseline.
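Concretely, here is a rough, purely illustrative version of that identity for the single explanation computed above. The field names (baseline_output_value, instance_output_value, feature_attributions) follow the Vertex Attribution proto, so treat this as a sketch rather than guaranteed output:\n\n```\n# Hedged sketch: baseline + sum of attributions should roughly match the\n# model's output for this instance.\nattr = response.explanations[0].attributions[0]\ntotal = attr.baseline_output_value + sum(\n    attr.feature_attributions[k] for k in feature_names)\nprint(\"baseline + attributions:\", total)\nprint(\"instance output value: \", attr.instance_output_value)\n```\n\n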
You'll then run each test example's attributions through a sanity check in the sanity_check_explanations method.\nGet explanations", "import random\n\n# Prepare 10 test examples for prediction, using uniform random draws to\n# generate the test instances\ninstances = []\nfor i in range(10):\n    pl = str(random.uniform(1.0, 2.0))\n    pw = str(random.uniform(1.0, 2.0))\n    sl = str(random.uniform(4.0, 6.0))\n    sw = str(random.uniform(2.0, 4.0))\n    instances.append(\n        {\"petal_length\": pl, \"petal_width\": pw, \"sepal_length\": sl, \"sepal_width\": sw}\n    )\n\nresponse = explain_item(instances, endpoint_id, None, None, silent=True)", "Sanity check\nIn the function below, you perform a sanity check on the explanations.", "def sanity_check_explanations(\n    explanation, prediction, mean_tgt_value=None, variance_tgt_value=None\n):\n    passed_test = 0\n    total_test = 1\n    # `attributions` is a dict where keys are the feature names\n    # and values are the feature attributions for each feature\n    baseline_score = explanation.attributions[0].baseline_output_value\n    print(\"baseline:\", baseline_score)\n\n    # Sanity check 1: the prediction at the input should differ from the\n    # prediction at the baseline. If they are too close, use a different\n    # baseline -- e.g. a random input, or the training set mean.\n    if abs(prediction - baseline_score) <= 0.05:\n        print(\"Warning: example score and baseline score are too close.\")\n        print(\"You might not get attributions.\")\n    else:\n        passed_test += 1\n        print(\"Sanity Check 1: Passed\")\n\n    print(passed_test, \" out of \", total_test, \" sanity checks passed.\")\n\n\ni = 0\nfor explanation in response.explanations:\n    try:\n        prediction = np.max(response.predictions[i][\"scores\"])\n    except TypeError:\n        prediction = np.max(response.predictions[i])\n    sanity_check_explanations(explanation, prediction)\n    i += 1", "Undeploy the Model resource\nNow undeploy your Model resource from the serving Endpoint resource.
Use this helper function undeploy_model, which takes the following parameters:\n\ndeployed_model_id: The model deployment identifier returned by the endpoint service when the Model resource was deployed.\nendpoint: The Vertex fully qualified identifier for the Endpoint resource where the Model resource is deployed.\n\nThis function calls the endpoint client service's method undeploy_model, with the following parameters:\n\ndeployed_model_id: The model deployment identifier returned by the endpoint service when the Model resource was deployed.\nendpoint: The Vertex fully qualified identifier for the Endpoint resource where the Model resource is deployed.\ntraffic_split: How to split traffic among the remaining deployed models on the Endpoint resource.\n\nSince this is the only deployed model on the Endpoint resource, you can simply leave traffic_split empty by setting it to {}.", "def undeploy_model(deployed_model_id, endpoint):\n    response = clients[\"endpoint\"].undeploy_model(\n        endpoint=endpoint, deployed_model_id=deployed_model_id, traffic_split={}\n    )\n    print(response)\n\n\nundeploy_model(deployed_model_id, endpoint_id)", "Cleaning up\nTo clean up all GCP resources used in this project, you can delete the GCP\nproject you used for the tutorial.\nOtherwise, you can delete the individual resources you created in this tutorial:\n\nDataset\nPipeline\nModel\nEndpoint\nBatch Job\nCustom Job\nHyperparameter Tuning Job\nCloud Storage Bucket", "delete_dataset = True\ndelete_pipeline = True\ndelete_model = True\ndelete_endpoint = True\ndelete_batchjob = True\ndelete_customjob = True\ndelete_hptjob = True\ndelete_bucket = True\n\n# Delete the dataset using the Vertex fully qualified identifier for the dataset\ntry:\n    if delete_dataset and \"dataset_id\" in globals():\n        clients[\"dataset\"].delete_dataset(name=dataset_id)\nexcept Exception as e:\n    print(e)\n\n# Delete the training pipeline using the Vertex fully qualified identifier for the pipeline\ntry:\n    if delete_pipeline and \"pipeline_id\" in globals():\n        clients[\"pipeline\"].delete_training_pipeline(name=pipeline_id)\nexcept Exception as e:\n    print(e)\n\n# Delete the model using the Vertex fully qualified identifier for the model\ntry:\n    if delete_model and \"model_to_deploy_id\" in globals():\n        clients[\"model\"].delete_model(name=model_to_deploy_id)\nexcept Exception as e:\n    print(e)\n\n# Delete the endpoint using the Vertex fully qualified identifier for the endpoint\ntry:\n    if delete_endpoint and \"endpoint_id\" in globals():\n        clients[\"endpoint\"].delete_endpoint(name=endpoint_id)\nexcept Exception as e:\n    print(e)\n\n# Delete the batch job using the Vertex fully qualified identifier for the batch job\ntry:\n    if delete_batchjob and \"batch_job_id\" in globals():\n        clients[\"job\"].delete_batch_prediction_job(name=batch_job_id)\nexcept Exception as e:\n    print(e)\n\n# Delete the custom job using the Vertex fully qualified identifier for the custom job\ntry:\n    if delete_customjob and \"job_id\" in globals():\n        clients[\"job\"].delete_custom_job(name=job_id)\nexcept Exception as e:\n    print(e)\n\n# Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job\ntry:\n    if delete_hptjob and \"hpt_job_id\" in globals():\n        clients[\"job\"].delete_hyperparameter_tuning_job(name=hpt_job_id)\nexcept Exception as e:\n    print(e)\n\nif delete_bucket and \"BUCKET_NAME\" in globals():\n    ! gsutil rm -r $BUCKET_NAME" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
jerjorg/dft
tests/Example.ipynb
gpl-3.0
[ "Poisson Solver\nThis notebook demonstrates how the module crystal and planewave can be used in practice. It creates a convergence plot of the the total energy vs. sampling density, and plots the charge density.", "%load_ext autoreload\n%autoreload 2\n\nimport numpy as np\nfrom numpy.linalg import det\nfrom numpy.linalg import inv\nfrom numpy.fft import fftn, ifftn\nfrom itertools import product\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfrom pydft.crystal import *\nfrom pydft.bases.planewave import *\n\nsigma1 = 0.75\nsigma2 = 0.50\n\nlat_const = 6\nr = [lat_const,0,0]\nS = [16,16,16]\nR = makeR(r)\norigin = 1./2*np.sum(R,1)\ncrystal = Crystal(R,S)\ng(crystal)\n\n# Create vector of basis expansion coefficients for the electron density.\nn = -gauss_charge_dist(crystal._r,origin,sigma1) + (\n gauss_charge_dist(crystal._r,origin,sigma2))", "Plot the charge density on a plane lying along the x-y axis.", "# Plane refers to one of the pages of the n and r matrices.\nplane=0\n\nnpl = n[plane*S[1]*S[2]:(plane+1)*S[1]*S[2]]\nrpl = crystal._r[plane*S[1]*S[2]:(plane+1)*S[1]*S[2]]\nxpl = [rpl[i,0] for i in range(len(rpl))]\nypl = [rpl[i,1] for i in range(len(rpl))]\n\nfig = plt.figure()\nax = plt.gca(projection='3d')\nax.scatter(xpl,ypl,list(npl))\nplt.title(\"charge density distribution\")\nplt.show()\nplt.close()\n\nϕ=-4*np.pi*I(crystal, Linv(crystal, O(crystal, J(crystal, n))))\nUNumeric = 1./2*np.real(np.dot(J(crystal, ϕ),O(crystal, J(crystal, n))))\nUNumeric\n\n# Define the Gaussian broadening parameters\nsigma1 = 0.75\nsigma2 = 0.50\n\n# Find the analytic solution.\nU_analytic = ((1/sigma1 + 1/sigma2)/2 - np.sqrt(2)/\n np.sqrt(sigma1**2 + sigma2**2))/np.sqrt(np.pi)\nUerrors = []\nss = range(8,15)\nfor si in ss:\n # Create integer, sampling matrices.\n S = [si,si,si]\n\n # Define Coordinates.\n lat_const = 6\n r = [lat_const,0,0]\n R = makeR(r)\n origin = 1./2*np.sum(R,1)\n \n # Create the crystal\n crystal = Crystal(R, S)\n g(crystal)\n \n # Create vector of basis expansion coefficients for the electron density.\n n = -gauss_charge_dist(crystal._r, origin,sigma1) + (\n gauss_charge_dist(crystal._r,origin,sigma2))\n\n ϕ = -4*np.pi*I(crystal, Linv(crystal, O(crystal, J(crystal, n))))\n U_numeric = 1./2*np.real(np.dot(J(crystal, ϕ),O(crystal, J(crystal, n))))\n\n\n Uerrors.append(abs(U_numeric-U_analytic))\n del crystal\nplt.semilogy(ss, Uerrors)\nplt.xlabel(\"Sampling points per edge\")\nplt.ylabel(\"Error\")\nplt.show()\nplt.close()", "Ewald Sum", "from pydft.crystal import *\nfrom pydft.energies.ewald import *\n\nimport numpy as np\n\n# We should get something close to what Arias got with similar parameters.\nLC = 6\nr = [LC,0,0]\nR = makeR(r)\nS = [15,15,15]\ncrystal = Crystal(R, S, LC)\ncrystal._X = [[0., 0., 0.]]\nenergy = ewald_energy(crystal)\nprint(energy)", "In the assignment Arias got -1./3 so this is fairly close", "error = abs(energy - (-1./3))\nprint(error)", "Energy from Solving Schrodinger", "from pydft.energies.schrodinger import *\nfrom pydft.crystal import *\n\nLC = 1.\nR = makeR([LC, 0., 0.])\nS = [2,2,2]\ncrystal = Crystal(R, S, LC)\nbasis = \"planewave\"\npotential = \"sho\"\nw = 1.\nE = getE(crystal, basis, potential, [w])\n\nE", "I got the expected value of practically 0 for the imaginary part." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
silburt/rebound2
ipython_examples/Forces.ipynb
gpl-3.0
[ "Additional forces\nREBOUND is a gravitational N-body integrator. But you can also use it to integrate systems with additional, non-gravitational forces.\nThis tutorial gives you a very quick overview of how that works. Implementing additional forces in python as below will typically be a factor of a few slower than a C implementation. For a library that has C implementations for several commonly used additional effects (with everything callable from Python), see REBOUNDx.\nStark problem\nWe'll start be adding two particles, the Sun and an Earth-like planet to REBOUND.", "import rebound\nsim = rebound.Simulation()\nsim.integrator = \"whfast\"\nsim.add(m=1.)\nsim.add(m=1e-6,a=1.)\nsim.move_to_com() # Moves to the center of momentum frame", "We could integrate this system and the planet would go around the star at a fixed orbit with $a=1$ forever. Let's add an additional constant force that acting on the planet and is pointing in one direction $F_x = m\\cdot c$, where $m$ is the planet's mass and $c$ a constant. This is called the Stark problem. In python we can describe this with the following function", "ps = sim.particles\nc = 0.01\ndef starkForce(reb_sim):\n ps[1].ax += c", "Next, we need to tell REBOUND about this function.", "sim.additional_forces = starkForce", "Now we can just integrate as usual. Let's keep track of the eccentricity as we integrate as it will change due to the additional force.", "import numpy as np\nNout = 1000\nes = np.zeros(Nout)\ntimes = np.linspace(0.,100.*2.*np.pi,Nout)\nfor i, time in enumerate(times):\n sim.integrate(time, exact_finish_time=0) # integrate to the nearest timestep so WHFast's timestep stays constant\n es[i] = sim.particles[1].e ", "And let's plot the result.", "%matplotlib inline\nimport matplotlib.pyplot as plt\nfig = plt.figure(figsize=(15,5))\nax = plt.subplot(111)\nplt.plot(times, es);", "You can see that the eccentricity is oscillating between 0 and almost 1. \nNon-conservative forces\nThe previous example assumed a conservative force, i.e. we could describe it as a potential as it is velocity independent. Now, let's assume we have a velocity dependent force. This could be a migration force in a protoplanetary disk or PR drag. We'll start from scratch and add the same two particles as before.", "sim = rebound.Simulation()\nsim.integrator = \"ias15\"\nsim.add(m=1.)\nsim.add(m=1e-6,a=1.)\nsim.move_to_com() # Moves to the center of momentum frame", "But we change the additional force to be", "ps = sim.particles\ntau = 1000.\ndef migrationForce(reb_sim):\n ps[1].ax -= ps[1].vx/tau\n ps[1].ay -= ps[1].vy/tau\n ps[1].az -= ps[1].vz/tau", "We need to let REBOUND know that our force is velocity dependent. Otherwise, REBOUND will not update the velocities of the particles.", "sim.additional_forces = migrationForce\nsim.force_is_velocity_dependent = 1", "Now, we integrate as before. But this time we keep track of the semi-major axis instead of the eccentricity.", "Nout = 1000\na_s = np.zeros(Nout)\ntimes = np.linspace(0.,100.*2.*np.pi,Nout)\nfor i, time in enumerate(times):\n sim.integrate(time)\n a_s[i] = sim.particles[1].a \nfig = plt.figure(figsize=(15,5))\nax = plt.subplot(111)\nax.set_xlabel(\"time\")\nax.set_ylabel(\"semi-major axis\")\nplt.plot(times, a_s);", "The semi-major axis decaus exponentially on a timescale tau.\nIn the above example, REBOUND is calling a python function at every timestep. This can be slow. Note that you can also set rebound.additional_forces to a c function pointer. 
In the above example, REBOUND is calling a Python function at every timestep. This can be slow. Note that you can also set rebound.additional_forces to a C function pointer. This lets you speed up the simulation significantly. However, you need to write your own C function/library that knows how to calculate the forces. Or you can use Dan Tamayo's new migration library (in preparation)." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
tensorflow/docs-l10n
site/en-snapshot/tfx/tutorials/tfx/components.ipynb
apache-2.0
[ "Copyright 2021 The TensorFlow Authors.", "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "TFX Estimator Component Tutorial\nA Component-by-Component Introduction to TensorFlow Extended (TFX)\nNote: We recommend running this tutorial in a Colab notebook, with no setup required! Just click \"Run in Google Colab\".\n<div class=\"devsite-table-wrapper\"><table class=\"tfo-notebook-buttons\" align=\"left\">\n<td><a target=\"_blank\" href=\"https://www.tensorflow.org/tfx/tutorials/tfx/components\">\n<img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a></td>\n<td><a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/tfx/blob/master/docs/tutorials/tfx/components.ipynb\">\n<img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\">Run in Google Colab</a></td>\n<td><a target=\"_blank\" href=\"https://github.com/tensorflow/tfx/tree/master/docs/tutorials/tfx/components.ipynb\">\n<img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\">View source on GitHub</a></td>\n<td><a target=\"_blank\" href=\"https://storage.googleapis.com/tensorflow_docs/tfx/docs/tutorials/tfx/components.ipynb\">\n<img width=32px src=\"https://www.tensorflow.org/images/download_logo_32px.png\">Download notebook</a></td>\n</table></div>\n\n\nWarning: Estimators are not recommended for new code. Estimators run v1.Session-style code which is more difficult to write correctly, and can behave unexpectedly, especially when combined with TF 2 code. Estimators do fall under our compatibility guarantees, but will receive no fixes other than security vulnerabilities. See the migration guide for details.\n\nThis Colab-based tutorial will interactively walk through each built-in component of TensorFlow Extended (TFX).\nIt covers every step in an end-to-end machine learning pipeline, from data ingestion to pushing a model to serving.\nWhen you're done, the contents of this notebook can be automatically exported as TFX pipeline source code, which you can orchestrate with Apache Airflow and Apache Beam.\nNote: This notebook and its associated APIs are experimental and are\nin active development. Major changes in functionality, behavior, and\npresentation are expected.\nBackground\nThis notebook demonstrates how to use TFX in a Jupyter/Colab environment. Here, we walk through the Chicago Taxi example in an interactive notebook.\nWorking in an interactive notebook is a useful way to become familiar with the structure of a TFX pipeline. It's also useful when doing development of your own pipelines as a lightweight development environment, but you should be aware that there are differences in the way interactive notebooks are orchestrated, and how they access metadata artifacts.\nOrchestration\nIn a production deployment of TFX, you will use an orchestrator such as Apache Airflow, Kubeflow Pipelines, or Apache Beam to orchestrate a pre-defined pipeline graph of TFX components. 
In an interactive notebook, the notebook itself is the orchestrator, running each TFX component as you execute the notebook cells.\nMetadata\nIn a production deployment of TFX, you will access metadata through the ML Metadata (MLMD) API. MLMD stores metadata properties in a database such as MySQL or SQLite, and stores the metadata payloads in a persistent store such as on your filesystem. In an interactive notebook, both properties and payloads are stored in an ephemeral SQLite database in the /tmp directory on the Jupyter notebook or Colab server.\nSetup\nFirst, we install and import the necessary packages, set up paths, and download data.\nUpgrade Pip\nTo avoid upgrading Pip in a system when running locally, check to make sure that we're running in Colab. Local systems can of course be upgraded separately.", "try:\n import colab\n !pip install --upgrade pip\nexcept:\n pass", "Install TFX\nNote: In Google Colab, because of package updates, the first time you run this cell you must restart the runtime (Runtime > Restart runtime ...).", "!pip install -U tfx", "Did you restart the runtime?\nIf you are using Google Colab, the first time that you run the cell above, you must restart the runtime (Runtime > Restart runtime ...). This is because of the way that Colab loads packages.\nImport packages\nWe import necessary packages, including standard TFX component classes.", "import os\nimport pprint\nimport tempfile\nimport urllib\n\nimport absl\nimport tensorflow as tf\nimport tensorflow_model_analysis as tfma\ntf.get_logger().propagate = False\npp = pprint.PrettyPrinter()\n\nfrom tfx import v1 as tfx\nfrom tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext\n\n%load_ext tfx.orchestration.experimental.interactive.notebook_extensions.skip", "Let's check the library versions.", "print('TensorFlow version: {}'.format(tf.__version__))\nprint('TFX version: {}'.format(tfx.__version__))", "Set up pipeline paths", "# This is the root directory for your TFX pip package installation.\n_tfx_root = tfx.__path__[0]\n\n# This is the directory containing the TFX Chicago Taxi Pipeline example.\n_taxi_root = os.path.join(_tfx_root, 'examples/chicago_taxi_pipeline')\n\n# This is the path where your model will be pushed for serving.\n_serving_model_dir = os.path.join(\n tempfile.mkdtemp(), 'serving_model/taxi_simple')\n\n# Set up logging.\nabsl.logging.set_verbosity(absl.logging.INFO)", "Download example data\nWe download the example dataset for use in our TFX pipeline.\nThe dataset we're using is the Taxi Trips dataset released by the City of Chicago. 
The columns in this dataset are:\n<table>\n<tr><td>pickup_community_area</td><td>fare</td><td>trip_start_month</td></tr>\n<tr><td>trip_start_hour</td><td>trip_start_day</td><td>trip_start_timestamp</td></tr>\n<tr><td>pickup_latitude</td><td>pickup_longitude</td><td>dropoff_latitude</td></tr>\n<tr><td>dropoff_longitude</td><td>trip_miles</td><td>pickup_census_tract</td></tr>\n<tr><td>dropoff_census_tract</td><td>payment_type</td><td>company</td></tr>\n<tr><td>trip_seconds</td><td>dropoff_community_area</td><td>tips</td></tr>\n</table>\n\nWith this dataset, we will build a model that predicts the tips of a trip.", "_data_root = tempfile.mkdtemp(prefix='tfx-data')\nDATA_PATH = 'https://raw.githubusercontent.com/tensorflow/tfx/master/tfx/examples/chicago_taxi_pipeline/data/simple/data.csv'\n_data_filepath = os.path.join(_data_root, \"data.csv\")\nurllib.request.urlretrieve(DATA_PATH, _data_filepath)", "Take a quick look at the CSV file.", "!head {_data_filepath}", "Disclaimer: This site provides applications using data that has been modified for use from its original source, www.cityofchicago.org, the official website of the City of Chicago. The City of Chicago makes no claims as to the content, accuracy, timeliness, or completeness of any of the data provided at this site. The data provided at this site is subject to change at any time. It is understood that the data provided at this site is being used at one’s own risk.\nCreate the InteractiveContext\nLast, we create an InteractiveContext, which will allow us to run TFX components interactively in this notebook.", "# Here, we create an InteractiveContext using default parameters. This will\n# use a temporary directory with an ephemeral ML Metadata database instance.\n# To use your own pipeline root or database, the optional properties\n# `pipeline_root` and `metadata_connection_config` may be passed to\n# InteractiveContext. Calls to InteractiveContext are no-ops outside of the\n# notebook.\ncontext = InteractiveContext()", "Run TFX components interactively\nIn the cells that follow, we create TFX components one-by-one, run each of them, and visualize their output artifacts.\nExampleGen\nThe ExampleGen component is usually at the start of a TFX pipeline. It will:\n\nSplit data into training and evaluation sets (by default, 2/3 training + 1/3 eval)\nConvert data into the tf.Example format (learn more here)\nCopy data into the _tfx_root directory for other components to access\n\nExampleGen takes as input the path to your data source. In our case, this is the _data_root path that contains the downloaded CSV.\nNote: In this notebook, we can instantiate components one-by-one and run them with InteractiveContext.run(). By contrast, in a production setting, we would specify all the components upfront in a Pipeline to pass to the orchestrator (see the Building a TFX Pipeline Guide).", "example_gen = tfx.components.CsvExampleGen(input_base=_data_root)\ncontext.run(example_gen)", "Let's examine the output artifacts of ExampleGen. 
This component produces two artifacts, training examples and evaluation examples:", "artifact = example_gen.outputs['examples'].get()[0]\nprint(artifact.split_names, artifact.uri)", "We can also take a look at the first three training examples:", "# Get the URI of the output artifact representing the training examples, which is a directory\ntrain_uri = os.path.join(example_gen.outputs['examples'].get()[0].uri, 'Split-train')\n\n# Get the list of files in this directory (all compressed TFRecord files)\ntfrecord_filenames = [os.path.join(train_uri, name)\n for name in os.listdir(train_uri)]\n\n# Create a `TFRecordDataset` to read these files\ndataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type=\"GZIP\")\n\n# Iterate over the first 3 records and decode them.\nfor tfrecord in dataset.take(3):\n serialized_example = tfrecord.numpy()\n example = tf.train.Example()\n example.ParseFromString(serialized_example)\n pp.pprint(example)", "Now that ExampleGen has finished ingesting the data, the next step is data analysis.\nStatisticsGen\nThe StatisticsGen component computes statistics over your dataset for data analysis, as well as for use in downstream components. It uses the TensorFlow Data Validation library.\nStatisticsGen takes as input the dataset we just ingested using ExampleGen.", "statistics_gen = tfx.components.StatisticsGen(examples=example_gen.outputs['examples'])\ncontext.run(statistics_gen)", "After StatisticsGen finishes running, we can visualize the outputted statistics. Try playing with the different plots!", "context.show(statistics_gen.outputs['statistics'])", "SchemaGen\nThe SchemaGen component generates a schema based on your data statistics. (A schema defines the expected bounds, types, and properties of the features in your dataset.) It also uses the TensorFlow Data Validation library.\nSchemaGen will take as input the statistics that we generated with StatisticsGen, looking at the training split by default.", "schema_gen = tfx.components.SchemaGen(\n statistics=statistics_gen.outputs['statistics'],\n infer_feature_shape=False)\ncontext.run(schema_gen)", "After SchemaGen finishes running, we can visualize the generated schema as a table.", "context.show(schema_gen.outputs['schema'])", "Each feature in your dataset shows up as a row in the schema table, alongside its properties. The schema also captures all the values that a categorical feature takes on, denoted as its domain.\nTo learn more about schemas, see the SchemaGen documentation.\nExampleValidator\nThe ExampleValidator component detects anomalies in your data, based on the expectations defined by the schema. It also uses the TensorFlow Data Validation library.\nExampleValidator will take as input the statistics from StatisticsGen, and the schema from SchemaGen.", "example_validator = tfx.components.ExampleValidator(\n statistics=statistics_gen.outputs['statistics'],\n schema=schema_gen.outputs['schema'])\ncontext.run(example_validator)", "After ExampleValidator finishes running, we can visualize the anomalies as a table.", "context.show(example_validator.outputs['anomalies'])", "In the anomalies table, we can see that there are no anomalies. This is what we'd expect, since this the first dataset that we've analyzed and the schema is tailored to it. You should review this schema -- anything unexpected means an anomaly in the data. 
Once reviewed, the schema can be used to guard future data, and anomalies produced here can be used to debug model performance, understand how your data evolves over time, and identify data errors.\nTransform\nThe Transform component performs feature engineering for both training and serving. It uses the TensorFlow Transform library.\nTransform will take as input the data from ExampleGen and the schema from SchemaGen, as well as a module that contains user-defined Transform code.\nLet's see an example of user-defined Transform code below (for an introduction to the TensorFlow Transform APIs, see the tutorial). First, we define a few constants for feature engineering:\nNote: The %%writefile cell magic will save the contents of the cell as a .py file on disk. This allows the Transform component to load your code as a module.", "_taxi_constants_module_file = 'taxi_constants.py'\n\n%%writefile {_taxi_constants_module_file}\n\n# Categorical features are assumed to each have a maximum value in the dataset.\nMAX_CATEGORICAL_FEATURE_VALUES = [24, 31, 12]\n\nCATEGORICAL_FEATURE_KEYS = [\n    'trip_start_hour', 'trip_start_day', 'trip_start_month',\n    'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area',\n    'dropoff_community_area'\n]\n\nDENSE_FLOAT_FEATURE_KEYS = ['trip_miles', 'fare', 'trip_seconds']\n\n# Number of buckets used by tf.transform for encoding each feature.\nFEATURE_BUCKET_COUNT = 10\n\nBUCKET_FEATURE_KEYS = [\n    'pickup_latitude', 'pickup_longitude', 'dropoff_latitude',\n    'dropoff_longitude'\n]\n\n# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform\nVOCAB_SIZE = 1000\n\n# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.\nOOV_SIZE = 10\n\nVOCAB_FEATURE_KEYS = [\n    'payment_type',\n    'company',\n]\n\n# Keys\nLABEL_KEY = 'tips'\nFARE_KEY = 'fare'", "Next, we write a preprocessing_fn that takes in raw data as input, and returns transformed features that our model can train on:", "_taxi_transform_module_file = 'taxi_transform.py'\n\n%%writefile {_taxi_transform_module_file}\n\nimport tensorflow as tf\nimport tensorflow_transform as tft\n\nimport taxi_constants\n\n_DENSE_FLOAT_FEATURE_KEYS = taxi_constants.DENSE_FLOAT_FEATURE_KEYS\n_VOCAB_FEATURE_KEYS = taxi_constants.VOCAB_FEATURE_KEYS\n_VOCAB_SIZE = taxi_constants.VOCAB_SIZE\n_OOV_SIZE = taxi_constants.OOV_SIZE\n_FEATURE_BUCKET_COUNT = taxi_constants.FEATURE_BUCKET_COUNT\n_BUCKET_FEATURE_KEYS = taxi_constants.BUCKET_FEATURE_KEYS\n_CATEGORICAL_FEATURE_KEYS = taxi_constants.CATEGORICAL_FEATURE_KEYS\n_FARE_KEY = taxi_constants.FARE_KEY\n_LABEL_KEY = taxi_constants.LABEL_KEY\n\n\ndef preprocessing_fn(inputs):\n    \"\"\"tf.transform's callback function for preprocessing inputs.\n    Args:\n    inputs: map from feature keys to raw not-yet-transformed features.\n    Returns:\n    Map from string feature key to transformed feature operations.\n    \"\"\"\n    outputs = {}\n    for key in _DENSE_FLOAT_FEATURE_KEYS:\n        # If sparse, make it dense, setting NaNs to 0 or '', and apply a z-score.\n        outputs[key] = tft.scale_to_z_score(\n            _fill_in_missing(inputs[key]))\n\n    for key in _VOCAB_FEATURE_KEYS:\n        # Build a vocabulary for this feature.\n        outputs[key] = tft.compute_and_apply_vocabulary(\n            _fill_in_missing(inputs[key]),\n            top_k=_VOCAB_SIZE,\n            num_oov_buckets=_OOV_SIZE)\n\n    for key in _BUCKET_FEATURE_KEYS:\n        outputs[key] = tft.bucketize(\n            _fill_in_missing(inputs[key]), _FEATURE_BUCKET_COUNT)\n\n    for key in _CATEGORICAL_FEATURE_KEYS:\n        outputs[key] = _fill_in_missing(inputs[key])\n\n    # Was this passenger a big
tipper?\n    taxi_fare = _fill_in_missing(inputs[_FARE_KEY])\n    tips = _fill_in_missing(inputs[_LABEL_KEY])\n    outputs[_LABEL_KEY] = tf.where(\n        tf.math.is_nan(taxi_fare),\n        tf.cast(tf.zeros_like(taxi_fare), tf.int64),\n        # Test if the tip was > 20% of the fare.\n        tf.cast(\n            tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64))\n\n    return outputs\n\n\ndef _fill_in_missing(x):\n    \"\"\"Replace missing values in a SparseTensor.\n    Fills in missing values of `x` with '' or 0, and converts to a dense tensor.\n    Args:\n    x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1\n    in the second dimension.\n    Returns:\n    A rank 1 tensor where missing values of `x` have been filled in.\n    \"\"\"\n    if not isinstance(x, tf.sparse.SparseTensor):\n        return x\n\n    default_value = '' if x.dtype == tf.string else 0\n    return tf.squeeze(\n        tf.sparse.to_dense(\n            tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]),\n            default_value),\n        axis=1)", "Now, we pass this feature engineering code to the Transform component and run it to transform your data.", "transform = tfx.components.Transform(\n    examples=example_gen.outputs['examples'],\n    schema=schema_gen.outputs['schema'],\n    module_file=os.path.abspath(_taxi_transform_module_file))\ncontext.run(transform)", "Let's examine the output artifacts of Transform. This component produces two types of outputs:\n\ntransform_graph is the graph that can perform the preprocessing operations (this graph will be included in the serving and evaluation models).\ntransformed_examples represents the preprocessed training and evaluation data.", "transform.outputs", "Take a peek at the transform_graph artifact. It points to a directory containing three subdirectories.", "train_uri = transform.outputs['transform_graph'].get()[0].uri\nos.listdir(train_uri)", "The transformed_metadata subdirectory contains the schema of the preprocessed data. The transform_fn subdirectory contains the actual preprocessing graph. The metadata subdirectory contains the schema of the original data.
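One rough way to poke at the transform graph programmatically -- a sketch using the TensorFlow Transform library that the module above already depends on:\n\n```\nimport tensorflow_transform as tft\n\ntft_output = tft.TFTransformOutput(\n    transform.outputs['transform_graph'].get()[0].uri)\nprint(tft_output.transformed_feature_spec())  # post-transform feature spec\n```\n\n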
We can also take a look at the first three transformed examples:", "# Get the URI of the output artifact representing the transformed examples, which is a directory\ntrain_uri = os.path.join(transform.outputs['transformed_examples'].get()[0].uri, 'Split-train')\n\n# Get the list of files in this directory (all compressed TFRecord files)\ntfrecord_filenames = [os.path.join(train_uri, name)\n                      for name in os.listdir(train_uri)]\n\n# Create a `TFRecordDataset` to read these files\ndataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type=\"GZIP\")\n\n# Iterate over the first 3 records and decode them.\nfor tfrecord in dataset.take(3):\n    serialized_example = tfrecord.numpy()\n    example = tf.train.Example()\n    example.ParseFromString(serialized_example)\n    pp.pprint(example)", "After the Transform component has transformed your data into features, the next step is to train a model.\nTrainer\nThe Trainer component will train a model that you define in TensorFlow (either using the Estimator API or the Keras API with model_to_estimator).\nTrainer takes as input the schema from SchemaGen, the transformed data and graph from Transform, training parameters, as well as a module that contains user-defined model code.\nLet's see an example of user-defined model code below (for an introduction to the TensorFlow Estimator APIs, see the tutorial):", "_taxi_trainer_module_file = 'taxi_trainer.py'\n\n%%writefile {_taxi_trainer_module_file}\n\nimport tensorflow as tf\nimport tensorflow_model_analysis as tfma\nimport tensorflow_transform as tft\nfrom tensorflow_transform.tf_metadata import schema_utils\nfrom tfx_bsl.tfxio import dataset_options\n\nimport taxi_constants\n\n_DENSE_FLOAT_FEATURE_KEYS = taxi_constants.DENSE_FLOAT_FEATURE_KEYS\n_VOCAB_FEATURE_KEYS = taxi_constants.VOCAB_FEATURE_KEYS\n_VOCAB_SIZE = taxi_constants.VOCAB_SIZE\n_OOV_SIZE = taxi_constants.OOV_SIZE\n_FEATURE_BUCKET_COUNT = taxi_constants.FEATURE_BUCKET_COUNT\n_BUCKET_FEATURE_KEYS = taxi_constants.BUCKET_FEATURE_KEYS\n_CATEGORICAL_FEATURE_KEYS = taxi_constants.CATEGORICAL_FEATURE_KEYS\n_MAX_CATEGORICAL_FEATURE_VALUES = taxi_constants.MAX_CATEGORICAL_FEATURE_VALUES\n_LABEL_KEY = taxi_constants.LABEL_KEY\n\n\n# Tf.Transform considers these features as \"raw\"\ndef _get_raw_feature_spec(schema):\n    return schema_utils.schema_as_feature_spec(schema).feature_spec\n\n\ndef _build_estimator(config, hidden_units=None, warm_start_from=None):\n    \"\"\"Build an estimator for predicting the tipping behavior of taxi riders.\n    Args:\n    config: tf.estimator.RunConfig defining the runtime environment for the\n    estimator (including model_dir).\n    hidden_units: [int], the layer sizes of the DNN (input layer first)\n    warm_start_from: Optional directory to warm start from.\n    Returns:\n    A dict of the following:\n    - estimator: The estimator that will be used for training and eval.\n    - train_spec: Spec for training.\n    - eval_spec: Spec for eval.\n    - eval_input_receiver_fn: Input function for eval.\n    \"\"\"\n    real_valued_columns = [\n        tf.feature_column.numeric_column(key, shape=())\n        for key in _DENSE_FLOAT_FEATURE_KEYS\n    ]\n    categorical_columns = [\n        tf.feature_column.categorical_column_with_identity(\n            key, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0)\n        for key in _VOCAB_FEATURE_KEYS\n    ]\n    categorical_columns += [\n        tf.feature_column.categorical_column_with_identity(\n            key, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0)\n        for key in _BUCKET_FEATURE_KEYS\n    ]\n    
categorical_columns += [\n        tf.feature_column.categorical_column_with_identity( # pylint: disable=g-complex-comprehension\n            key,\n            num_buckets=num_buckets,\n            default_value=0) for key, num_buckets in zip(\n            _CATEGORICAL_FEATURE_KEYS,\n            _MAX_CATEGORICAL_FEATURE_VALUES)\n    ]\n    return tf.estimator.DNNLinearCombinedClassifier(\n        config=config,\n        linear_feature_columns=categorical_columns,\n        dnn_feature_columns=real_valued_columns,\n        dnn_hidden_units=hidden_units or [100, 70, 50, 25],\n        warm_start_from=warm_start_from)\n\n\ndef _example_serving_receiver_fn(tf_transform_graph, schema):\n    \"\"\"Build the serving inputs.\n    Args:\n    tf_transform_graph: A TFTransformOutput.\n    schema: the schema of the input data.\n    Returns:\n    Tensorflow graph which parses examples, applying tf-transform to them.\n    \"\"\"\n    raw_feature_spec = _get_raw_feature_spec(schema)\n    raw_feature_spec.pop(_LABEL_KEY)\n\n    raw_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(\n        raw_feature_spec, default_batch_size=None)\n    serving_input_receiver = raw_input_fn()\n\n    transformed_features = tf_transform_graph.transform_raw_features(\n        serving_input_receiver.features)\n\n    return tf.estimator.export.ServingInputReceiver(\n        transformed_features, serving_input_receiver.receiver_tensors)\n\n\ndef _eval_input_receiver_fn(tf_transform_graph, schema):\n    \"\"\"Build everything needed for the tf-model-analysis to run the model.\n    Args:\n    tf_transform_graph: A TFTransformOutput.\n    schema: the schema of the input data.\n    Returns:\n    EvalInputReceiver function, which contains:\n    - Tensorflow graph which parses raw untransformed features, applies the\n    tf-transform preprocessing operators.\n    - Set of raw, untransformed features.\n    - Label against which predictions will be compared.\n    \"\"\"\n    # Notice that the inputs are raw features, not transformed features here.\n    raw_feature_spec = _get_raw_feature_spec(schema)\n\n    serialized_tf_example = tf.compat.v1.placeholder(\n        dtype=tf.string, shape=[None], name='input_example_tensor')\n\n    # Add a parse_example operator to the tensorflow graph, which will parse\n    # raw, untransformed, tf examples.\n    features = tf.io.parse_example(serialized_tf_example, raw_feature_spec)\n\n    # Now that we have our raw examples, process them through the tf-transform\n    # function computed during the preprocessing step.\n    transformed_features = tf_transform_graph.transform_raw_features(\n        features)\n\n    # The key name MUST be 'examples'.\n    receiver_tensors = {'examples': serialized_tf_example}\n\n    # NOTE: Model is driven by transformed features (since training works on the\n    # materialized output of TFT), but slicing will happen on raw features.\n    features.update(transformed_features)\n\n    return tfma.export.EvalInputReceiver(\n        features=features,\n        receiver_tensors=receiver_tensors,\n        labels=transformed_features[_LABEL_KEY])\n\n\ndef _input_fn(file_pattern, data_accessor, tf_transform_output, batch_size=200):\n    \"\"\"Generates features and label for tuning/training.\n\n    Args:\n    file_pattern: List of paths or patterns of input tfrecord files.\n    data_accessor: DataAccessor for converting input to RecordBatch.\n    tf_transform_output: A TFTransformOutput.\n    batch_size: representing the number of consecutive elements of returned\n    dataset to combine in a single batch\n\n    Returns:\n    A dataset that contains (features, indices) tuple where features is a\n    dictionary of Tensors, and indices is a single Tensor of label indices.\n    \"\"\"\n    return data_accessor.tf_dataset_factory(\n        file_pattern,\n    
dataset_options.TensorFlowDatasetOptions(\n            batch_size=batch_size, label_key=_LABEL_KEY),\n        tf_transform_output.transformed_metadata.schema)\n\n\n# TFX will call this function\ndef trainer_fn(trainer_fn_args, schema):\n    \"\"\"Build the estimator using the high level API.\n    Args:\n    trainer_fn_args: Holds args used to train the model as name/value pairs.\n    schema: Holds the schema of the training examples.\n    Returns:\n    A dict of the following:\n    - estimator: The estimator that will be used for training and eval.\n    - train_spec: Spec for training.\n    - eval_spec: Spec for eval.\n    - eval_input_receiver_fn: Input function for eval.\n    \"\"\"\n    # Number of nodes in the first layer of the DNN\n    first_dnn_layer_size = 100\n    num_dnn_layers = 4\n    dnn_decay_factor = 0.7\n\n    train_batch_size = 40\n    eval_batch_size = 40\n\n    tf_transform_graph = tft.TFTransformOutput(trainer_fn_args.transform_output)\n\n    train_input_fn = lambda: _input_fn(  # pylint: disable=g-long-lambda\n        trainer_fn_args.train_files,\n        trainer_fn_args.data_accessor,\n        tf_transform_graph,\n        batch_size=train_batch_size)\n\n    eval_input_fn = lambda: _input_fn(  # pylint: disable=g-long-lambda\n        trainer_fn_args.eval_files,\n        trainer_fn_args.data_accessor,\n        tf_transform_graph,\n        batch_size=eval_batch_size)\n\n    train_spec = tf.estimator.TrainSpec(  # pylint: disable=g-long-lambda\n        train_input_fn,\n        max_steps=trainer_fn_args.train_steps)\n\n    serving_receiver_fn = lambda: _example_serving_receiver_fn(  # pylint: disable=g-long-lambda\n        tf_transform_graph, schema)\n\n    exporter = tf.estimator.FinalExporter('chicago-taxi', serving_receiver_fn)\n    eval_spec = tf.estimator.EvalSpec(\n        eval_input_fn,\n        steps=trainer_fn_args.eval_steps,\n        exporters=[exporter],\n        name='chicago-taxi-eval')\n\n    run_config = tf.estimator.RunConfig(\n        save_checkpoints_steps=999, keep_checkpoint_max=1)\n\n    run_config = run_config.replace(model_dir=trainer_fn_args.serving_model_dir)\n\n    estimator = _build_estimator(\n        # Construct layer sizes with exponential decay\n        hidden_units=[\n            max(2, int(first_dnn_layer_size * dnn_decay_factor**i))\n            for i in range(num_dnn_layers)\n        ],\n        config=run_config,\n        warm_start_from=trainer_fn_args.base_model)\n\n    # Create an input receiver for TFMA processing\n    receiver_fn = lambda: _eval_input_receiver_fn(  # pylint: disable=g-long-lambda\n        tf_transform_graph, schema)\n\n    return {\n        'estimator': estimator,\n        'train_spec': train_spec,\n        'eval_spec': eval_spec,\n        'eval_input_receiver_fn': receiver_fn\n    }", "Now, we pass this model code to the Trainer component and run it to train the model.", "from tfx.components.trainer.executor import Executor\nfrom tfx.dsl.components.base import executor_spec\n\ntrainer = tfx.components.Trainer(\n    module_file=os.path.abspath(_taxi_trainer_module_file),\n    custom_executor_spec=executor_spec.ExecutorClassSpec(Executor),\n    examples=transform.outputs['transformed_examples'],\n    schema=schema_gen.outputs['schema'],\n    transform_graph=transform.outputs['transform_graph'],\n    train_args=tfx.proto.TrainArgs(num_steps=10000),\n    eval_args=tfx.proto.EvalArgs(num_steps=5000))\ncontext.run(trainer)", "Analyze Training with TensorBoard\nOptionally, we can connect TensorBoard to the Trainer to analyze our model's training curves.", "# Get the URI of the output artifact representing the training logs, which is a directory\nmodel_run_dir = trainer.outputs['model_run'].get()[0].uri\n\n%load_ext tensorboard\n%tensorboard --logdir {model_run_dir}", "Evaluator\nThe Evaluator component computes model performance metrics over the
evaluation set. It uses the TensorFlow Model Analysis library. The Evaluator can also optionally validate that a newly trained model is better than the previous model. This is useful in a production pipeline setting where you may automatically train and validate a model every day. In this notebook, we only train one model, so the Evaluator will automatically label the model as \"good\".\nEvaluator will take as input the data from ExampleGen, the trained model from Trainer, and a slicing configuration. The slicing configuration allows you to slice your metrics on feature values (e.g. how does your model perform on taxi trips that start at 8am versus 8pm?). See an example of this configuration below:", "eval_config = tfma.EvalConfig(\n    model_specs=[\n        # Using signature 'eval' implies the use of an EvalSavedModel. To use\n        # a serving model instead, remove the signature so that it defaults to\n        # 'serving_default', and add a label_key.\n        tfma.ModelSpec(signature_name='eval')\n    ],\n    metrics_specs=[\n        tfma.MetricsSpec(\n            # The metrics added here are in addition to those saved with the\n            # model (assuming either a keras model or EvalSavedModel is used).\n            # Any metrics added into the saved model (for example using\n            # model.compile(..., metrics=[...]), etc) will be computed\n            # automatically.\n            metrics=[\n                tfma.MetricConfig(class_name='ExampleCount')\n            ],\n            # To add validation thresholds for metrics saved with the model,\n            # add them keyed by metric name to the thresholds map.\n            thresholds = {\n                'accuracy': tfma.MetricThreshold(\n                    value_threshold=tfma.GenericValueThreshold(\n                        lower_bound={'value': 0.5}),\n                    # Change threshold will be ignored if there is no\n                    # baseline model resolved from MLMD (first run).\n                    change_threshold=tfma.GenericChangeThreshold(\n                        direction=tfma.MetricDirection.HIGHER_IS_BETTER,\n                        absolute={'value': -1e-10}))\n            }\n        )\n    ],\n    slicing_specs=[\n        # An empty slice spec means the overall slice, i.e. the whole dataset.\n        tfma.SlicingSpec(),\n        # Data can be sliced along a feature column. In this case, data is\n        # sliced along feature column trip_start_hour.\n        tfma.SlicingSpec(feature_keys=['trip_start_hour'])\n    ])", "Next, we give this configuration to Evaluator and run it.", "# Use TFMA to compute evaluation statistics over features of a model and\n# validate them against a baseline.\n\n# The model resolver is only required if performing model validation in addition\n# to evaluation. In this case we validate against the latest blessed model. 
If\n# no model has been blessed before (as in this case), the evaluator will make our\n# candidate the first blessed model.\nmodel_resolver = tfx.dsl.Resolver(\n    strategy_class=tfx.dsl.experimental.LatestBlessedModelStrategy,\n    model=tfx.dsl.Channel(type=tfx.types.standard_artifacts.Model),\n    model_blessing=tfx.dsl.Channel(\n        type=tfx.types.standard_artifacts.ModelBlessing)).with_id(\n        'latest_blessed_model_resolver')\ncontext.run(model_resolver)\n\nevaluator = tfx.components.Evaluator(\n    examples=example_gen.outputs['examples'],\n    model=trainer.outputs['model'],\n    eval_config=eval_config)\ncontext.run(evaluator)", "Now let's examine the output artifacts of Evaluator.", "evaluator.outputs", "Using the evaluation output we can show the default visualization of global metrics on the entire evaluation set.", "context.show(evaluator.outputs['evaluation'])", "To see the visualization for sliced evaluation metrics, we can directly call the TensorFlow Model Analysis library.", "import tensorflow_model_analysis as tfma\n\n# Get the TFMA output result path and load the result.\nPATH_TO_RESULT = evaluator.outputs['evaluation'].get()[0].uri\ntfma_result = tfma.load_eval_result(PATH_TO_RESULT)\n\n# Show data sliced along feature column trip_start_hour.\ntfma.view.render_slicing_metrics(\n    tfma_result, slicing_column='trip_start_hour')", "This visualization shows the same metrics, but computed at every feature value of trip_start_hour instead of on the entire evaluation set.\nTensorFlow Model Analysis supports many other visualizations, such as Fairness Indicators and plotting a time series of model performance. To learn more, see the tutorial.\nSince we added thresholds to our config, validation output is also available. The presence of a blessing artifact indicates that our model passed validation. Since this is the first validation being performed, the candidate is automatically blessed.", "blessing_uri = evaluator.outputs['blessing'].get()[0].uri\n!ls -l {blessing_uri}", "Now we can also verify the success by loading the validation result record:", "PATH_TO_RESULT = evaluator.outputs['evaluation'].get()[0].uri\nprint(tfma.load_validation_result(PATH_TO_RESULT))", "Pusher\nThe Pusher component is usually at the end of a TFX pipeline. It checks whether a model has passed validation, and if so, exports the model to _serving_model_dir.", "pusher = tfx.components.Pusher(\n    model=trainer.outputs['model'],\n    model_blessing=evaluator.outputs['blessing'],\n    push_destination=tfx.proto.PushDestination(\n        filesystem=tfx.proto.PushDestination.Filesystem(\n            base_directory=_serving_model_dir)))\ncontext.run(pusher)", "Let's examine the output artifacts of Pusher.", "pusher.outputs", "In particular, the Pusher will export your model in the SavedModel format, which looks like this:", "push_uri = pusher.outputs['pushed_model'].get()[0].uri\nmodel = tf.saved_model.load(push_uri)\n\nfor item in model.signatures.items():\n    pp.pprint(item)", "We've finished our tour of built-in TFX components!" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]