repo_name
stringlengths
6
77
path
stringlengths
8
215
license
stringclasses
15 values
cells
list
types
list
tritemio/multispot_paper
out_notebooks/usALEX-5samples-PR-raw-out-DexDem-12d.ipynb
mit
[ "Executed: Mon Mar 27 11:35:00 2017\nDuration: 8 seconds.\nusALEX-5samples - Template\n\nThis notebook is executed through 8-spots paper analysis.\nFor a direct execution, uncomment the cell below.", "ph_sel_name = \"DexDem\"\n\ndata_id = \"12d\"\n\n# ph_sel_name = \"all-ph\"\n# data_id = \"7d\"", "Load software and filenames definitions", "from fretbursts import *\n\ninit_notebook()\nfrom IPython.display import display", "Data folder:", "data_dir = './data/singlespot/'\n\nimport os\ndata_dir = os.path.abspath(data_dir) + '/'\nassert os.path.exists(data_dir), \"Path '%s' does not exist.\" % data_dir", "List of data files:", "from glob import glob\nfile_list = sorted(f for f in glob(data_dir + '*.hdf5') if '_BKG' not in f)\n## Selection for POLIMI 2012-11-26 datatset\nlabels = ['17d', '27d', '7d', '12d', '22d']\nfiles_dict = {lab: fname for lab, fname in zip(labels, file_list)}\nfiles_dict\n\nph_sel_map = {'all-ph': Ph_sel('all'), 'Dex': Ph_sel(Dex='DAem'), \n 'DexDem': Ph_sel(Dex='Dem')}\nph_sel = ph_sel_map[ph_sel_name]\n\ndata_id, ph_sel_name", "Data load\nInitial loading of the data:", "d = loader.photon_hdf5(filename=files_dict[data_id])", "Laser alternation selection\nAt this point we have only the timestamps and the detector numbers:", "d.ph_times_t, d.det_t", "We need to define some parameters: donor and acceptor ch, excitation period and donor and acceptor excitiations:", "d.add(det_donor_accept=(0, 1), alex_period=4000, D_ON=(2850, 580), A_ON=(900, 2580), offset=0)", "We should check if everithing is OK with an alternation histogram:", "plot_alternation_hist(d)", "If the plot looks good we can apply the parameters with:", "loader.alex_apply_period(d)", "Measurements infos\nAll the measurement data is in the d variable. 
We can print it:", "d", "Or check the measurements duration:", "d.time_max", "Compute background\nCompute the background using automatic threshold:", "d.calc_bg(bg.exp_fit, time_s=60, tail_min_us='auto', F_bg=1.7)\n\ndplot(d, timetrace_bg)\n\nd.rate_m, d.rate_dd, d.rate_ad, d.rate_aa", "Burst search and selection", "bs_kws = dict(L=10, m=10, F=7, ph_sel=ph_sel)\nd.burst_search(**bs_kws)\n\nth1 = 30\nds = d.select_bursts(select_bursts.size, th1=30)\n\nbursts = (bext.burst_data(ds, include_bg=True, include_ph_index=True)\n .round({'E': 6, 'S': 6, 'bg_d': 3, 'bg_a': 3, 'bg_aa': 3, 'nd': 3, 'na': 3, 'naa': 3, 'nda': 3, 'nt': 3, 'width_ms': 4}))\n\nbursts.head()\n\nburst_fname = ('results/bursts_usALEX_{sample}_{ph_sel}_F{F:.1f}_m{m}_size{th}.csv'\n .format(sample=data_id, th=th1, **bs_kws))\nburst_fname\n\nbursts.to_csv(burst_fname)\n\nassert d.dir_ex == 0\nassert d.leakage == 0\n\nprint(d.ph_sel)\ndplot(d, hist_fret);\n\n# if data_id in ['7d', '27d']:\n# ds = d.select_bursts(select_bursts.size, th1=20)\n# else:\n# ds = d.select_bursts(select_bursts.size, th1=30)\n\nds = d.select_bursts(select_bursts.size, add_naa=False, th1=30)\n\nn_bursts_all = ds.num_bursts[0]\n\ndef select_and_plot_ES(fret_sel, do_sel):\n ds_fret= ds.select_bursts(select_bursts.ES, **fret_sel)\n ds_do = ds.select_bursts(select_bursts.ES, **do_sel)\n bpl.plot_ES_selection(ax, **fret_sel)\n bpl.plot_ES_selection(ax, **do_sel) \n return ds_fret, ds_do\n\nax = dplot(ds, hist2d_alex, S_max_norm=2, scatter_alpha=0.1)\n\nif data_id == '7d':\n fret_sel = dict(E1=0.60, E2=1.2, S1=0.2, S2=0.9, rect=False)\n do_sel = dict(E1=-0.2, E2=0.5, S1=0.8, S2=2, rect=True) \n ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)\n \nelif data_id == '12d':\n fret_sel = dict(E1=0.30,E2=1.2,S1=0.131,S2=0.9, rect=False)\n do_sel = dict(E1=-0.4, E2=0.4, S1=0.8, S2=2, rect=False)\n ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)\n\nelif data_id == '17d':\n fret_sel = dict(E1=0.01, E2=0.98, S1=0.14, S2=0.88, 
rect=False)\n do_sel = dict(E1=-0.4, E2=0.4, S1=0.80, S2=2, rect=False)\n ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)\n\nelif data_id == '22d':\n fret_sel = dict(E1=-0.16, E2=0.6, S1=0.2, S2=0.80, rect=False)\n do_sel = dict(E1=-0.2, E2=0.4, S1=0.85, S2=2, rect=True)\n ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel) \n\nelif data_id == '27d':\n fret_sel = dict(E1=-0.1, E2=0.5, S1=0.2, S2=0.82, rect=False)\n do_sel = dict(E1=-0.2, E2=0.4, S1=0.88, S2=2, rect=True)\n ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel) \n\nn_bursts_do = ds_do.num_bursts[0]\nn_bursts_fret = ds_fret.num_bursts[0]\n\nn_bursts_do, n_bursts_fret\n\nd_only_frac = 1.*n_bursts_do/(n_bursts_do + n_bursts_fret)\nprint ('D-only fraction:', d_only_frac)\n\ndplot(ds_fret, hist2d_alex, scatter_alpha=0.1);\n\ndplot(ds_do, hist2d_alex, S_max_norm=2, scatter=False);", "Donor Leakage fit\nHalf-Sample Mode\nFit peak usng the mode computed with the half-sample algorithm (Bickel 2005).", "def hsm_mode(s):\n \"\"\"\n Half-sample mode (HSM) estimator of `s`.\n\n `s` is a sample from a continuous distribution with a single peak.\n \n Reference:\n Bickel, Fruehwirth (2005). 
arXiv:math/0505419\n \"\"\"\n s = memoryview(np.sort(s))\n i1 = 0\n i2 = len(s)\n\n while i2 - i1 > 3:\n n = (i2 - i1) // 2\n w = [s[n-1+i+i1] - s[i+i1] for i in range(n)]\n i1 = w.index(min(w)) + i1\n i2 = i1 + n\n\n if i2 - i1 == 3:\n if s[i1+1] - s[i1] < s[i2] - s[i1 + 1]:\n i2 -= 1\n elif s[i1+1] - s[i1] > s[i2] - s[i1 + 1]:\n i1 += 1\n else:\n i1 = i2 = i1 + 1\n\n return 0.5*(s[i1] + s[i2])\n\nE_pr_do_hsm = hsm_mode(ds_do.E[0])\nprint (\"%s: E_peak(HSM) = %.2f%%\" % (ds.ph_sel, E_pr_do_hsm*100))", "Gaussian Fit\nFit the histogram with a gaussian:", "E_fitter = bext.bursts_fitter(ds_do, weights=None)\nE_fitter.histogram(bins=np.arange(-0.2, 1, 0.03))\n\nE_fitter.fit_histogram(model=mfit.factory_gaussian())\nE_fitter.params\n\nres = E_fitter.fit_res[0]\nres.params.pretty_print()\n\nE_pr_do_gauss = res.best_values['center']\nE_pr_do_gauss", "KDE maximum", "bandwidth = 0.03\nE_range_do = (-0.1, 0.15)\nE_ax = np.r_[-0.2:0.401:0.0002]\n\nE_fitter.calc_kde(bandwidth=bandwidth)\nE_fitter.find_kde_max(E_ax, xmin=E_range_do[0], xmax=E_range_do[1])\nE_pr_do_kde = E_fitter.kde_max_pos[0]\nE_pr_do_kde", "Leakage summary", "mfit.plot_mfit(ds_do.E_fitter, plot_kde=True, plot_model=False)\nplt.axvline(E_pr_do_hsm, color='m', label='HSM')\nplt.axvline(E_pr_do_gauss, color='k', label='Gauss')\nplt.axvline(E_pr_do_kde, color='r', label='KDE')\nplt.xlim(0, 0.3)\nplt.legend()\nprint('Gauss: %.2f%%\\n KDE: %.2f%%\\n HSM: %.2f%%' % \n (E_pr_do_gauss*100, E_pr_do_kde*100, E_pr_do_hsm*100))", "Burst size distribution", "nt_th1 = 50\n\ndplot(ds_fret, hist_size, which='all', add_naa=False)\nxlim(-0, 250)\nplt.axvline(nt_th1)\n\nTh_nt = np.arange(35, 120)\nnt_th = np.zeros(Th_nt.size)\nfor i, th in enumerate(Th_nt):\n ds_nt = ds_fret.select_bursts(select_bursts.size, th1=th)\n nt_th[i] = (ds_nt.nd[0] + ds_nt.na[0]).mean() - th\n\nplt.figure()\nplot(Th_nt, nt_th)\nplt.axvline(nt_th1)\n\nnt_mean = nt_th[np.where(Th_nt == nt_th1)][0]\nnt_mean", "Fret fit\nMax position of the Kernel Density 
Estimation (KDE):", "E_pr_fret_kde = bext.fit_bursts_kde_peak(ds_fret, bandwidth=bandwidth, weights='size')\nE_fitter = ds_fret.E_fitter\n\nE_fitter.histogram(bins=np.r_[-0.1:1.1:0.03])\nE_fitter.fit_histogram(mfit.factory_gaussian(center=0.5))\n\nE_fitter.fit_res[0].params.pretty_print()\n\nfig, ax = plt.subplots(1, 2, figsize=(14, 4.5))\nmfit.plot_mfit(E_fitter, ax=ax[0])\nmfit.plot_mfit(E_fitter, plot_model=False, plot_kde=True, ax=ax[1])\nprint('%s\\nKDE peak %.2f ' % (ds_fret.ph_sel, E_pr_fret_kde*100))\ndisplay(E_fitter.params*100)", "Weighted mean of $E$ of each burst:", "ds_fret.fit_E_m(weights='size')", "Gaussian fit (no weights):", "ds_fret.fit_E_generic(fit_fun=bl.gaussian_fit_hist, bins=np.r_[-0.1:1.1:0.03], weights=None)", "Gaussian fit (using burst size as weights):", "ds_fret.fit_E_generic(fit_fun=bl.gaussian_fit_hist, bins=np.r_[-0.1:1.1:0.005], weights='size')\n\nE_kde_w = E_fitter.kde_max_pos[0]\nE_gauss_w = E_fitter.params.loc[0, 'center']\nE_gauss_w_sig = E_fitter.params.loc[0, 'sigma']\nE_gauss_w_err = float(E_gauss_w_sig/np.sqrt(ds_fret.num_bursts[0]))\nE_gauss_w_fiterr = E_fitter.fit_res[0].params['center'].stderr\nE_kde_w, E_gauss_w, E_gauss_w_sig, E_gauss_w_err, E_gauss_w_fiterr", "Stoichiometry fit\nMax position of the Kernel Density Estimation (KDE):", "S_pr_fret_kde = bext.fit_bursts_kde_peak(ds_fret, burst_data='S', bandwidth=0.03) #weights='size', add_naa=True)\nS_fitter = ds_fret.S_fitter\n\nS_fitter.histogram(bins=np.r_[-0.1:1.1:0.03])\nS_fitter.fit_histogram(mfit.factory_gaussian(), center=0.5)\n\nfig, ax = plt.subplots(1, 2, figsize=(14, 4.5))\nmfit.plot_mfit(S_fitter, ax=ax[0])\nmfit.plot_mfit(S_fitter, plot_model=False, plot_kde=True, ax=ax[1])\nprint('%s\\nKDE peak %.2f ' % (ds_fret.ph_sel, S_pr_fret_kde*100))\ndisplay(S_fitter.params*100)\n\nS_kde = S_fitter.kde_max_pos[0]\nS_gauss = S_fitter.params.loc[0, 'center']\nS_gauss_sig = S_fitter.params.loc[0, 'sigma']\nS_gauss_err = 
float(S_gauss_sig/np.sqrt(ds_fret.num_bursts[0]))\nS_gauss_fiterr = S_fitter.fit_res[0].params['center'].stderr\nS_kde, S_gauss, S_gauss_sig, S_gauss_err, S_gauss_fiterr", "The Maximum likelihood fit for a Gaussian population is the mean:", "S = ds_fret.S[0]\nS_ml_fit = (S.mean(), S.std())\nS_ml_fit", "Computing the weighted mean and weighted standard deviation we get:", "weights = bl.fret_fit.get_weights(ds_fret.nd[0], ds_fret.na[0], weights='size', naa=ds_fret.naa[0], gamma=1.)\nS_mean = np.dot(weights, S)/weights.sum()\nS_std_dev = np.sqrt(\n np.dot(weights, (S - S_mean)**2)/weights.sum())\nS_wmean_fit = [S_mean, S_std_dev]\nS_wmean_fit", "Save data to file", "sample = data_id", "The following string contains the list of variables to be saved. When saving, the order of the variables is preserved.", "variables = ('sample n_bursts_all n_bursts_do n_bursts_fret '\n 'E_kde_w E_gauss_w E_gauss_w_sig E_gauss_w_err E_gauss_w_fiterr '\n 'S_kde S_gauss S_gauss_sig S_gauss_err S_gauss_fiterr '\n 'E_pr_do_kde E_pr_do_hsm E_pr_do_gauss nt_mean\\n')", "This is just a trick to format the different variables:", "variables_csv = variables.replace(' ', ',')\nfmt_float = '{%s:.6f}'\nfmt_int = '{%s:d}'\nfmt_str = '{%s}'\nfmt_dict = {**{'sample': fmt_str}, \n **{k: fmt_int for k in variables.split() if k.startswith('n_bursts')}}\nvar_dict = {name: eval(name) for name in variables.split()}\nvar_fmt = ', '.join([fmt_dict.get(name, fmt_float) % name for name in variables.split()]) + '\\n'\ndata_str = var_fmt.format(**var_dict)\n\nprint(variables_csv)\nprint(data_str)\n\n# NOTE: The file name should be the notebook name but with .csv extension\nwith open('results/usALEX-5samples-PR-raw-%s.csv' % ph_sel_name, 'a') as f:\n f.seek(0, 2)\n if f.tell() == 0:\n f.write(variables_csv)\n f.write(data_str)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
DamienIrving/ocean-analysis
development/quantiles.ipynb
mit
[ "Calculating percentiles / quantiles\nRandom examples from Stack Overflow\nhttps://stackoverflow.com/questions/21844024/weighted-percentile-using-numpy", "import numpy as np\n\ndata = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\nweights = np.array([1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5])\n\nquantile = 0.25\n\nind_sorted = np.argsort(data)\nsorted_data = data[ind_sorted]\nsorted_weights = weights[ind_sorted]\n\nSn = np.cumsum(sorted_weights)\nPn = (Sn-0.5*sorted_weights)/Sn[-1]\nnp.interp(quantile, Pn, sorted_data)\n\nnp.quantile(data, 0.25)\n\ndef weighted_percentile(a, q=np.array([75, 25]), w=None):\n \"\"\"\n Calculates percentiles associated with a (possibly weighted) array\n\n Parameters\n ----------\n a : array-like\n The input array from which to calculate percents\n q : array-like\n The percentiles to calculate (0.0 - 100.0)\n w : array-like, optional\n The weights to assign to values of a. Equal weighting if None\n is specified\n\n Returns\n -------\n values : np.array\n The values associated with the specified percentiles. 
\n \"\"\"\n # Standardize and sort based on values in a\n q = np.array(q) / 100.0\n if w is None:\n w = np.ones(a.size)\n idx = np.argsort(a)\n a_sort = a[idx]\n w_sort = w[idx]\n\n # Get the cumulative sum of weights\n ecdf = np.cumsum(w_sort)\n\n # Find the percentile index positions associated with the percentiles\n p = q * (w.sum() - 1)\n\n # Find the bounding indices (both low and high)\n idx_low = np.searchsorted(ecdf, p, side='right')\n idx_high = np.searchsorted(ecdf, p + 1, side='right')\n idx_high[idx_high > ecdf.size - 1] = ecdf.size - 1\n\n # Calculate the weights \n weights_high = p - np.floor(p)\n weights_low = 1.0 - weights_high\n\n # Extract the low/high indexes and multiply by the corresponding weights\n x1 = np.take(a_sort, idx_low) * weights_low\n x2 = np.take(a_sort, idx_high) * weights_high\n\n # Return the average\n return np.add(x1, x2)\n\nweighted_percentile(data, q=np.array([10, 25, 50, 75, 90, 100]), w=weights)\n\ndef weighted_percentile2(data, weights, perc):\n \"\"\"\n perc : percentile in [0-1]!\n \"\"\"\n ix = np.argsort(data)\n data = data[ix] # sort data\n weights = weights[ix] # sort weights\n cdf = (np.cumsum(weights) - 0.5 * weights) / np.sum(weights) # 'like' a CDF function\n return np.interp(perc, cdf, data)\n\nweighted_percentile2(data, weights, 0.1)", "I also found this post about equal frequency binning in Python useful.\nstatsmodels", "from statsmodels.stats.weightstats import DescrStatsW\n\nwq = DescrStatsW(data=np.arange(0, 101), weights=np.ones(101)* 1.5)\nwq.quantile(probs=np.arange(0, 1.01, 0.01), return_pandas=False)", "Source code with details of the calculation of weighted quantiles here." ]
[ "markdown", "code", "markdown", "code", "markdown" ]
jakobj/nest-simulator
doc/model_details/HillTononiModels.ipynb
gpl-2.0
[ "The Hill-Tononi Neuron and Synapse Models\nHans Ekkehard Plesser, NMBU/FZ Jülich/U Oslo, 2016-12-01\nBackground\nThis notebook describes the neuron and synapse model proposed by Hill and Tononi in J Neurophysiol 93:1671-1698, 2005 (doi:10.1152/jn.00915.2004) and their implementation in NEST. The notebook also contains some tests.\nThis description is based on the original publication and publications cited therein, an analysis of the source code of the original Synthesis implementation kindly provided by Sean Hill, and plausiblity arguments.\nIn what follows, I will refer to the original paper as [HT05].\nThe Neuron Model\nIntegration\nThe original Synthesis implementation of the model uses Runge-Kutta integration with fixed 0.25 ms step size, and integrates channels dynamics first, followed by integration of membrane potential and threshold.\nNEST, in contrast, integrates the complete 16-dimensional state using a single adaptive-stepsize Runge-Kutta-Fehlberg-4(5) solver from the GNU Science Library (gsl_odeiv_step_rkf45).\nMembrane potential\nMembrane potential evolution is governed by [HT05, p 1677]\n\\begin{equation}\n\\frac{\\text{d}V}{\\text{d}t} = \\frac{-g_{\\text{NaL}}(V-E_{\\text{Na}})\n-g_{\\text{KL}}(V-E_{\\text{K}})+I_{\\text{syn}}+I_{\\text{int}}}{\\tau_{\\text{m}}}\n-\\frac{g_{\\text{spike}}(V-E_{\\text{K}})}{\\tau_{\\text{spike}}}\n\\end{equation}\n\nThe equation does not contain membrane capacitance. As a side-effect, all conductances are dimensionless.\nNa and K leak conductances $g_{\\text{NaL}}$ and $g_{\\text{KL}}$ are constant, although $g_{\\text{KL}}$ may be adjusted on slow time scales to mimic neuromodulatory effects.\nReversal potentials $E_{\\text{Na}}$ and $E_{\\text{K}}$ are assumed constant.\nSynaptic currents $I_{\\text{syn}}$ and intrinsic currents $I_{\\text{int}}$ are discussed below. 
In contrast to the paper, they are shown with positive sign here (just change in notation).\nThe last term is a re-polarizing current only active during the refractory period, see below. Note that it has a different (faster) time constant than the other currents. It might have been more natural to use the same time constant for all currents and instead adjust $g_{\\text{spike}}$. We follow the original approach here.\n\nThreshold, Spike generation and refractory effects\nThe threshold evolves according to [HT05, p 1677]\n\\begin{equation}\n\\frac{\\text{d}\\theta}{\\text{d}t} = -\\frac{\\theta-\\theta_{\\text{eq}}}{\\tau_{\\theta}}\n\\end{equation}\nThe neuron emits a single spike if \n- it is not refractory\n- membrane potential crosses the threshold, $V\\geq\\theta$\nUpon spike emission,\n- $V \\leftarrow E_{\\text{Na}}$\n- $\\theta \\leftarrow E_{\\text{Na}}$\n- the neuron becomes refractory for time $t_{\\text{spike}}$ (t_ref in NEST)\nThe repolarizing current is active during, and only during the refractory period:\n\\begin{equation}\ng_{\\text{spike}} = \\begin{cases} 1 & \\text{neuron is refractory}\\\n 0 & \\text{else} \\end{cases}\n\\end{equation}\nDuring the refractory period, the neuron cannot fire new spikes, but all state variables evolve freely, nothing is clamped. 
\nThe model of spiking and refractoriness is based on Synthesis model PulseIntegrateAndFire.\nIntrinsic currents\nNote that not all intrinsic currents are active in all populations of the network model presented in [HT05, p1678f].\nIntrinsic currents are based on the Hodgkin-Huxley description, i.e.,\n\\begin{align}\nI_X &= g_{\\text{peak}, X} m_X(V, t)^N_X h_X(V, t)(V-E_X) \\\n\\frac{\\text{d}m_X}{\\text{d}t} &= \\frac{m_X^{\\infty}-m_X}{\\tau_{m,X}(V)}\\\n\\frac{\\text{d}h_X}{\\text{d}t} &= \\frac{h_X^{\\infty}-h_X}{\\tau_{h,X}(V)}\n\\end{align}\nwhere $I_X$ is the current through channel $X$ and $m_X$ and $h_X$ the activation and inactivation variables for channel $X$.\nPacemaker current $I_h$\nSynthesis: IhChannel\n\\begin{align}\nN_h & = 1 \\\nm_h^{\\infty}(V) &= \\frac{1}{1+\\exp\\left(\\frac{V+75\\text{mV}}{5.5\\text{mV}}\\right)} \\\n\\tau_{m,h}(V) &= \\frac{1}{\\exp(-14.59-0.086V) + \\exp(-1.87 + 0.0701V)} \\\nh_h(V, t) &\\equiv 1 \n\\end{align}\nNote that subscript $h$ in some cases above marks the $I_h$ channel.\nLow-threshold calcium current $I_T$\nSynthesis: ItChannel\nEquations given in paper\n\\begin{align}\nN_T & \\quad \\text{not given} \\\nm_T^{\\infty}(V) &= 1/{1 + \\exp[ -(V + 59.0)/6.2]} \\\n\\tau_{m,T}(V) &= {0.22/\\exp[ -(V + 132.0)/ 16.7]} + \\exp[(V + 16.8)/18.2] + 0.13\\\nh_T^{\\infty}(V) &= 1/{1 + \\exp[(V + 83.0)/4.0]} \\\n\\tau_{h,T}(V) &= \\langle 8.2 + {56.6 + 0.27 \\exp[(V + 115.2)/5.0]}\\rangle / {1.0 + \\exp[(V + 86.0)/3.2]}\n\\end{align}\nNote the following:\n- The channel model is based on Destexhe et al, J Neurophysiol 76:2049 (1996).\n- In the equation for $\\tau_{m,T}$, the second exponential term must be added to the first (in the denominator) to make dimensional sense; 0.13 and 0.22 have unit ms.\n- In the equation for $\\tau_{h,T}$, the $\\langle \\rangle$ brackets should be dropped, so that $8.2$ is not divided by the $1+\\exp$ term. 
Otherwise, it could have been combined with the $56.6$.\n- This analysis is confirmed by code analysis and comparison with Destexhe et al, J Neurophysiol 76:2049 (1996), Eq 5.\n- From Destexhe et al we also find $N_T=2$.\nCorrected equations\nThis leads to the following equations, which are implemented in Synthesis and NEST.\n\\begin{align}\nN_T &= 2 \\\nm_T^{\\infty}(V) &= \\frac{1}{1+\\exp\\left(-\\frac{V+59\\text{mV}}{6.2\\text{mV}}\\right)}\\\n\\tau_{m,T}(V) &= 0.13\\text{ms} \n + \\frac{0.22\\text{ms}}{\\exp\\left(-\\frac{V + 132\\text{mV}}{16.7\\text{mV}}\\right) + \\exp\\left(\\frac{V + 16.8\\text{mV}}{18.2\\text{mV}}\\right)} \\ \nh_T^{\\infty}(V) &= \\frac{1}{1+\\exp\\left(\\frac{V+83\\text{mV}}{4\\text{mV}}\\right)}\\\n\\tau_{h,T}(V) &= 8.2\\text{ms} + \\frac{56.6\\text{ms} + 0.27\\text{ms} \\exp\\left(\\frac{V + 115.2\\text{mV}}{5\\text{mV}}\\right)}{1 + \\exp\\left(\\frac{V + 86\\text{mV}}{3.2\\text{mV}}\\right)}\n\\end{align}\nPersistent Sodium Current $I_{NaP}$\nSynthesis: INaPChannel\nThis model has only activation ($m$) and uses the steady-state value, so the only relevant equation is that for $m$. 
In the paper, it is given as\n\\begin{equation}\nm_{NaP}^{\\infty}(V) = 1/[1+\\exp(-V+55.7)/7.7]\n\\end{equation}\nDimensional analysis indicates that the division by $7.7$ should be in the argument of the exponential, and the minus sign needs to be moved so that the current activates as the neuron depolarizes leading to the corrected equation\n\\begin{equation}\nm_{NaP}^{\\infty}(V) = \\frac{1}{1+\\exp\\left(-\\frac{V+55.7\\text{mV}}{7.7\\text{mV}}\\right)}\n\\end{equation}\nThis equation is implemented in NEST and Synthesis and is the one found in Compte et al (2003), cited by [HT05, p 1679].\nCorrected exponent\nAccording to Compte et al (2003), $N_{NaP}=3$, i.e.,\n\\begin{equation}\nI_{NaP} = g_{\\text{peak,NaP}}(m_{NaP}^{\\infty}(V))^3(V-E_{NaP})\n\\end{equation}\nThis equation is also given in a comment in Synthesis, but is missing from the implementation.\nNote: NEST implements the equation according to Compte et al (2003) with $N_{NaP}=3$, while Synthesis uses $N_{NaP}=1$.\nDepolarization-activated Potassium Current $I_{DK}$\nSynthesis: IKNaChannel\nThis model also only has a single activation variable $m$, following more complicated dynamics expressed by $D$.\nEquations in paper\n\\begin{align}\n dD/dt &= D_{\\text{influx}} - D(1-D_{\\text{eq}})/\\tau_D \\\n D_{\\text{influx}} &= 1/{1+ \\exp[-(V-D_{\\theta})/\\sigma_D]} \\\n m_{DK}^{\\infty} &= 1/1 + (d_{1/2}D)^{3.5}\n\\end{align}\nThere are several problems with these equations.\nIn the steady state the first equation becomes\n\\begin{equation}\n 0 = - D(1-D_{\\text{eq}})/\\tau_D \n \\end{equation}\n with solution\n \\begin{equation}\n D = 0\n\\end{equation}\nThis contradicts both the statement [HT05, p. 1679] that $D\\to D_{\\text{eq}}$ in this case, and the requirement that $D>0$ to avoid a singluarity in the equation for $m_{DK}^{\\infty}$. 
The most plausible correction is\n\\begin{equation}\n dD/dt = D_{\\text{influx}} - (D-D_{\\text{eq}})/\\tau_D \n\\end{equation}\nThe third equation appears incorrect and logic as well as Wang et al, J Neurophysiol 89:3279–3293, 2003, Eq 9, cited in [HT05, p 1679], indicate that the correct equation is\n\\begin{equation}\n m_{DK}^{\\infty} = 1/(1 + (d_{1/2} / D)^{3.5})\n\\end{equation}\nCorrected equations\nThe equations for this channel implemented in NEST are thus\n\\begin{align}\nI_{DK} &= - g_{\\text{peak},DK} m_{DK}(V,t) (V - E_{DK})\\\n m_{DK} &= \\frac{1}{1 + \\left(\\frac{d_{1/2}}{D}\\right)^{3.5}}\\\n \\frac{dD}{dt} &= D_{\\text{influx}}(V) - \\frac{D-D_{\\text{eq}}}{\\tau_D} = \\frac{D_{\\infty}(V)-D}{\\tau_D} \\\n D_{\\infty}(V) &= \\tau_D D_{\\text{influx}}(V) + {D_{\\text{eq}}}\\\n D_{\\text{influx}} &= \\frac{D_{\\text{influx,peak}}}{1+ \\exp\\left(-\\frac{V-D_{\\theta}}{\\sigma_D}\\right)} \n\\end{align}\nwith \n|$D_{\\text{influx,peak}}$|$D_{\\text{eq}}$|$\\tau_D$|$D_{\\theta}$|$\\sigma_D$|$d_{1/2}$|\n| --: | --: | --: | --: | --: | --: |\n|$0.025\\text{ms}^{-1}$ |$0.001$|$1250\\text{ms}$|$-10\\text{mV}$|$5\\text{mV}$|$0.25$|\nNote the following:\n- $D_{eq}$ is the equilibrium value only for $D_{\\text{influx}}(V)=0$, i.e., in the limit $V\\to -\\infty$ and $t\\to\\infty$.\n- The actual steady-state value is $D_{\\infty}$.\n- $d_{1/2}$, $D$, $D_{\\infty}$, and $D_{\\text{eq}}$ have identical, but arbitrary units, so we can assume them dimensionless ($D$ is a \"factor\" that in an abstract way represents concentrations).\n- $D_{\\text{influx}}$ and $D_{\\text{influx,peak}}$ are rates of change of $D_{\\infty}$ and thus have units of inverse time.\n- $m_{DK}$ is a steep sigmoid which is almost 0 or 1 except for a narrow window around $d_{1/2}$.\n- To the left of this window, $I_{DK}\\approx 0$.\n- To the right of this window, $I_{DK}\\sim -(V-E_{DK})$.\n- $m_{DK}$ is not integrated over time, instead it is an instantaneous transform of $D$, which is 
integrated over time.\nNote: The differential equation for $dD/dt$ differs from the one implemented in Synthesis.\nSynaptic channels\nThese are described in [HT05, p 1678]. Synaptic channels are conductance based with double-exponential time course (beta functions) and normalized for peak conductance. NMDA channels are additionally voltage gated, as described below.\nLet ${t_{(j, X)}}$ be the set of all spike arrival times, where $X$ indicates the synapse model and $j$ enumerates spikes. Then the total synaptic input is given by\n\\begin{equation}\nI_{\\text{syn}}(t) = - \\sum_{{t_{(j, X)}}} \\bar{g}X(t-t{(j, X)}) (V-E_X)\n\\end{equation}\nStandard Channels\nSynthesis: SynChannel\nThe conductance change due to a single input spike at time $t=0$ through a channel of type $X$ is given by (see below for exceptions)\n\\begin{align}\n \\bar{g}X(t) &= g_X(t)\\\n g_X(t) &= g{\\text{peak}, X}\\frac{\\exp(-t/\\tau_1) - \\exp(-t/\\tau_2)}{\n \\exp(-t_{\\text{peak}}/\\tau_1) - \\exp(-t_{\\text{peak}}/\\tau_2)} \\Theta(t)\\\n t_{\\text{peak}} &= \\frac{\\tau_2 \\tau_1}{\\tau_2 - \\tau_1} \\ln\\frac{ \\tau_2}{\\tau_1}\n\\end{align} \nwhere $t_{\\text{peak}}$ is the time of the conductance maximum and $\\tau_1$ and $\\tau_2$ are synaptic rise- and decay-time, respectively; $\\Theta(t)$ is the Heaviside step function. The equation is integrated using exact integration in Synthesis; in NEST, it is included in the ODE-system integrated using the Runge-Kutta-Fehlberg 4(5) solver from GSL.\nThe \"indirection\" from $g$ to $\\bar{g}$ is required for consistent notation for NMDA channels below.\nThese channels are used for AMPA, GABA_A and GABA_B channels.\nNMDA Channels\nSynthesis: SynNMDAChannel\nFor the NMDA channel we have\n\\begin{equation}\n\\bar{g}{\\text{NMDA}}(t) = m(V, t) g{\\text{NMDA}}(t)\n\\end{equation}\nwith $g_{\\text{NMDA}}(t)$ from above. 
\nThe voltage-dependent gating $m(V, t)$ is defined as follows (based on textual description, Vargas-Caballero and Robinson J Neurophysiol 89:2778–2783, 2003, doi:10.1152/jn.01038.2002, and code inspection):\n\\begin{align}\n m(V, t) &= a(V) m_{\\text{fast}}^(V, t) + ( 1 - a(V) ) m_{\\text{slow}}^(V, t)\\\n a(V) &= 0.51 - 0.0028 V \\\n m^{\\infty}(V) &= \\frac{1}{ 1 + \\exp\\left( -S_{\\text{act}} ( V - V_{\\text{act}} ) \\right) } \\\n m_X^*(V, t) &= \\min(m^{\\infty}(V), m_X(V, t))\\\n \\frac{\\text{d}m_X}{\\text{d}t} &= \\frac{m^{\\infty}(V) - m_X }{ \\tau_{\\text{Mg}, X}}\n\\end{align} \nwhere $X$ is \"slow\" or \"fast\". $a(V)$ expresses voltage-dependent weighting between slow and fast unblocking, $m^{\\infty}(V)$ the steady-state value of the proportion of unblocked NMDA-channels, the minimum condition in $m_X^*(V,t)$ the instantaneous blocking and the differential equation for $m_X(V,t)$ the unblocking dynamics.\nSynthesis uses tabluated values for $m^{\\infty}$. NEST uses the best fit of $V_{\\text{act}}$ and $S_{\\text{act}}$ to the tabulated data for conductance table fNMDA.\nNote: NEST also supports instantaneous NMDA dynamics using a boolean switch. In that case $m(V, t)=m^{\\infty}(V)$. \nNo synaptic \"minis\"\nSynaptic \"minis\" due to spontaneous release of neurotransmitter quanta [HT05, p 1679] are not included in the NEST implementation of the Hill-Tononi model, because the total mini input rate for a cell was just 2 Hz and they cause PSP changes by $0.5 \\pm 0.25$mV only and thus should have minimal effect.\nThe Synapse Depression Model\nThe synapse depression model is implemented in NEST as ht_synapse, in Synthesis in SynChannel and VesiclePool.\n$P\\in[0, 1]$ describes the state of the presynaptic vesicle pool. 
Spikes are transmitted with an effective weight\n\\begin{equation}\nw_{\\text{eff}} = P w\n\\end{equation}\nwhere $w$ is the nominal weight of the synapse.\nEvolution of $P$ in paper and Synthesis implementation\nAccording to [HT05, p 1678], the pool $P$ evolves according to\n\\begin{equation}\n\\frac{\\text{d}P}{\\text{d}t} = -\\:\\text{spike}\\:\\delta_P P+\\frac{P_{\\text{peak}}-P}{\\tau_P}\n\\end{equation}\nwhere\n- $\\text{spike}=1$ while the neuron is in spiking state, 0 otherwise\n- $P_{\\text{peak}}=1$ \n- $\\delta_P = 0.5$ by default\n- $\\tau_P = 500\\text{ms}$ by default\nSince neurons are in spiking state for one integration time step $\\Delta t$, this suggest that the effect of a spike on the vesicle pool is approximately\n\\begin{equation}\nP \\leftarrow ( 1 - \\Delta t \\delta_P ) P\n\\end{equation}\nFor default parameters $\\Delta t=0.25\\text{ms}$ and $\\delta_P=0.5$, this means that a single spike reduces the pool by 1/8 of its current size.\nEvolution of $P$ in the NEST implementation\nIn NEST, we modify the equations above to obtain a definite jump in pool size on transmission of a spike, without any dependence on the integration time step (fixing explicitly $P_{\\text{peak}}$):\n\\begin{align}\n\\frac{\\text{d}P}{\\text{d}t} &= \\frac{1-P}{\\tau_P} \\\nP &\\leftarrow ( 1 - \\delta_P^*) P \n\\end{align}\n$P$ is only updated when a spike passes the synapse, in the following way (where $\\Delta$ is the time since the last spike through the same synapse):\n\nRecuperation: $P\\leftarrow 1 - ( 1 - P ) \\exp( -\\Delta / \\tau_P )$\nSpike transmission with $w_{\\text{eff}} = P w$\nDepletion: $P \\leftarrow ( 1 - \\delta_P^*) P$\n\nTo achieve approximately the same depletion as in Synthesis, use $\\delta_P^*=\\Delta t\\delta_p$.\nTests of the Models", "import sys\nimport math\nimport numpy as np\nimport pandas as pd\nimport scipy.optimize as so\nimport scipy.integrate as si\nimport matplotlib.pyplot as plt\nimport nest\n\n%matplotlib 
inline\nplt.rcParams['figure.figsize'] = (12, 3)", "Neuron Model\nPassive properties\nTest relaxation of neuron and threshold to equilibrium values in absence of intrinsic currents and input. We then have\n\\begin{align}\n\\tau_m \\dot{V}&= \\left[-g_{NaL}(V-E_{Na})-g_{KL}(V-E_K)\\right] = -(g_{NaL}+g_{KL})V+(g_{NaL}E_{Na}+g_{KL}E_K)\\\n\\Leftrightarrow\\quad \\tau_{\\text{eff}}\\dot{V} &= -V+V_{\\infty}\\\nV_{\\infty} &= \\frac{g_{NaL}E_{Na}+g_{KL}E_K}{g_{NaL}+g_{KL}}\\\n\\tau_{\\text{eff}}&=\\frac{\\tau_m}{g_{NaL}+g_{KL}}\n\\end{align}\nwith solution\n\\begin{equation}\nV(t) = V_0 e^{-\\frac{t}{\\tau_{\\text{eff}}}} + V_{\\infty}\\left(1-e^{-\\frac{t}{\\tau_{\\text{eff}}}} \\right)\n\\end{equation}\nand for the threshold\n\\begin{equation}\n\\theta(t) = \\theta_0 e^{-\\frac{t}{\\tau_{\\theta}}} + \\theta_{eq}\\left(1-e^{-\\frac{t}{\\tau_{\\theta}}} \\right)\n\\end{equation}", "def Vpass(t, V0, gNaL, ENa, gKL, EK, taum, I=0):\n tau_eff = taum/(gNaL + gKL)\n Vinf = (gNaL*ENa + gKL*EK + I)/(gNaL + gKL)\n return V0*np.exp(-t/tau_eff) + Vinf*(1-np.exp(-t/tau_eff))\n\ndef theta(t, th0, theq, tauth):\n return th0*np.exp(-t/tauth) + theq*(1-np.exp(-t/tauth))\n\nnest.ResetKernel()\nnest.SetDefaults('ht_neuron', {'g_peak_NaP': 0., 'g_peak_KNa': 0.,\n 'g_peak_T': 0., 'g_peak_h': 0.,\n 'tau_theta': 10.})\nhp = nest.GetDefaults('ht_neuron')\n\nV_0 = [-100., -70., -55.]\nth_0 = [-65., -51., -10.]\nT_sim = 20.\n\nnrns = nest.Create('ht_neuron', n=len(V_0), params={'V_m': V_0, 'theta': th_0}) \n\nnest.Simulate(T_sim)\nV_th_sim = nrns.get(['V_m', 'theta'])\n\nfor (V0, th0, Vsim, thsim) in zip(V_0, th_0, V_th_sim['V_m'], V_th_sim['theta']):\n Vex = Vpass(T_sim, V0, hp['g_NaL'], hp['E_Na'], hp['g_KL'], hp['E_K'], hp['tau_m'])\n thex = theta(T_sim, th0, hp['theta_eq'], hp['tau_theta'])\n print('Vex = {:.3f}, Vsim = {:.3f}, Vex-Vsim = {:.3e}'.format(Vex, Vsim, Vex-Vsim))\n print('thex = {:.3f}, thsim = {:.3f}, thex-thsim = {:.3e}'.format(thex, thsim, thex-thsim))", "Agreement is 
excellent.\nSpiking without intrinsic currents or synaptic input\nThe equations above hold for input current $I(t)$, but with\n\\begin{equation}\nV_{\\infty}(I) = \\frac{g_{NaL}E_{Na}+g_{KL}E_K}{g_{NaL}+g_{KL}} + \\frac{I}{g_{NaL}+g_{KL}}\n\\end{equation}\nIn NEST, we need to inject input current into the ht_neuron with a dc_generator, whence the current will set on only at a later time and we need to take this into account. For simplicity, we assume that $V$ is initialized to $V_{\\infty}(I=0)$ and that current onset is at $t_I$. We then have for $t\\geq t_I$\n\\begin{equation}\nV(t) = V_{\\infty}(0) e^{-\\frac{t-t_I}{\\tau_{\\text{eff}}}} + V_{\\infty}(I)\\left(1-e^{-\\frac{t-t_I}{\\tau_{\\text{eff}}}} \\right)\n\\end{equation}\nIf we also initialize $\\theta=\\theta_{\\text{eq}}$, the threshold is constant and we have the first spike at\n\\begin{align}\nV(t) &= \\theta_{\\text{eq}}\\\n\\Leftrightarrow\\quad t &= t_I -\\tau_{\\text{eff}} \\ln \\frac{\\theta_{\\text{eq}}-V_{\\infty}(I)}{V_{\\infty}(0)-V_{\\infty}(I)}\n\\end{align}", "def t_first_spike(gNaL, ENa, gKL, EK, taum, theq, tI, I):\n tau_eff = taum/(gNaL + gKL)\n Vinf0 = (gNaL*ENa + gKL*EK)/(gNaL + gKL)\n VinfI = (gNaL*ENa + gKL*EK + I)/(gNaL + gKL)\n return tI - tau_eff * np.log((theq-VinfI) / (Vinf0-VinfI))\n\nnest.ResetKernel()\nnest.SetKernelStatus({'resolution': 0.001})\nnest.SetDefaults('ht_neuron', {'g_peak_NaP': 0., 'g_peak_KNa': 0.,\n 'g_peak_T': 0., 'g_peak_h': 0.})\nhp = nest.GetDefaults('ht_neuron')\n\nI = [25., 50., 100.]\ntI = 1.\ndelay = 1.\nT_sim = 40.\n\nnrns = nest.Create('ht_neuron', n=len(I))\ndcgens = nest.Create('dc_generator', n=len(I), params={'amplitude': I, 'start': tI})\nsrs = nest.Create('spike_recorder', n=len(I))\nnest.Connect(dcgens, nrns, 'one_to_one', {'delay': delay})\nnest.Connect(nrns, srs, 'one_to_one')\nnest.Simulate(T_sim)\n\nt_first_sim = [t[0] for t in srs.get('events', 'times')]\n\nfor dc, tf_sim in zip(I, t_first_sim):\n tf_ex = t_first_spike(hp['g_NaL'], 
hp['E_Na'], hp['g_KL'], hp['E_K'], \n hp['tau_m'], hp['theta_eq'], tI+delay, dc)\n print('tex = {:.4f}, tsim = {:.4f}, tex-tsim = {:.4f}'.format(tf_ex, \n tf_sim, \n tf_ex-tf_sim))\n", "Agreement is as good as possible: All spikes occur in NEST at then end of the time step containing the expected spike time.\nInter-spike interval\nAfter each spike, $V_m = \\theta = E_{Na}$, i.e., all memory is erased. We can thus treat ISIs independently. $\\theta$ relaxes according to the equation above. For $V_m$, we have during $t_{\\text{spike}}$ after a spike\n\\begin{align}\n\\tau_m\\dot{V} &= {-g_{\\text{NaL}}(V-E_{\\text{Na}})\n-g_{\\text{KL}}(V-E_{\\text{K}})+I}\n-\\frac{\\tau_m}{\\tau_{\\text{spike}}}({V-E_{\\text{K}}})\\\n&= -(g_{NaL}+g_{KL}+\\frac{\\tau_m}{\\tau_{\\text{spike}}})V+(g_{NaL}E_{Na}+g_{KL}E_K+\\frac{\\tau_m}{\\tau_{\\text{spike}}}E_K)\n\\end{align}\nthus recovering the same for for the solution but with\n\\begin{align}\n\\tau^{\\text{eff}} &= \\frac{\\tau_m}{g{NaL}+g_{KL}+\\frac{\\tau_m}{\\tau_{\\text{spike}}}}\\\nV^{\\infty} &= \\frac{g{NaL}E_{Na}+g_{KL}E_K+I+\\frac{\\tau_m}{\\tau_{\\text{spike}}}E_K}{g_{NaL}+g_{KL}+\\frac{\\tau_m}{\\tau_{\\text{spike}}}}\n\\end{align}\nAssuming that the ISI is longer than the refractory period $t_{\\text{spike}}$, and we had a spike at time $t_s$, then we have at $t_s+t_{\\text{spike}}$\n\\begin{align}\nV^ &= V(t_s+t_{\\text{spike}}) = E_{Na} e^{-\\frac{t_{\\text{spike}}}{\\tau^{\\text{eff}}}} + V^{\\infty}(I)\\left(1-e^{-\\frac{t{\\text{spike}}}{\\tau^{\\text{eff}}}} \\right)\\\n\\theta^ &= \\theta(t_s+t_{\\text{spike}}) = E_{Na} e^{-\\frac{t_{\\text{spike}}}{\\tau_{\\theta}}} + \\theta_{eq}\\left(1-e^{-\\frac{t_{\\text{spike}}}{\\tau_{\\theta}}} \\right)\\\nt^ &= t_s+t_{\\text{spike}}\n\\end{align}\nFor $t>t^$, the normal equations apply again, i.e.,\n\\begin{align}\nV(t) &= V^ e^{-\\frac{t-t^}{\\tau_{\\text{eff}}}} + V_{\\infty}(I)\\left(1-e^{-\\frac{t-t^}{\\tau_{\\text{eff}}}} \\right)\\\n\\theta(t) &= \\theta^ 
e^{-\\frac{t-t^}{\\tau_{\\theta}}} + \\theta_{\\infty}\\left(1-e^{-\\frac{t-t^*}{\\tau_{\\theta}}}\\right)\n\\end{align}\nThe time of the next spike is then given by\n\\begin{equation}\nV(\\hat{t}) = \\theta(\\hat{t})\n\\end{equation}\nwhich can only be solved numerically. The ISI is then obtained as $\\hat{t}-t_s$.", "def Vspike(tspk, gNaL, ENa, gKL, EK, taum, tauspk, I=0):\n tau_eff = taum/(gNaL + gKL + taum/tauspk)\n Vinf = (gNaL*ENa + gKL*EK + I + taum/tauspk*EK)/(gNaL + gKL + taum/tauspk)\n return ENa*np.exp(-tspk/tau_eff) + Vinf*(1-np.exp(-tspk/tau_eff))\n\ndef thetaspike(tspk, ENa, theq, tauth):\n return ENa*np.exp(-tspk/tauth) + theq*(1-np.exp(-tspk/tauth))\n\ndef Vpost(t, tspk, gNaL, ENa, gKL, EK, taum, tauspk, I=0):\n Vsp = Vspike(tspk, gNaL, ENa, gKL, EK, taum, tauspk, I)\n return Vpass(t-tspk, Vsp, gNaL, ENa, gKL, EK, taum, I)\n\ndef thetapost(t, tspk, ENa, theq, tauth):\n thsp = thetaspike(tspk, ENa, theq, tauth)\n return theta(t-tspk, thsp, theq, tauth)\n\ndef threshold(t, tspk, gNaL, ENa, gKL, EK, taum, tauspk, I, theq, tauth):\n return Vpost(t, tspk, gNaL, ENa, gKL, EK, taum, tauspk, I) - thetapost(t, tspk, ENa, theq, tauth)\n\nnest.ResetKernel()\nnest.SetKernelStatus({'resolution': 0.001})\nnest.SetDefaults('ht_neuron', {'g_peak_NaP': 0., 'g_peak_KNa': 0.,\n 'g_peak_T': 0., 'g_peak_h': 0.})\nhp = nest.GetDefaults('ht_neuron')\n\nI = [25., 50., 100.]\ntI = 1.\ndelay = 1.\nT_sim = 1000.\n\nnrns = nest.Create('ht_neuron', n=len(I))\ndcgens = nest.Create('dc_generator', n=len(I), params={'amplitude': I, 'start': tI})\nsrs = nest.Create('spike_recorder', n=len(I))\nnest.Connect(dcgens, nrns, 'one_to_one', {'delay': delay})\nnest.Connect(nrns, srs, 'one_to_one')\nnest.Simulate(T_sim)\n\nisi_sim = []\nfor ev in srs.events:\n t_spk = ev['times']\n isi = np.diff(t_spk)\n isi_sim.append((np.min(isi), np.mean(isi), np.max(isi)))\n\nfor dc, (isi_min, isi_mean, isi_max) in zip(I, isi_sim):\n isi_ex = so.bisect(threshold, hp['t_ref'], 50, \n args=(hp['t_ref'], 
hp['g_NaL'], hp['E_Na'], hp['g_KL'], hp['E_K'], \n hp['tau_m'], hp['tau_spike'], dc, hp['theta_eq'], hp['tau_theta']))\n print('isi_ex = {:.4f}, isi_sim (min, mean, max) = ({:.4f}, {:.4f}, {:.4f})'.format(\n isi_ex, isi_min, isi_mean, isi_max))", "ISIs are as predicted: measured ISI is predicted rounded up to next time step\nISIs are perfectly regular as expected\n\nIntrinsic Currents\nPreparations", "nest.ResetKernel()\nclass Channel:\n \"\"\"\n Base class for channel models in Python.\n \"\"\"\n def tau_m(self, V):\n raise NotImplementedError()\n def tau_h(self, V):\n raise NotImplementedError()\n def m_inf(self, V):\n raise NotImplementedError()\n def h_inf(self, V):\n raise NotImplementedError()\n def D_inf(self, V):\n raise NotImplementedError()\n def dh(self, h, t, V):\n return (self.h_inf(V)-h)/self.tau_h(V)\n def dm(self, m, t, V):\n return (self.m_inf(V)-m)/self.tau_m(V)\n\ndef voltage_clamp(channel, DT_V_seq, nest_dt=0.1):\n \"Run voltage clamp with voltage V through intervals DT.\"\n\n # NEST part\n nest_g_0 = {'g_peak_h': 0., 'g_peak_T': 0., 'g_peak_NaP': 0., 'g_peak_KNa': 0.}\n nest_g_0[channel.nest_g] = 1.\n \n nest.ResetKernel()\n nest.SetKernelStatus({'resolution': nest_dt})\n nrn = nest.Create('ht_neuron', params=nest_g_0)\n mm = nest.Create('multimeter', params={'record_from': ['V_m', 'theta', channel.nest_I],\n 'interval': nest_dt})\n nest.Connect(mm, nrn)\n\n # ensure we start from equilibrated state\n nrn.set(V_m=DT_V_seq[0][1], equilibrate=True, voltage_clamp=True)\n for DT, V in DT_V_seq:\n nrn.set(V_m=V, voltage_clamp=True)\n nest.Simulate(DT)\n t_end = nest.GetKernelStatus('time')\n \n # simulate a little more so we get all data up to t_end to multimeter\n nest.Simulate(2 * nest.GetKernelStatus('min_delay'))\n \n tmp = pd.DataFrame(mm.events)\n nest_res = tmp[tmp.times <= t_end]\n \n # Control part\n t_old = 0.\n try:\n m_old = channel.m_inf(DT_V_seq[0][1])\n except NotImplementedError:\n m_old = None\n try:\n h_old = 
channel.h_inf(DT_V_seq[0][1])\n except NotImplementedError:\n h_old = None\n try:\n D_old = channel.D_inf(DT_V_seq[0][1])\n except NotImplementedError:\n D_old = None\n \n t_all, I_all = [], []\n if D_old is not None:\n D_all = []\n \n for DT, V in DT_V_seq:\n t_loc = np.arange(0., DT+0.1*nest_dt, nest_dt)\n I_loc = channel.compute_I(t_loc, V, m_old, h_old, D_old)\n t_all.extend(t_old + t_loc[1:])\n I_all.extend(I_loc[1:])\n if D_old is not None:\n D_all.extend(channel.D[1:])\n m_old = channel.m[-1] if m_old is not None else None\n h_old = channel.h[-1] if h_old is not None else None\n D_old = channel.D[-1] if D_old is not None else None\n t_old = t_all[-1]\n \n if D_old is None:\n ctrl_res = pd.DataFrame({'times': t_all, channel.nest_I: I_all})\n else:\n ctrl_res = pd.DataFrame({'times': t_all, channel.nest_I: I_all, 'D': D_all})\n\n return nest_res, ctrl_res", "I_h channel\nThe $I_h$ current is governed by\n\\begin{align}\nI_h &= g_{\\text{peak}, h} m_h(V, t) (V-E_h) \\\n\\frac{\\text{d}m_h}{\\text{d}t} &= \\frac{m_h^{\\infty}-m_h}{\\tau_{m,h}(V)}\\\nm_h^{\\infty}(V) &= \\frac{1}{1+\\exp\\left(\\frac{V+75\\text{mV}}{5.5\\text{mV}}\\right)} \\\n\\tau_{m,h}(V) &= \\frac{1}{\\exp(-14.59-0.086V) + \\exp(-1.87 + 0.0701V)}\n\\end{align}\nWe first inspect $m_h^{\\infty}(V)$ and $\\tau_{m,h}(V)$ to prepare for testing", "nest.ResetKernel()\nclass Ih(Channel):\n \n nest_g = 'g_peak_h'\n nest_I = 'I_h'\n \n def __init__(self, ht_params):\n self.hp = ht_params\n \n def tau_m(self, V):\n return 1/(np.exp(-14.59-0.086*V) + np.exp(-1.87 + 0.0701*V))\n \n def m_inf(self, V):\n return 1/(1+np.exp((V+75)/5.5))\n\n def compute_I(self, t, V, m0, h0, D0):\n self.m = si.odeint(self.dm, m0, t, args=(V,))\n return - self.hp['g_peak_h'] * self.m * (V - self.hp['E_rev_h'])\n\nih = Ih(nest.GetDefaults('ht_neuron'))\n\nV = np.linspace(-110, 30, 100)\nplt.plot(V, ih.tau_m(V));\nax = plt.gca();\nax.set_xlabel('Voltage V [mV]');\nax.set_ylabel('Time constant tau_m [ms]', color='b');\nax2 = 
ax.twinx()\nax2.plot(V, ih.m_inf(V), 'g');\nax2.set_ylabel('Steady-state m_h^inf', color='g');", "The time constant is extremely long, up to 1s, for relevant voltages where $I_h$ is perceptible. We thus need long test runs.\nCurves are in good agreement with Fig 5 of Huguenard and McCormick, J Neurophysiol 68:1373, 1992, cited in [HT05]. I_h data there was from guinea pig slices at 35.5 C and needed no temperature adjustment.\n\nWe now run a voltage clamp experiment starting from the equilibrium value.", "ih = Ih(nest.GetDefaults('ht_neuron'))\nnr, cr = voltage_clamp(ih, [(500, -65.), (500, -80.), (500, -100.), (500, -90.), (500, -55.)]) \n\nplt.subplot(1, 2, 1)\nplt.plot(nr.times, nr.I_h, label='NEST');\nplt.plot(cr.times, cr.I_h, label='Control');\nplt.legend(loc='upper left');\nplt.xlabel('Time [ms]');\nplt.ylabel('I_h [mV]');\nplt.title('I_h current')\n\nplt.subplot(1, 2, 2)\nplt.plot(nr.times, (nr.I_h-cr.I_h)/np.abs(cr.I_h));\nplt.title('Relative I_h error')\nplt.xlabel('Time [ms]');\nplt.ylabel('Rel. 
error (NEST-Control)/|Control|');", "Agreement is very good\nNote that currents have units of $mV$ due to choice of dimensionless conductances.\n\nI_T Channel\nThe corrected equations used for the $I_T$ channel in NEST are\n\\begin{align}\nI_T &= g_{\\text{peak}, T} m_T^2(V, t) h_T(V,t) (V-E_T) \\\nm_T^{\\infty}(V) &= \\frac{1}{1+\\exp\\left(-\\frac{V+59\\text{mV}}{6.2\\text{mV}}\\right)}\\\n\\tau_{m,T}(V) &= 0.13\\text{ms} \n + \\frac{0.22\\text{ms}}{\\exp\\left(-\\frac{V + 132\\text{mV}}{16.7\\text{mV}}\\right) + \\exp\\left(\\frac{V + 16.8\\text{mV}}{18.2\\text{mV}}\\right)} \\ \nh_T^{\\infty}(V) &= \\frac{1}{1+\\exp\\left(\\frac{V+83\\text{mV}}{4\\text{mV}}\\right)}\\\n\\tau_{h,T}(V) &= 8.2\\text{ms} + \\frac{56.6\\text{ms} + 0.27\\text{ms} \\exp\\left(\\frac{V + 115.2\\text{mV}}{5\\text{mV}}\\right)}{1 + \\exp\\left(\\frac{V + 86\\text{mV}}{3.2\\text{mV}}\\right)}\n\\end{align}", "nest.ResetKernel()\nclass IT(Channel):\n \n nest_g = 'g_peak_T'\n nest_I = 'I_T'\n \n def __init__(self, ht_params):\n self.hp = ht_params\n \n def tau_m(self, V):\n return 0.13 + 0.22/(np.exp(-(V+132)/16.7) + np.exp((V+16.8)/18.2))\n\n def tau_h(self, V):\n return 8.2 + (56.6 + 0.27 * np.exp((V+115.2)/5.0)) /(1 + np.exp((V+86.0)/3.2))\n\n def m_inf(self, V):\n return 1/(1+np.exp(-(V+59.0)/6.2))\n\n def h_inf(self, V):\n return 1/(1+np.exp((V+83.0)/4.0))\n\n def compute_I(self, t, V, m0, h0, D0):\n self.m = si.odeint(self.dm, m0, t, args=(V,))\n self.h = si.odeint(self.dh, h0, t, args=(V,))\n return - self.hp['g_peak_T'] * self.m**2 * self.h * (V - self.hp['E_rev_T'])\n\niT = IT(nest.GetDefaults('ht_neuron'))\n\nV = np.linspace(-110, 30, 100)\nplt.plot(V, 10 * iT.tau_m(V), 'b-', label='10 * tau_m');\nplt.plot(V, iT.tau_h(V), 'b--', label='tau_h');\nax1 = plt.gca();\nax1.set_xlabel('Voltage V [mV]');\nax1.set_ylabel('Time constants [ms]', color='b');\nax2 = ax1.twinx()\nax2.plot(V, iT.m_inf(V), 'g-', label='m_inf');\nax2.plot(V, iT.h_inf(V), 'g--', 
label='h_inf');\nax2.set_ylabel('Steady-state', color='g');\nln1, lb1 = ax1.get_legend_handles_labels()\nln2, lb2 = ax2.get_legend_handles_labels()\nplt.legend(ln1+ln2, lb1+lb2, loc='upper right');", "Time constants here are much shorter than for I_h\nTime constants are about five times shorter than in Fig 1 of Huguenard and McCormick, J Neurophysiol 68:1373, 1992, cited in [HT05], but that may be due to the fact that the original data was collected at 23-25C and parameters have been adjusted to 36C.\nSteady-state activation and inactivation look much like in Huguenard and McCormick.\nNote: Most detailed paper on data is Huguenard and Prince, J Neurosci 12:3804-3817, 1992. The parameters given for h_inf here are for VB cells, not nRT cells in that paper (Fig 5B), parameters for m_inf are similar to but not exactly those of Fig 4B for either VB or nRT.", "iT = IT(nest.GetDefaults('ht_neuron'))\nnr, cr = voltage_clamp(iT, [(200, -65.), (200, -80.), (200, -100.), (200, -90.), (200, -70.),\n (200, -55.)],\n nest_dt=0.1) \n\nplt.subplot(1, 2, 1)\nplt.plot(nr.times, nr.I_T, label='NEST');\nplt.plot(cr.times, cr.I_T, label='Control');\nplt.legend(loc='upper left');\nplt.xlabel('Time [ms]');\nplt.ylabel('I_T [mV]');\nplt.title('I_T current')\n\nplt.subplot(1, 2, 2)\nplt.plot(nr.times, (nr.I_T-cr.I_T)/np.abs(cr.I_T));\nplt.title('Relative I_T error')\nplt.xlabel('Time [ms]');\nplt.ylabel('Rel. 
error (NEST-Control)/|Control|');", "Also here the results are in good agreement and the error appears acceptable.\n\nI_NaP channel\nThis channel adapts instantaneously to changes in membrane potential:\n\\begin{align}\nI_{NaP} &= - g_{\\text{peak}, NaP} (m_{NaP}^{\\infty}(V, t))^3 (V-E_{NaP}) \\\nm_{NaP}^{\\infty}(V) &= \\frac{1}{1+\\exp\\left(-\\frac{V+55.7\\text{mV}}{7.7\\text{mV}}\\right)}\n\\end{align}", "nest.ResetKernel()\nclass INaP(Channel):\n \n nest_g = 'g_peak_NaP'\n nest_I = 'I_NaP'\n \n def __init__(self, ht_params):\n self.hp = ht_params\n \n def m_inf(self, V):\n return 1/(1+np.exp(-(V+55.7)/7.7))\n \n def compute_I(self, t, V, m0, h0, D0):\n return self.I_V_curve(V * np.ones_like(t)) \n\n def I_V_curve(self, V):\n self.m = self.m_inf(V)\n return - self.hp['g_peak_NaP'] * self.m**3 * (V - self.hp['E_rev_NaP'])\n\niNaP = INaP(nest.GetDefaults('ht_neuron'))\nV = np.arange(-110., 30., 1.)\nnr, cr = voltage_clamp(iNaP, [(1, v) for v in V], nest_dt=0.1)\n\nplt.subplot(1, 2, 1)\nplt.plot(nr.times, nr.I_NaP, label='NEST');\nplt.plot(cr.times, cr.I_NaP, label='Control');\nplt.legend(loc='upper left');\nplt.xlabel('Time [ms]');\nplt.ylabel('I_NaP [mV]');\nplt.title('I_NaP current')\n\nplt.subplot(1, 2, 2)\nplt.plot(nr.times, (nr.I_NaP-cr.I_NaP));\nplt.title('I_NaP error')\nplt.xlabel('Time [ms]');\nplt.ylabel('Error (NEST-Control)');", "Perfect agreement\nStep structure is because $V$ changes only every second.\n\nI_KNa channel (aka I_DK)\nEquations for this channel are\n\\begin{align}\nI_{DK} &= - g_{\\text{peak},DK} m_{DK}(V,t) (V - E_{DK})\\\n m_{DK} &= \\frac{1}{1 + \\left(\\frac{d_{1/2}}{D}\\right)^{3.5}}\\\n \\frac{dD}{dt} &= D_{\\text{influx}}(V) - \\frac{D-D_{\\text{eq}}}{\\tau_D} = \\frac{D_{\\infty}(V)-D}{\\tau_D} \\\n D_{\\infty}(V) &= \\tau_D D_{\\text{influx}}(V) + {D_{\\text{eq}}}\\\n D_{\\text{influx}} &= \\frac{D_{\\text{influx,peak}}}{1+ \\exp\\left(-\\frac{V-D_{\\theta}}{\\sigma_D}\\right)} \n\\end{align}\nwith 
\n|$D_{\\text{influx,peak}}$|$D_{\\text{eq}}$|$\\tau_D$|$D_{\\theta}$|$\\sigma_D$|$d_{1/2}$|\n| --: | --: | --: | --: | --: | --: |\n|$0.025\\text{ms}^{-1}$ |$0.001$|$1250\\text{ms}$|$-10\\text{mV}$|$5\\text{mV}$|$0.25$|\nNote the following:\n- $D_{eq}$ is the equilibrium value only for $D_{\\text{influx}}(V)=0$, i.e., in the limit $V\\to -\\infty$ and $t\\to\\infty$.\n- The actual steady-state value is $D_{\\infty}$.\n- $m_{DK}$ is a steep sigmoid which is almost 0 or 1 except for a narrow window around $d_{1/2}$.\n- To the left of this window, $I_{DK}\\approx 0$.\n- To the right of this window, $I_{DK}\\sim -(V-E_{DK})$.\n- $m_{DK}$ is not integrated over time, instead it is an instantaneous transform of $D$, which is integrated over time.", "nest.ResetKernel()\nclass IDK(Channel):\n \n nest_g = 'g_peak_KNa'\n nest_I = 'I_KNa'\n \n def __init__(self, ht_params):\n self.hp = ht_params\n \n def m_DK(self, D):\n return 1/(1+(0.25/D)**3.5)\n\n def D_inf(self, V):\n return 1250. * self.D_influx(V) + 0.001\n \n def D_influx(self, V):\n return 0.025 / ( 1 + np.exp(-(V+10)/5.) 
)\n \n def dD(self, D, t, V):\n return (self.D_inf(V) - D)/1250.\n \n def compute_I(self, t, V, m0, h0, D0):\n self.D = si.odeint(self.dD, D0, t, args=(V,))\n self.m = self.m_DK(self.D)\n return - self.hp['g_peak_KNa'] * self.m * (V - self.hp['E_rev_KNa'])", "Properties of I_DK", "iDK = IDK(nest.GetDefaults('ht_neuron'))\n\nD=np.linspace(0.01, 1.5,num=200);\nV=np.linspace(-110, 30, num=200);\n\nax1 = plt.subplot2grid((1, 9), (0, 0), colspan=4);\nax2 = ax1.twinx()\nax3 = plt.subplot2grid((1, 9), (0, 6), colspan=3);\n\nax1.plot(V, -iDK.m_DK(iDK.D_inf(V))*(V - iDK.hp['E_rev_KNa']), 'g');\nax1.set_ylabel('Current I_inf(V)', color='g');\nax2.plot(V, iDK.m_DK(iDK.D_inf(V)), 'b');\nax2.set_ylabel('Activation m_inf(D_inf(V))', color='b');\nax1.set_xlabel('Membrane potential V [mV]');\nax2.set_title('Steady-state activation and current');\n\nax3.plot(D, iDK.m_DK(D), 'b');\nax3.set_xlabel('D');\nax3.set_ylabel('Activation m_inf(D)', color='b');\nax3.set_title('Activation as function of D');", "Note that current in steady state is \n$\\approx 0$ for $V < -40$mV\n$\\sim -(V-E_{DK})$ for $V> -30$mV\n\n\n\nVoltage clamp", "nr, cr = voltage_clamp(iDK, [(500, -65.), (500, -35.), (500, -25.), (500, 0.), (5000, -70.)],\n nest_dt=1.) \n\nax1 = plt.subplot2grid((1, 9), (0, 0), colspan=4);\nax2 = plt.subplot2grid((1, 9), (0, 6), colspan=3);\n\nax1.plot(nr.times, nr.I_KNa, label='NEST');\nax1.plot(cr.times, cr.I_KNa, label='Control');\nax1.legend(loc='lower right');\nax1.set_xlabel('Time [ms]');\nax1.set_ylabel('I_DK [mV]');\nax1.set_title('I_DK current');\n\nax2.plot(nr.times, (nr.I_KNa-cr.I_KNa)/np.abs(cr.I_KNa));\nax2.set_title('Relative I_DK error')\nax2.set_xlabel('Time [ms]');\nax2.set_ylabel('Rel. 
error (NEST-Control)/|Control|');", "Looks very fine.\nNote that the current gets appreviable only when $V>-35$ mV\nOnce that threshold is crossed, the current adjust instantaneously to changes in $V$, since it is in the linear regime.\nWhen returning from $V=0$ to $V=-70$ mV, the current remains large for a long time since $D$ has to drop below 1 before $m_{\\infty}$ changes appreciably\n\nSynaptic channels\nFor synaptic channels, NEST allows recording of conductances, so we test conductances directly. Due to the voltage-dependence of the NMDA channels, we still do this in voltage clamp.", "nest.ResetKernel()\nclass SynChannel:\n \"\"\"\n Base class for synapse channel models in Python.\n \"\"\"\n\n def t_peak(self):\n return self.tau_1 * self.tau_2 / (self.tau_2 - self.tau_1) * np.log(self.tau_2/self.tau_1)\n \n def beta(self, t):\n val = ( ( np.exp(-t/self.tau_1) - np.exp(-t/self.tau_2) ) /\n ( np.exp(-self.t_peak()/self.tau_1) - np.exp(-self.t_peak()/self.tau_2) ) )\n val[t < 0] = 0\n return val\n\ndef syn_voltage_clamp(channel, DT_V_seq, nest_dt=0.1):\n \"Run voltage clamp with voltage V through intervals DT with single spike at time 1\"\n\n spike_time = 1.0\n delay = 1.0\n \n nest.ResetKernel()\n nest.SetKernelStatus({'resolution': nest_dt})\n try:\n nrn = nest.Create('ht_neuron', params={'theta': 1e6, 'theta_eq': 1e6,\n 'instant_unblock_NMDA': channel.instantaneous})\n except:\n nrn = nest.Create('ht_neuron', params={'theta': 1e6, 'theta_eq': 1e6})\n\n mm = nest.Create('multimeter', \n params={'record_from': ['g_'+channel.receptor],\n 'interval': nest_dt})\n sg = nest.Create('spike_generator', params={'spike_times': [spike_time]})\n nest.Connect(mm, nrn)\n nest.Connect(sg, nrn, syn_spec={'weight': 1.0, 'delay': delay,\n 'receptor_type': channel.rec_code})\n\n # ensure we start from equilibrated state\n nrn.set(V_m=DT_V_seq[0][1], equilibrate=True, voltage_clamp=True)\n for DT, V in DT_V_seq:\n nrn.set(V_m=V, voltage_clamp=True)\n nest.Simulate(DT)\n t_end = 
nest.GetKernelStatus('time')\n \n # simulate a little more so we get all data up to t_end to multimeter\n nest.Simulate(2 * nest.GetKernelStatus('min_delay'))\n \n tmp = pd.DataFrame(mm.get('events'))\n nest_res = tmp[tmp.times <= t_end]\n \n # Control part\n t_old = 0.\n t_all, g_all = [], []\n \n m_fast_old = (channel.m_inf(DT_V_seq[0][1]) \n if channel.receptor == 'NMDA' and not channel.instantaneous else None) \n m_slow_old = (channel.m_inf(DT_V_seq[0][1]) \n if channel.receptor == 'NMDA' and not channel.instantaneous else None) \n\n for DT, V in DT_V_seq:\n t_loc = np.arange(0., DT+0.1*nest_dt, nest_dt)\n g_loc = channel.g(t_old+t_loc-(spike_time+delay), V, m_fast_old, m_slow_old)\n t_all.extend(t_old + t_loc[1:])\n g_all.extend(g_loc[1:])\n m_fast_old = channel.m_fast[-1] if m_fast_old is not None else None\n m_slow_old = channel.m_slow[-1] if m_slow_old is not None else None\n t_old = t_all[-1]\n \n ctrl_res = pd.DataFrame({'times': t_all, 'g_'+channel.receptor: g_all})\n\n return nest_res, ctrl_res", "AMPA, GABA_A, GABA_B channels", "nest.ResetKernel()\nclass PlainChannel(SynChannel):\n def __init__(self, hp, receptor):\n self.hp = hp\n self.receptor = receptor\n self.rec_code = hp['receptor_types'][receptor]\n self.tau_1 = hp['tau_rise_'+receptor]\n self.tau_2 = hp['tau_decay_'+receptor]\n self.g_peak = hp['g_peak_'+receptor]\n self.E_rev = hp['E_rev_'+receptor]\n \n def g(self, t, V, mf0, ms0):\n return self.g_peak * self.beta(t)\n \n def I(self, t, V):\n return - self.g(t) * (V-self.E_rev)\n\nampa = PlainChannel(nest.GetDefaults('ht_neuron'), 'AMPA')\nam_n, am_c = syn_voltage_clamp(ampa, [(25, -70.)], nest_dt=0.1)\nplt.subplot(1, 2, 1);\nplt.plot(am_n.times, am_n.g_AMPA, label='NEST');\nplt.plot(am_c.times, am_c.g_AMPA, label='Control');\nplt.xlabel('Time [ms]');\nplt.ylabel('g_AMPA');\nplt.title('AMPA Channel');\nplt.subplot(1, 2, 2);\nplt.plot(am_n.times, (am_n.g_AMPA-am_c.g_AMPA)/am_c.g_AMPA);\nplt.xlabel('Time [ms]');\nplt.ylabel('Rel 
error');\nplt.title('AMPA rel error');", "Looks quite good, but the error is maybe a bit larger than one would hope.\nBut the synaptic rise time is short (0.5 ms) compared to the integration step in NEST (0.1 ms), which may explain the error.\nReducing the time step reduces the error:", "ampa = PlainChannel(nest.GetDefaults('ht_neuron'), 'AMPA')\nam_n, am_c = syn_voltage_clamp(ampa, [(25, -70.)], nest_dt=0.001)\nplt.subplot(1, 2, 1);\nplt.plot(am_n.times, am_n.g_AMPA, label='NEST');\nplt.plot(am_c.times, am_c.g_AMPA, label='Control');\nplt.xlabel('Time [ms]');\nplt.ylabel('g_AMPA');\nplt.title('AMPA Channel');\nplt.subplot(1, 2, 2);\nplt.plot(am_n.times, (am_n.g_AMPA-am_c.g_AMPA)/am_c.g_AMPA);\nplt.xlabel('Time [ms]');\nplt.ylabel('Rel error');\nplt.title('AMPA rel error');\n\ngaba_a = PlainChannel(nest.GetDefaults('ht_neuron'), 'GABA_A')\nga_n, ga_c = syn_voltage_clamp(gaba_a, [(50, -70.)])\nplt.subplot(1, 2, 1);\nplt.plot(ga_n.times, ga_n.g_GABA_A, label='NEST');\nplt.plot(ga_c.times, ga_c.g_GABA_A, label='Control');\nplt.xlabel('Time [ms]');\nplt.ylabel('g_GABA_A');\nplt.title('GABA_A Channel');\nplt.subplot(1, 2, 2);\nplt.plot(ga_n.times, (ga_n.g_GABA_A-ga_c.g_GABA_A)/ga_c.g_GABA_A);\nplt.xlabel('Time [ms]');\nplt.ylabel('Rel error');\nplt.title('GABA_A rel error');\n\ngaba_b = PlainChannel(nest.GetDefaults('ht_neuron'), 'GABA_B')\ngb_n, gb_c = syn_voltage_clamp(gaba_b, [(750, -70.)])\nplt.subplot(1, 2, 1);\nplt.plot(gb_n.times, gb_n.g_GABA_B, label='NEST');\nplt.plot(gb_c.times, gb_c.g_GABA_B, label='Control');\nplt.xlabel('Time [ms]');\nplt.ylabel('g_GABA_B');\nplt.title('GABA_B Channel');\nplt.subplot(1, 2, 2);\nplt.plot(gb_n.times, (gb_n.g_GABA_B-gb_c.g_GABA_B)/gb_c.g_GABA_B);\nplt.xlabel('Time [ms]');\nplt.ylabel('Rel error');\nplt.title('GABA_B rel error');", "Looks good for all\nFor GABA_B the error is negligible even for dt = 0.1, since the time constants are large.\n\nNMDA Channel\nThe equations for this channel are\n\\begin{align}\n 
\\bar{g}{\\text{NMDA}}(t) &= m(V, t) g{\\text{NMDA}}(t) m(V, t)\\ &= a(V) m_{\\text{fast}}^(V, t) + ( 1 - a(V) ) m_{\\text{slow}}^(V, t)\\\n a(V) &= 0.51 - 0.0028 V \\\n m^{\\infty}(V) &= \\frac{1}{ 1 + \\exp\\left( -S_{\\text{act}} ( V - V_{\\text{act}} ) \\right) } \\\n m_X^*(V, t) &= \\min(m^{\\infty}(V), m_X(V, t))\\\n \\frac{\\text{d}m_X}{\\text{d}t} &= \\frac{m^{\\infty}(V) - m_X }{ \\tau_{\\text{Mg}, X}}\n\\end{align} \nwhere $g_{\\text{NMDA}}(t)$ is the beta functions as for the other channels. In case of instantaneous unblocking, $m=m^{\\infty}$.\nNMDA with instantaneous unblocking", "class NMDAInstantChannel(SynChannel):\n def __init__(self, hp, receptor):\n self.hp = hp\n self.receptor = receptor\n self.rec_code = hp['receptor_types'][receptor]\n self.tau_1 = hp['tau_rise_'+receptor]\n self.tau_2 = hp['tau_decay_'+receptor]\n self.g_peak = hp['g_peak_'+receptor]\n self.E_rev = hp['E_rev_'+receptor]\n self.S_act = hp['S_act_NMDA']\n self.V_act = hp['V_act_NMDA']\n self.instantaneous = True\n \n def m_inf(self, V):\n return 1. / ( 1. 
+ np.exp(-self.S_act*(V-self.V_act)))\n \n def g(self, t, V, mf0, ms0):\n return self.g_peak * self.m_inf(V) * self.beta(t)\n \n def I(self, t, V):\n return - self.g(t) * (V-self.E_rev)\n\nnmdai = NMDAInstantChannel(nest.GetDefaults('ht_neuron'), 'NMDA')\nni_n, ni_c = syn_voltage_clamp(nmdai, [(50, -60.), (50, -50.), (50, -20.), (50, 0.), (50, -60.)])\nplt.subplot(1, 2, 1);\nplt.plot(ni_n.times, ni_n.g_NMDA, label='NEST');\nplt.plot(ni_c.times, ni_c.g_NMDA, label='Control');\nplt.xlabel('Time [ms]');\nplt.ylabel('g_NMDA');\nplt.title('NMDA Channel (instant unblock)');\nplt.subplot(1, 2, 2);\nplt.plot(ni_n.times, (ni_n.g_NMDA-ni_c.g_NMDA)/ni_c.g_NMDA);\nplt.xlabel('Time [ms]');\nplt.ylabel('Rel error');\nplt.title('NMDA (inst) rel error');", "Looks good\nJumps are due to blocking/unblocking of Mg channels with changes in $V$\n\nNMDA with unblocking over time", "class NMDAChannel(SynChannel):\n def __init__(self, hp, receptor):\n self.hp = hp\n self.receptor = receptor\n self.rec_code = hp['receptor_types'][receptor]\n self.tau_1 = hp['tau_rise_'+receptor]\n self.tau_2 = hp['tau_decay_'+receptor]\n self.g_peak = hp['g_peak_'+receptor]\n self.E_rev = hp['E_rev_'+receptor]\n self.S_act = hp['S_act_NMDA']\n self.V_act = hp['V_act_NMDA']\n self.tau_fast = hp['tau_Mg_fast_NMDA']\n self.tau_slow = hp['tau_Mg_slow_NMDA']\n self.instantaneous = False\n \n def m_inf(self, V):\n return 1. / ( 1. 
+ np.exp(-self.S_act*(V-self.V_act)) )\n \n def dm(self, m, t, V, tau):\n return ( self.m_inf(V) - m ) / tau\n\n def g(self, t, V, mf0, ms0):\n self.m_fast = si.odeint(self.dm, mf0, t, args=(V, self.tau_fast))\n self.m_slow = si.odeint(self.dm, ms0, t, args=(V, self.tau_slow))\n a = 0.51 - 0.0028 * V\n m_inf = self.m_inf(V)\n mfs = self.m_fast[:]\n mfs[mfs > m_inf] = m_inf\n mss = self.m_slow[:]\n mss[mss > m_inf] = m_inf\n m = np.squeeze(a * mfs + ( 1 - a ) * mss)\n return self.g_peak * m * self.beta(t)\n \n def I(self, t, V):\n raise NotImplementedError()\n\nnmda = NMDAChannel(nest.GetDefaults('ht_neuron'), 'NMDA')\nnm_n, nm_c = syn_voltage_clamp(nmda, [(50, -70.), (50, -50.), (50, -20.), (50, 0.), (50, -60.)])\nplt.subplot(1, 2, 1);\nplt.plot(nm_n.times, nm_n.g_NMDA, label='NEST');\nplt.plot(nm_c.times, nm_c.g_NMDA, label='Control');\nplt.xlabel('Time [ms]');\nplt.ylabel('g_NMDA');\nplt.title('NMDA Channel');\nplt.subplot(1, 2, 2);\nplt.plot(nm_n.times, (nm_n.g_NMDA-nm_c.g_NMDA)/nm_c.g_NMDA);\nplt.xlabel('Time [ms]');\nplt.ylabel('Rel error');\nplt.title('NMDA rel error');", "Looks fine, too.\n\nSynapse Model\nWe test the synapse model by placing it between two parrot neurons, sending spikes with differing intervals and compare to expected weights.", "nest.ResetKernel()\nsp = nest.GetDefaults('ht_synapse')\nP0 = sp['P']\ndP = sp['delta_P']\ntP = sp['tau_P']\nspike_times = [10., 12., 20., 20.5, 100., 200., 1000.]\nexpected = [(0., P0, P0)]\nfor idx, t in enumerate(spike_times):\n tlast, Psend, Ppost = expected[idx]\n Psend = 1 - (1-Ppost)*math.exp(-(t-tlast)/tP)\n expected.append((t, Psend, (1-dP)*Psend))\nexpected_weights = list(zip(*expected[1:]))[1]\n\nsg = nest.Create('spike_generator', params={'spike_times': spike_times})\nn = nest.Create('parrot_neuron', 2)\nwr = nest.Create('weight_recorder')\n\nnest.SetDefaults('ht_synapse', {'weight_recorder': wr, 'weight': 1.0})\nnest.Connect(sg, n[:1])\nnest.Connect(n[:1], n[1:], 
syn_spec='ht_synapse')\nnest.Simulate(1200)\n\nrec_weights = wr.get('events', 'weights')\n\nprint('Recorded weights:', rec_weights)\nprint('Expected weights:', expected_weights)\nprint('Difference :', np.array(rec_weights) - np.array(expected_weights))", "Perfect agreement, synapse model looks fine.\nIntegration test: Neuron driven through all synapses\nWe drive a Hill-Tononi neuron through pulse packets arriving at 1 second intervals, impinging through all synapse types. Compare this to Fig 5 of [HT05].", "nest.ResetKernel()\nnrn = nest.Create('ht_neuron')\nppg = nest.Create('pulsepacket_generator', n=4,\n params={'pulse_times': [700., 1700., 2700., 3700.],\n 'activity': 700, 'sdev': 50.})\npr = nest.Create('parrot_neuron', n=4)\nmm = nest.Create('multimeter', \n params={'interval': 0.1,\n 'record_from': ['V_m', 'theta',\n 'g_AMPA', 'g_NMDA',\n 'g_GABA_A', 'g_GABA_B',\n 'I_NaP', 'I_KNa', 'I_T', 'I_h']})\n\nweights = {'AMPA': 25., 'NMDA': 20., 'GABA_A': 10., 'GABA_B': 1.}\nreceptors = nest.GetDefaults('ht_neuron')['receptor_types']\n\nnest.Connect(ppg, pr, 'one_to_one')\nfor p, (rec_name, rec_wgt) in zip(pr, weights.items()):\n nest.Connect(p, nrn, syn_spec={'synapse_model': 'ht_synapse',\n 'receptor_type': receptors[rec_name],\n 'weight': rec_wgt})\nnest.Connect(mm, nrn)\n\nnest.Simulate(5000)\n\ndata = nest.GetStatus(mm)[0]['events']\nt = data['times']\ndef texify_name(name):\n return r'${}_{{\\mathrm{{{}}}}}$'.format(*name.split('_'))\n\nfig = plt.figure(figsize=(12,10))\n\nVax = fig.add_subplot(311)\nVax.plot(t, data['V_m'], 'k', lw=1, label=r'$V_m$')\nVax.plot(t, data['theta'], 'r', alpha=0.5, lw=1, label=r'$\\Theta$')\nVax.set_ylabel('Potential [mV]')\nVax.legend(fontsize='small')\nVax.set_title('ht_neuron driven by sinousiodal Poisson processes')\n\nIax = fig.add_subplot(312)\nfor iname, color in (('I_h', 'blue'), ('I_KNa', 'green'),\n ('I_NaP', 'red'), ('I_T', 'cyan')):\n Iax.plot(t, data[iname], color=color, lw=1, 
label=texify_name(iname))\n#Iax.set_ylim(-60, 60)\nIax.legend(fontsize='small')\nIax.set_ylabel('Current [mV]')\n\nGax = fig.add_subplot(313)\nfor gname, sgn, color in (('g_AMPA', 1, 'green'), ('g_GABA_A', -1, 'red'), \n ('g_GABA_B', -1, 'cyan'), ('g_NMDA', 1, 'magenta')):\n Gax.plot(t, sgn*data[gname], lw=1, label=texify_name(gname), color=color)\n#Gax.set_ylim(-150, 150)\nGax.legend(fontsize='small')\nGax.set_ylabel('Conductance')\nGax.set_xlabel('Time [ms]');" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
percyfal/bokeh
examples/howto/notebook_comms/Numba Image Example.ipynb
bsd-3-clause
[ "Interactive Image Processing with Numba and Bokeh\nThis demo shows off how interactive image processing can be done in the notebook, using Numba for numerics, Bokeh for plotting, and Ipython interactors for widgets. The demo runs entirely inside the Ipython notebook, with no Bokeh server required.\nNumba must be installed in order to run this demo. To run, click on, Cell-&gt;Run All in the top menu, then scroll down to individual examples and play around with their controls.", "from __future__ import print_function, division\n\nfrom timeit import default_timer as timer\n\nfrom bokeh.plotting import figure, show, output_notebook\nfrom bokeh.models import GlyphRenderer, LinearColorMapper\nfrom bokeh.io import push_notebook\nfrom numba import jit, njit\n\nfrom ipywidgets import interact\nimport numpy as np\nimport scipy.misc\n\noutput_notebook()", "Gaussian Blur\nThis first section demonstrates performing a simple Gaussian blur on an image. It presents the image, as well as a slider that controls how much blur is applied. Numba is used to compile the python blur kernel, which is invoked when the user modifies the slider. 
\nNote: This simple example does not handle the edge case, so the edge of the image will remain unblurred as the slider is increased.", "# smaller image\nimg_blur = (scipy.misc.ascent()[::-1,:]/255.0)[:250, :250].copy(order='C')\n\npalette = ['#%02x%02x%02x' %(i,i,i) for i in range(256)]\nwidth, height = img_blur.shape\np_blur = figure(x_range=(0, width), y_range=(0, height))\nr_blur = p_blur.image(image=[img_blur], x=[0], y=[0], dw=[width], dh=[height], palette=palette, name='blur')\n\n@njit\ndef blur(outimg, img, amt):\n iw, ih = img.shape\n for i in range(amt, iw-amt):\n for j in range(amt, ih-amt):\n px = 0.\n for w in range(-amt//2, amt//2):\n for h in range(-amt//2, amt//2):\n px += img[i+w, j+h]\n outimg[i, j]= px/(amt*amt)\n\ndef update(i=0):\n level = 2*i + 1\n \n out = img_blur.copy()\n \n ts = timer()\n blur(out, img_blur, level)\n te = timer()\n print('blur takes:', te - ts)\n \n renderer = p_blur.select(dict(name=\"blur\", type=GlyphRenderer))\n r_blur.data_source.data['image'] = [out]\n push_notebook(handle=t_blur)\n\nt_blur = show(p_blur, notebook_handle=True)\n\ninteract(update, i=(0, 10))", "3x3 Image Kernels\nMany image processing filters can be expressed as 3x3 matrices. This more sophisticated example demonstrates how numba can be used to compile kernels for arbitrary 3x3 kernels, and then provides serveral predefined kernels for the user to experiment with. \nThe UI presents the image to process (along with a dropdown to select a different image) as well as a dropdown that lets the user select which kernel to apply. Additioanlly there are sliders the permit adjustment to the bias and scale of the final greyscale image. \nNote: Right now, adjusting the scale and bias are not as efficient as possible, because the update function always also applies the kernel (even if it has not changed). 
A better implementation might have a class that keeps track of the current kernal and output image so that bias and scale can be applied by themselves.", "@jit\ndef getitem(img, x, y):\n w, h = img.shape\n if x >= w:\n x = w - 1 - (x - w)\n if y >= h:\n y = h - 1 - (y - h)\n return img[x, y]\n \ndef filter_factory(kernel):\n ksum = np.sum(kernel)\n if ksum == 0:\n ksum = 1\n k9 = kernel / ksum\n \n @jit\n def kernel_apply(img, out, x, y):\n tmp = 0\n for i in range(3):\n for j in range(3):\n tmp += img[x+i-1, y+j-1] * k9[i, j]\n out[x, y] = tmp\n \n @jit\n def kernel_apply_edge(img, out, x, y):\n tmp = 0\n for i in range(3):\n for j in range(3):\n tmp += getitem(img, x+i-1, y+j-1) * k9[i, j]\n out[x, y] = tmp\n \n @jit\n def kernel_k9(img, out):\n # Loop through all internals\n for x in range(1, img.shape[0] -1):\n for y in range(1, img.shape[1] -1):\n kernel_apply(img, out, x, y)\n \n # Loop through all the edges\n for x in range(img.shape[0]):\n kernel_apply_edge(img, out, x, 0)\n kernel_apply_edge(img, out, x, img.shape[1] - 1)\n \n for y in range(img.shape[1]):\n kernel_apply_edge(img, out, 0, y)\n kernel_apply_edge(img, out, img.shape[0] - 1, y)\n \n return kernel_k9\n\naverage = np.array([\n [1, 1, 1],\n [1, 1, 1],\n [1, 1, 1],\n], dtype=np.float32)\n\nsharpen = np.array([\n [-1, -1, -1],\n [-1, 12, -1],\n [-1, -1, -1],\n], dtype=np.float32)\n\nedge = np.array([\n [ 0, -1, 0],\n [-1, 4, -1],\n [ 0, -1, 0],\n], dtype=np.float32)\n\nedge_h = np.array([\n [ 0, 0, 0],\n [-1, 2, -1],\n [ 0, 0, 0],\n], dtype=np.float32)\n\nedge_v = np.array([\n [0, -1, 0],\n [0, 2, 0],\n [0, -1, 0],\n], dtype=np.float32)\n\ngradient_h = np.array([\n [-1, -1, -1],\n [ 0, 0, 0],\n [ 1, 1, 1],\n], dtype=np.float32)\n\ngradient_v = np.array([\n [-1, 0, 1],\n [-1, 0, 1],\n [-1, 0, 1],\n], dtype=np.float32)\n\nsobol_h = np.array([\n [ 1, 2, 1],\n [ 0, 0, 0],\n [-1, -2, -1],\n], dtype=np.float32)\n\nsobol_v = np.array([\n [-1, 0, 1],\n [-2, 0, 2],\n [-1, 0, 1],\n], dtype=np.float32)\n 
\nemboss = np.array([ \n [-2, -1, 0],\n [-1, 1, 1],\n [ 0, 1, 2],\n], dtype=np.float32)\n\nkernels = {\n \"average\" : filter_factory(average),\n \"sharpen\" : filter_factory(sharpen),\n \"edge (both)\" : filter_factory(edge),\n \"edge (horizontal)\" : filter_factory(edge_h),\n \"edge (vertical)\" : filter_factory(edge_v),\n \"gradient (horizontal)\" : filter_factory(gradient_h),\n \"gradient (vertical)\" : filter_factory(gradient_v),\n \"sobol (horizontal)\" : filter_factory(sobol_h),\n \"sobol (vertical)\" : filter_factory(sobol_v),\n \"emboss\" : filter_factory(emboss),\n}\n\nimages = {\n \"ascent\" : np.copy(scipy.misc.ascent().astype(np.float32)[::-1, :]),\n \"face\" : np.copy(scipy.misc.face(gray=True).astype(np.float32)[::-1, :]),\n}\n\npalette = ['#%02x%02x%02x' %(i,i,i) for i in range(256)]\ncm = LinearColorMapper(palette=palette, low=0, high=256)\nwidth, height = images['ascent'].shape\np_kernel = figure(x_range=(0, width), y_range=(0, height))\nr_kernel = p_kernel.image(image=[images['ascent']], x=[0], y=[0], dw=[width], dh=[height], color_mapper=cm, name=\"kernel\")\n\ndef update(image=\"ascent\", kernel_name=\"none\", scale=100, bias=0):\n global _last_kname\n global _last_out\n \n img_kernel = images.get(image)\n\n kernel = kernels.get(kernel_name, None)\n if kernel == None:\n out = np.copy(img_kernel)\n\n else:\n out = np.zeros_like(img_kernel)\n\n ts = timer()\n kernel(img_kernel, out)\n te = timer()\n print('kernel takes:', te - ts)\n\n out *= scale / 100.0\n out += bias\n\n r_kernel.data_source.data['image'] = [out]\n push_notebook(handle=t_kernel)\n\nt_kernel = show(p_kernel, notebook_handle=True)\n\nknames = [\"none\"] + sorted(kernels.keys())\ninteract(update, image=[\"ascent\" ,\"face\"], kernel_name=knames, scale=(10, 100, 10), bias=(0, 255))", "Wavelet Decomposition\nThis last example demostrates a Haar wavelet decomposition using a Numba-compiled function. 
Play around with the slider to see differnet levels of decomposition of the image.", "@njit\ndef wavelet_decomposition(img, tmp):\n \"\"\"\n Perform inplace wavelet decomposition on `img` with `tmp` as\n a temporarily buffer.\n\n This is a very simple wavelet for demonstration\n \"\"\"\n w, h = img.shape\n halfwidth, halfheight = w//2, h//2\n \n lefthalf, righthalf = tmp[:halfwidth, :], tmp[halfwidth:, :]\n \n # Along first dimension\n for x in range(halfwidth):\n for y in range(h):\n lefthalf[x, y] = (img[2 * x, y] + img[2 * x + 1, y]) / 2\n righthalf[x, y] = img[2 * x, y] - img[2 * x + 1, y]\n \n # Swap buffer\n img, tmp = tmp, img\n tophalf, bottomhalf = tmp[:, :halfheight], tmp[:, halfheight:]\n \n # Along second dimension\n for y in range(halfheight):\n for x in range(w):\n tophalf[x, y] = (img[x, 2 * y] + img[x, 2 * y + 1]) / 2\n bottomhalf[x, y] = img[x, 2 * y] - img[x, 2 * y + 1]\n \n return halfwidth, halfheight\n\nimg_wavelet = np.copy(scipy.misc.face(gray=True)[::-1, :])\n\npalette = ['#%02x%02x%02x' %(i,i,i) for i in range(256)]\nwidth, height = img_wavelet.shape\np_wavelet = figure(x_range=(0, width), y_range=(0, height))\nr_wavelet = p_wavelet.image(image=[img_wavelet], x=[0], y=[0], dw=[width], dh=[height], palette=palette, name=\"wavelet\")\n\ndef update(level=0):\n\n out = np.copy(img_wavelet)\n tmp = np.zeros_like(img_wavelet)\n\n ts = timer()\n hw, hh = img_wavelet.shape\n while level > 0 and hw > 1 and hh > 1:\n hw, hh = wavelet_decomposition(out[:hw, :hh], tmp[:hw, :hh])\n level -= 1\n te = timer()\n print('wavelet takes:', te - ts)\n\n r_wavelet.data_source.data['image'] = [out]\n push_notebook(handle=t_wavelet)\n\nt_wavelet = show(p_wavelet, notebook_handle=True)\n\ninteract(update, level=(0, 7))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
michaelaye/planetpy
notebooks/planetary constants.ipynb
bsd-3-clause
[ "Quick show on how to use the planetary constants module\nplanets_pretty shows a pretty table with the units included.", "from planetarypy.constants import planets_pretty\n\nplanets_pretty\n\nplanets_pretty.dtypes", "... while planets provides a dataframe that has an easier to use dataframe index with\n* units removed\n* spaces replaced by underscore\n* all lower case", "from planetarypy.constants import planets\n\nplanets\n\nplanets.dtypes", "One can also directly import a planet which will be a pandas Series and is in fact just a column of above table.", "from planetarypy.constants import mars\nmars" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
yl565/statsmodels
examples/notebooks/wls.ipynb
bsd-3-clause
[ "Weighted Least Squares", "%matplotlib inline\n\nfrom __future__ import print_function\nimport numpy as np\nfrom scipy import stats\nimport statsmodels.api as sm\nimport matplotlib.pyplot as plt\nfrom statsmodels.sandbox.regression.predstd import wls_prediction_std\nfrom statsmodels.iolib.table import (SimpleTable, default_txt_fmt)\nnp.random.seed(1024)", "WLS Estimation\nArtificial data: Heteroscedasticity 2 groups\nModel assumptions:\n\nMisspecification: true model is quadratic, estimate only linear\nIndependent noise/error term\nTwo groups for error variance, low and high variance groups", "nsample = 50\nx = np.linspace(0, 20, nsample)\nX = np.column_stack((x, (x - 5)**2))\nX = sm.add_constant(X)\nbeta = [5., 0.5, -0.01]\nsig = 0.5\nw = np.ones(nsample)\nw[nsample * 6//10:] = 3\ny_true = np.dot(X, beta)\ne = np.random.normal(size=nsample)\ny = y_true + sig * w * e \nX = X[:,[0,1]]", "WLS knowing the true variance ratio of heteroscedasticity", "mod_wls = sm.WLS(y, X, weights=1./w)\nres_wls = mod_wls.fit()\nprint(res_wls.summary())", "OLS vs. 
WLS\nEstimate an OLS model for comparison:", "res_ols = sm.OLS(y, X).fit()\nprint(res_ols.params)\nprint(res_wls.params)", "Compare the WLS standard errors to heteroscedasticity corrected OLS standard errors:", "se = np.vstack([[res_wls.bse], [res_ols.bse], [res_ols.HC0_se], \n [res_ols.HC1_se], [res_ols.HC2_se], [res_ols.HC3_se]])\nse = np.round(se,4)\ncolnames = ['x1', 'const']\nrownames = ['WLS', 'OLS', 'OLS_HC0', 'OLS_HC1', 'OLS_HC3', 'OLS_HC3']\ntabl = SimpleTable(se, colnames, rownames, txt_fmt=default_txt_fmt)\nprint(tabl)", "Calculate OLS prediction interval:", "covb = res_ols.cov_params()\nprediction_var = res_ols.mse_resid + (X * np.dot(covb,X.T).T).sum(1)\nprediction_std = np.sqrt(prediction_var)\ntppf = stats.t.ppf(0.975, res_ols.df_resid)\n\nprstd_ols, iv_l_ols, iv_u_ols = wls_prediction_std(res_ols)", "Draw a plot to compare predicted values in WLS and OLS:", "prstd, iv_l, iv_u = wls_prediction_std(res_wls)\n\nfig, ax = plt.subplots(figsize=(8,6))\nax.plot(x, y, 'o', label=\"Data\")\nax.plot(x, y_true, 'b-', label=\"True\")\n# OLS\nax.plot(x, res_ols.fittedvalues, 'r--')\nax.plot(x, iv_u_ols, 'r--', label=\"OLS\")\nax.plot(x, iv_l_ols, 'r--')\n# WLS\nax.plot(x, res_wls.fittedvalues, 'g--.')\nax.plot(x, iv_u, 'g--', label=\"WLS\")\nax.plot(x, iv_l, 'g--')\nax.legend(loc=\"best\");", "Feasible Weighted Least Squares (2-stage FWLS)", "resid1 = res_ols.resid[w==1.]\nvar1 = resid1.var(ddof=int(res_ols.df_model)+1)\nresid2 = res_ols.resid[w!=1.]\nvar2 = resid2.var(ddof=int(res_ols.df_model)+1)\nw_est = w.copy()\nw_est[w!=1.] = np.sqrt(var2) / np.sqrt(var1)\nres_fwls = sm.WLS(y, X, 1./w_est).fit()\nprint(res_fwls.summary())" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ocelot-collab/ocelot
demos/ipython_tutorials/2_tracking.ipynb
gpl-3.0
[ "This notebook was created by Sergey Tomin (sergey.tomin@desy.de). Source and license info is on GitHub. January 2018.\nTutorial N2. Tracking.\nAs an example, we will use lattice file (converted to Ocelot format) of the European XFEL Injector. \nThis example will cover the following topics:\n\ncalculation of the linear optics for the European XFEL Injector.\nTracking of the particles in first and second order approximation without collective effects.\n\nCoordiantes\nCoordinates in Ocelot are following:\n$$ \\left (x, \\quad x' = \\frac{p_x}{p_0} \\right), \\qquad \\left (y, \\quad y' = \\frac{p_y}{p_0} \\right), \\qquad \\left (\\tau = c\\Delta t, \\quad p = \\frac{\\Delta E}{p_0 c} \\right)$$\nLet's have a look on the new variable $\\tau = c t - \\frac{s}{\\beta_0}$. $s$ is independent variable which is the distance along the beam line (which, in turn, is the path length of the reference particle) and $v_0$ is the velocity of the reference particle, $t$ is the time at which a particle arrives at the position $s$ along the beam line. For the reference particle $\\tau = 0$ for all $s$. 
A particle\narriving at a particular location at an earlier time than the reference particle has $\\tau < 0$, and a particle arriving later than the reference particle has $\\tau > 0$.\nRequirements\n\ninjector_lattice.py - input file, the Injector lattice.\nbeam_130MeV.ast - input file, initial beam distribution in ASTRA format.", "# the output of plotting commands is displayed inline within frontends, \n# directly below the code cell that produced it\n%matplotlib inline\n\n# this python library provides generic shallow (copy) and \n# deep copy (deepcopy) operations \nfrom copy import deepcopy\n\nimport time\n\n# import from Ocelot main modules and functions\nfrom ocelot import *\n\n# import from Ocelot graphical modules\nfrom ocelot.gui.accelerator import *\n\n# import injector lattice\nfrom injector_lattice import *", "If you want to see injector_lattice.py file you can run following command (lattice file is very large):\n$ %load injector_lattice.py\nThe variable cell contains all the elements of the lattice in right order.\nAnd again Ocelot will work with class MagneticLattice instead of simple sequence of element. So we have to run following command.", "lat = MagneticLattice(cell, stop=None)", "1. Design optics calculation of the European XFEL Injector\nRemark\nFor convenience reasons, we define optical functions starting at the gun by backtracking of the optical functions derived from ASTRA (or similar space charge code) at 130 MeV at the entrance to the first quadrupole. 
The optical functions we thus obtain have obviously nothing to do with the actual beam envelope form the gun to the 130 MeV point.\nBecause we work with linear accelerator we have to define initial energy and initial twiss paramters in order to get \ncorrect twiss functions along the Injector.", "# initialization of Twiss object\ntws0 = Twiss()\n# defining initial twiss parameters\ntws0.beta_x = 29.171\ntws0.beta_y = 29.171\ntws0.alpha_x = 10.955\ntws0.alpha_y = 10.955\n# defining initial electron energy in GeV\ntws0.E = 0.005 \n\n# calculate optical functions with initial twiss parameters\ntws = twiss(lat, tws0, nPoints=None)\n\n# ploting twiss paramentrs.\nplot_opt_func(lat, tws, top_plot=[\"Dx\", \"Dy\"], fig_name=\"i1\", legend=False)\nplt.show()", "2. Tracking in first and second order approximation without any collective effects\nRemark\nBecause of the reasons mentioned above, we start the beam tracking from the first quadrupole after RF cavities.\nLoading of beam distribution\nIn order to perform tracking we have to have beam distribution. We will load beam distribution from a ASTRA file ('beam_distrib.ast'). And we convert the Astra beam distribution to Ocelot format - ParticleArray. ParticleArray is designed for tracking.\nIn order to work with converters we have to import specific module from ocelot.adaptors\n\nfrom ocelot.adaptors.astra2ocelot import *\n\nAfter importing ocelot.adaptors.astra2ocelot we can use converter astraBeam2particleArray() to load and convert.\nAs you will see beam distribution consists of 200 000 particles (that is why loading can take a few second), charge 250 pC, initial energy is about 6.5 MeV.\nParticleArray is a class which includes several parameters and methods. 
\n* ParticleArray.rparticles is a 2D numpy array (6 x N) with coordinates of particles in \n$$ParticleArray.rparticles = [\\vec{x_0}, \\vec{x_1}, ..., \\vec{x_n}], $$ where $$\\vec{x_n} = (x_n, x_n', y_n, y_n', \\tau_n, p_n)^T$$\n* ParticleArray.s is the longitudinal coordinate of the reference particle in [m]. \n* ParticleArray.E is the energy of the reference particle in [GeV]. \n* ParticleArray.q_array - is a 1D numpy array of the charges each (macro) particles in [C]", "#from ocelot.adaptors.astra2ocelot import *\n#p_array_init = astraBeam2particleArray(filename='beam_130MeV.ast')\n#p_array_init = astraBeam2particleArray(filename='beam_130MeV_off_crest.ast')\n\n# save ParticleArray to compresssed numpy array \n#save_particle_array(\"tracking_beam.npz\", p_array_init)\np_array_init = load_particle_array(\"sc_beam.npz\")", "Selection of the tracking order and lattice for the tracking.\nMagneticLattice(sequence, start=None, stop=None, method=MethodTM()) have wollowing arguments: \n* sequence - list of the elements,\n* start - first element of the lattice. If None, then lattice starts from the first element of the sequence,\n* stop - last element of the lattice. If None, then lattice stops by the last element of the sequence,\n* method=MethodTM() - method of the tracking. MethodTM() class assigns transfer map to every element. By default all elements are assigned first order transfer map - TransferMap. One can create one's own map, but there are following predefined maps:\n - TransferMap - first order matrices. 
\n - SecondTM - 2nd order matrices.\n - KickTM - kick applyed.\n - RungeKuttaTM - Runge-Kutta integrator is applyed, but required 3D magnetic field function element.mag_field = lambda x, y, z: (Bx, By, Bz) (see example ocelot/demos/ebeam/tune_shift.py)", "# initialization of tracking method\nmethod = MethodTM()\n\n# for second order tracking we have to choose SecondTM \nmethod.global_method = SecondTM\n\n# for first order tracking uncomment next line\n# method.global_method = TransferMap\n\n# we start simulation from the first quadrupole (QI.46.I1) after RF section.\n# you can change stop element (and the start element, as well) \n# START_73_I1 - marker before Dog leg\n# START_96_I1 - marker before Bunch Compresion\nlat_t = MagneticLattice(cell, start=start_sim, stop=None, method=method)", "Tracking\nfor tracking we have to define following objects:\n\nNavigator is object which navigates the beam distribution (ParticleArray) throught the lattice. The Navigator knows with what step (atr: unit_step) the beam distribution will be tracked and knows where to apply one or another Physics Processes. \n\nIn order to add collective effects (Space charge, CSR or wake) method add_physics_proc() must be called:\n* add_physics_proc(physics_proc, elem1, elem2)\n - physics_proc - physics process, can be CSR, SpaceCharge, Wake etc\n - elem1 and elem2 - first and last elements between which the physics process will be applied.\n\nattributes:\nunit_step = 1 [m] (default value) - unit step for all physics processes \n\n\n\n<div class=\"alert alert-block alert-warning\">\n<b>Note:</b> unit_step is ignored if no PhysicsProcesses has been added (the PhysicsProc which applied as one kick is not counted, e.g. BeamTransform). In that case, the tracking is performed element by element. \n</div>\n\n\ntrack(lattice, p_array, navi, print_progress=True, calc_tws=True, bounds=None) \n\nthe function performs tracking of the particles [ParticleArray] through the lattice [MagneticLattice]. 
This function also calculates twiss parameters of the beam distribution on each tracking step (optional). \n\nlattice: Magnetic Lattice\np_array: ParticleArray\nnavi: Navigator\nprint_progress: True, print tracking progress\ncalc_tws: True, during the tracking twiss parameters are calculated from the beam distribution\nbounds: None, optional, [left_bound, right_bound] - bounds in units of std(p_array.tau()) to calculate twiss parameters of the particular beam slice. By default (bounds=None), twiss parameters are calculated for the whole beam.\nreturn: twiss list, ParticleArray. In case calc_tws=False, twiss_list is list of empty Twiss classes.", "navi = Navigator(lat_t)\np_array = deepcopy(p_array_init)\nstart = time.time()\ntws_track, p_array = track(lat_t, p_array, navi)\nprint(\"\\n time exec:\", time.time() - start, \"sec\")\n\n# you can change top_plot argument, for example top_plot=[\"alpha_x\", \"alpha_y\"]\nplot_opt_func(lat_t, tws_track, top_plot=[\"E\"], fig_name=0, legend=False)\nplt.show()", "Tracking with beam matching\nTo match beam with design optics we can use artificial matching - beam Transformation:\nBeamTransform(tws=Twiss())\nIn Twiss object beta, alpha functions as well as phase advances twiss.mux and twiss.muy (zero by default) also can be specified", "tw = Twiss()\ntw.beta_x = 2.36088\ntw.beta_y = 2.824\ntw.alpha_x = 1.2206\ntw.alpha_y = -1.35329\n\nbt = BeamTransform(tws=tw)\n\nnavi = Navigator(lat_t)\n\nnavi.unit_step = 1 # ignored in that case, tracking will performs element by element. 
\n # - there is no PhysicsProc along the lattice, \n # BeamTransform is aplied only once\n\nnavi.add_physics_proc(bt, OTRC_55_I1, OTRC_55_I1)\np_array = deepcopy(p_array_init)\nstart = time.time()\ntws_track, p_array = track(lat_t, p_array, navi)\nprint(\"\\n time exec:\", time.time() - start, \"sec\")\nplot_opt_func(lat_t, tws_track, top_plot=[\"E\"], fig_name=0, legend=False)\nplt.show()", "<div class=\"alert alert-block alert-warning\">\n<b>Note:</b> The function “track()” reruns twiss list (\"tws_track\") and ParticleArray (\"p_array\"). “p_array” is final ParticleArray. \"tws_track” is a list of Twiss objects where twiss parameters are calculated from the particle distribution. So, inside each Twiss object, there are twiss parameters (beta_x, alpha_x, beta_y, alpha_y) and emittances (emit_x, emit_y) as well as the first and second moments, e.g. $x\\cdot x$, $x\\cdot px$, $px \\cdot px$, $x \\cdot y$, $y \\cdot px$ and so on. It can be useful, if you want to see evolution of the beam during tracking with collective effects.\n</div>\n\nHere are examples how to retrieve the beam parameters from the twiss list.\nTo retrieve the trajectory of the beam (central of mass):\n\nx = [tw.x for tw in tws_track]\n\nOr size:\n\nsigma_x = np.sqrt([tw.xx for tw in tws_track])\n\nWhere tw.xx is the second moment. 
\nAlso, you can retrieve the beam length during beam tracking \n\nsigma_tau = np.sqrt([tw.tautau for tw in tws_track])\n\nAnd you need the longitudinal coordinate along the lattice:\n\ns = [tw.s for tw in tws_track]\n\nExample", "sigma_x = np.sqrt([tw.xx for tw in tws_track])\ns = [tw.s for tw in tws_track]\n\nplt.plot(s, sigma_x)\nplt.xlabel(\"s [m]\")\nplt.ylabel(r\"$\\sigma_x$, [m]\")\nplt.show()\n", "Beam distribution", "# the beam head is on left side \nshow_e_beam(p_array, figsize=(8,6))", "Explicit usage of matplotlib functions\nCurrent profile", "bins_start, hist_start = get_current(p_array, num_bins=200)\n\nplt.figure(4)\nplt.title(\"current: end\")\nplt.plot(bins_start*1000, hist_start)\nplt.xlabel(\"s, mm\")\nplt.ylabel(\"I, A\")\nplt.grid(True)\nplt.show()\n\ntau = np.array([p.tau for p in p_array])\ndp = np.array([p.p for p in p_array])\nx = np.array([p.x for p in p_array])\ny = np.array([p.y for p in p_array])\n\nax1 = plt.subplot(311)\n# inverse head and teil. The beam head is right side\nax1.plot(-tau*1000, x*1000, 'r.')\nplt.setp(ax1.get_xticklabels(), visible=False)\nplt.ylabel(\"x, mm\")\nplt.grid(True)\n\nax2 = plt.subplot(312, sharex=ax1)\nax2.plot(-tau*1000, y*1000, 'r.')\nplt.setp(ax2.get_xticklabels(), visible=False)\nplt.ylabel(\"y, mm\")\nplt.grid(True)\n\nax3 = plt.subplot(313, sharex=ax1)\nax3.plot(-tau*1000, dp, 'r.')\nplt.ylabel(\"dE/E\")\nplt.xlabel(\"s, mm\")\nplt.grid(True)\nplt.show()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ES-DOC/esdoc-jupyterhub
notebooks/ipsl/cmip6/models/sandbox-2/toplevel.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Toplevel\nMIP Era: CMIP6\nInstitute: IPSL\nSource ID: SANDBOX-2\nSub-Topics: Radiative Forcings. \nProperties: 85 (42 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-20 15:02:45\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'ipsl', 'sandbox-2', 'toplevel')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties\n2. Key Properties --&gt; Flux Correction\n3. Key Properties --&gt; Genealogy\n4. Key Properties --&gt; Software Properties\n5. Key Properties --&gt; Coupling\n6. Key Properties --&gt; Tuning Applied\n7. Key Properties --&gt; Conservation --&gt; Heat\n8. Key Properties --&gt; Conservation --&gt; Fresh Water\n9. Key Properties --&gt; Conservation --&gt; Salt\n10. Key Properties --&gt; Conservation --&gt; Momentum\n11. Radiative Forcings\n12. Radiative Forcings --&gt; Greenhouse Gases --&gt; CO2\n13. Radiative Forcings --&gt; Greenhouse Gases --&gt; CH4\n14. Radiative Forcings --&gt; Greenhouse Gases --&gt; N2O\n15. Radiative Forcings --&gt; Greenhouse Gases --&gt; Tropospheric O3\n16. Radiative Forcings --&gt; Greenhouse Gases --&gt; Stratospheric O3\n17. Radiative Forcings --&gt; Greenhouse Gases --&gt; CFC\n18. Radiative Forcings --&gt; Aerosols --&gt; SO4\n19. Radiative Forcings --&gt; Aerosols --&gt; Black Carbon\n20. 
Radiative Forcings --&gt; Aerosols --&gt; Organic Carbon\n21. Radiative Forcings --&gt; Aerosols --&gt; Nitrate\n22. Radiative Forcings --&gt; Aerosols --&gt; Cloud Albedo Effect\n23. Radiative Forcings --&gt; Aerosols --&gt; Cloud Lifetime Effect\n24. Radiative Forcings --&gt; Aerosols --&gt; Dust\n25. Radiative Forcings --&gt; Aerosols --&gt; Tropospheric Volcanic\n26. Radiative Forcings --&gt; Aerosols --&gt; Stratospheric Volcanic\n27. Radiative Forcings --&gt; Aerosols --&gt; Sea Salt\n28. Radiative Forcings --&gt; Other --&gt; Land Use\n29. Radiative Forcings --&gt; Other --&gt; Solar \n1. Key Properties\nKey properties of the model\n1.1. Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTop level overview of coupled model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of coupled model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Flux Correction\nFlux correction properties of the model\n2.1. Details\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe if/how flux corrections are applied in the model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.flux_correction.details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3. Key Properties --&gt; Genealogy\nGenealogy and history of the model\n3.1. 
Year Released\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nYear the model was released", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3.2. CMIP3 Parent\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCMIP3 parent if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3.3. CMIP5 Parent\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCMIP5 parent if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3.4. Previous Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nPreviously known as", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Software Properties\nSoftware properties of model\n4.1. Repository\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nLocation of code for this component.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.2. Code Version\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCode version identifier.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.3. Code Languages\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nCode language(s).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.4. Components Structure\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe how model realms are structured into independent software components (coupled via a coupler) and internal software components.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.5. Coupler\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nOverarching coupling framework for model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"OASIS\" \n# \"OASIS3-MCT\" \n# \"ESMF\" \n# \"NUOPC\" \n# \"Bespoke\" \n# \"Unknown\" \n# \"None\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "5. Key Properties --&gt; Coupling\n**\n5.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of coupling in the model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.coupling.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.2. 
Atmosphere Double Flux\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the atmosphere passing a double flux to the ocean and sea ice (as opposed to a single one)?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "5.3. Atmosphere Fluxes Calculation Grid\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nWhere are the air-sea fluxes calculated", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Atmosphere grid\" \n# \"Ocean grid\" \n# \"Specific coupler grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "5.4. Atmosphere Relative Winds\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nAre relative or absolute winds used to compute the flux? I.e. do ocean surface currents enter the wind stress calculation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6. Key Properties --&gt; Tuning Applied\nTuning methodology for model\n6.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics/diagnostics retained. Document the relative weight given to climate performance metrics/diagnostics versus process oriented metrics/diagnostics, and on the possible conflicts with parameterization level tuning. 
In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.2. Global Mean Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList set of metrics/diagnostics of the global mean state used in tuning model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.3. Regional Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList of regional metrics/diagnostics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.4. Trend Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList observed trend metrics/diagnostics used in tuning model/component (such as 20th century)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.5. Energy Balance\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how energy balance was obtained in the full system: in the various components independently or at the components coupling stage?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.6. Fresh Water Balance\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how fresh_water balance was obtained in the full system: in the various components independently or at the components coupling stage?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7. Key Properties --&gt; Conservation --&gt; Heat\nGlobal heat conservation properties of the model\n7.1. Global\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe if/how heat is conserved globally", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. Atmos Ocean Interface\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how heat is conserved at the atmosphere/ocean coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.3. Atmos Land Interface\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe if/how heat is conserved at the atmosphere/land coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.4. 
Atmos Sea-ice Interface\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how heat is conserved at the atmosphere/sea-ice coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.5. Ocean Seaice Interface\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how heat is conserved at the ocean/sea-ice coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.6. Land Ocean Interface\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how heat is conserved at the land/ocean coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8. Key Properties --&gt; Conservation --&gt; Fresh Water\nGlobal fresh water conservation properties of the model\n8.1. Global\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe if/how fresh_water is conserved globally", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Atmos Ocean Interface\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how fresh_water is conserved at the atmosphere/ocean coupling interface", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.3. Atmos Land Interface\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe if/how fresh water is conserved at the atmosphere/land coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.4. Atmos Sea-ice Interface\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how fresh water is conserved at the atmosphere/sea-ice coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.5. Ocean Seaice Interface\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how fresh water is conserved at the ocean/sea-ice coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.6. Runoff\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe how runoff is distributed and conserved", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.7. 
Iceberg Calving\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how iceberg calving is modeled and conserved", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.8. Endoreic Basins\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how endoreic basins (no ocean access) are treated", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.9. Snow Accumulation\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe how snow accumulation over land and over sea-ice is treated", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9. Key Properties --&gt; Conservation --&gt; Salt\nGlobal salt conservation properties of the model\n9.1. Ocean Seaice Interface\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how salt is conserved at the ocean/sea-ice coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Key Properties --&gt; Conservation --&gt; Momentum\nGlobal momentum conservation properties of the model\n10.1. 
Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how momentum is conserved in the model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11. Radiative Forcings\nRadiative forcings of the model for historical and scenario (aka Table 12.1 IPCC AR5)\n11.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of radiative forcings (GHG and aerosols) implementation in model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12. Radiative Forcings --&gt; Greenhouse Gases --&gt; CO2\nCarbon dioxide forcing\n12.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "12.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13. Radiative Forcings --&gt; Greenhouse Gases --&gt; CH4\nMethane forcing\n13.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14. Radiative Forcings --&gt; Greenhouse Gases --&gt; N2O\nNitrous oxide forcing\n14.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15. Radiative Forcings --&gt; Greenhouse Gases --&gt; Tropospheric O3\nTropospheric ozone forcing\n15.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "16. Radiative Forcings --&gt; Greenhouse Gases --&gt; Stratospheric O3\nStratospheric ozone forcing\n16.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17. Radiative Forcings --&gt; Greenhouse Gases --&gt; CFC\nOzone-depleting and non-ozone-depleting fluorinated gases forcing\n17.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.2. Equivalence Concentration\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDetails of any equivalence concentrations used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"Option 1\" \n# \"Option 2\" \n# \"Option 3\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.3. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18. Radiative Forcings --&gt; Aerosols --&gt; SO4\nSO4 aerosol forcing\n18.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "18.2. 
Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "19. Radiative Forcings --&gt; Aerosols --&gt; Black Carbon\nBlack carbon aerosol forcing\n19.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "19.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "20. Radiative Forcings --&gt; Aerosols --&gt; Organic Carbon\nOrganic carbon aerosol forcing\n20.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. 
via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "20.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "21. Radiative Forcings --&gt; Aerosols --&gt; Nitrate\nNitrate forcing\n21.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "21.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22. Radiative Forcings --&gt; Aerosols --&gt; Cloud Albedo Effect\nCloud albedo effect forcing (RFaci)\n22.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "22.2. Aerosol Effect On Ice Clouds\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nRadiative effects of aerosols on ice clouds are represented?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "22.3. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "23. 
Radiative Forcings --&gt; Aerosols --&gt; Cloud Lifetime Effect\nCloud lifetime effect forcing (ERFaci)\n23.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23.2. Aerosol Effect On Ice Clouds\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nRadiative effects of aerosols on ice clouds are represented?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "23.3. RFaci From Sulfate Only\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nRadiative forcing from aerosol cloud interactions from sulfate aerosol only?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "23.4. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "24. Radiative Forcings --&gt; Aerosols --&gt; Dust\nDust forcing\n24.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "24.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "25. Radiative Forcings --&gt; Aerosols --&gt; Tropospheric Volcanic\nTropospheric volcanic forcing\n25.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "25.2. Historical Explosive Volcanic Aerosol Implementation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow explosive volcanic aerosol is implemented in historical simulations", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Type A\" \n# \"Type B\" \n# \"Type C\" \n# \"Type D\" \n# \"Type E\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "25.3. Future Explosive Volcanic Aerosol Implementation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow explosive volcanic aerosol is implemented in future simulations", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Type A\" \n# \"Type B\" \n# \"Type C\" \n# \"Type D\" \n# \"Type E\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "25.4. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "26. Radiative Forcings --&gt; Aerosols --&gt; Stratospheric Volcanic\nStratospheric volcanic forcing\n26.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "26.2. Historical Explosive Volcanic Aerosol Implementation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow explosive volcanic aerosol is implemented in historical simulations", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Type A\" \n# \"Type B\" \n# \"Type C\" \n# \"Type D\" \n# \"Type E\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "26.3. Future Explosive Volcanic Aerosol Implementation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow explosive volcanic aerosol is implemented in future simulations", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Type A\" \n# \"Type B\" \n# \"Type C\" \n# \"Type D\" \n# \"Type E\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "26.4. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "27. Radiative Forcings --&gt; Aerosols --&gt; Sea Salt\nSea salt forcing\n27.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "27.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "28. Radiative Forcings --&gt; Other --&gt; Land Use\nLand use forcing\n28.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "28.2. Crop Change Only\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nLand use change represented via crop change only?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "28.3. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "29. Radiative Forcings --&gt; Other --&gt; Solar\nSolar forcing\n29.1. 
Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow solar forcing is provided", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"irradiance\" \n# \"proton\" \n# \"electron\" \n# \"cosmic ray\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "29.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
jakevdp/nfft
notebooks/Benchmarks.ipynb
mit
[ "Benchmarks\nThis notebook contains benchmarks against the pynfft package, which is a Python wrapper of Fortran code", "import numpy as np\n\n# Import from main directory\nimport os, sys\nsys.path.append(os.path.abspath('..'))\n\nimport nfft\nnfft.__version__\n\nimport pynfft\npynfft.__version__", "Benchmarking the Forward Transform\nDefine some test data:", "def make_forward_data(M, N):\n x = -0.5 + np.random.rand(M)\n f_hat = np.random.randn(N) + 1j * np.random.randn(N)\n return x, f_hat", "Define a utility function around pynfft:", "def pynfft_forward(x, f_hat):\n M = len(x)\n N = len(f_hat)\n plan = pynfft.nfft.NFFT(N, M)\n plan.x = x\n plan.precompute()\n plan.f_hat = f_hat\n # Need copy because of bug in pynfft 1.x\n # See https://github.com/ghisvail/pyNFFT/issues/57\n return plan.trafo().copy()\n\nx, f_hat = make_forward_data(1000, 100000)\n\nout1 = nfft.nfft(x, f_hat)\nout2 = pynfft_forward(x, f_hat)\nnp.allclose(out1, out2)\n\n%timeit nfft.nfft(x, f_hat)\n%timeit pynfft_forward(x, f_hat)", "Benchmarking the Adjoint Transform\nDefine some test data:", "def make_adjoint_data(M):\n x = -0.5 + np.random.rand(M)\n f = np.random.randn(M) + 1j * np.random.randn(M)\n return x, f", "Define a utility function around pynfft:", "def pynfft_adjoint(x, f, N):\n M = len(x)\n plan = pynfft.nfft.NFFT(N, M)\n plan.x = x\n plan.precompute()\n plan.f = f\n # Need copy because of bug in pynfft 1.x\n # See https://github.com/ghisvail/pyNFFT/issues/57\n return plan.adjoint().copy()\n\nx, f = make_adjoint_data(1000)\nN = 100000\n\nout1 = nfft.nfft_adjoint(x, f, N)\nout2 = pynfft_adjoint(x, f, N)\n\nnp.allclose(out1, out2)\n\n%timeit nfft.nfft_adjoint(x, f, N, sigma=3)\n%timeit pynfft_adjoint(x, f, N)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
tuanavu/python-cookbook-3rd
notebooks/ch01/12_determine_the_top_n_items_occurring_in_a_list.ipynb
mit
[ "Determining the Most Frequently Occurring Items in a Sequence\nProblem\n\nDetermine the most frequently occurring items in the sequence.\n\nSolution\n\nmost_common() method in collections.Counter", "words = [\n 'look', 'into', 'my', 'eyes', 'look', 'into', 'my', 'eyes',\n 'the', 'eyes', 'the', 'eyes', 'the', 'eyes', 'not', 'around', 'the',\n 'eyes', \"don't\", 'look', 'around', 'the', 'eyes', 'look', 'into',\n 'my', 'eyes', \"you're\", 'under'\n]\n\nfrom collections import Counter\nword_counts = Counter(words)\ntop_three = word_counts.most_common(3)\nprint(top_three)\n# outputs [('eyes', 8), ('the', 5), ('look', 4)]\n\nprint(word_counts['not'])\nprint(word_counts['eyes'])", "Discussion\n\nIncrement the count manually", "# morewords = ['why','are','you','not','looking','in','my','eyes']\n# for word in morewords:\n# word_counts[word] += 1", "Update word counts using update()", "morewords = ['why','are','you','not','looking','in','my','eyes']\nword_counts.update(morewords)\nprint(word_counts.most_common(3))", "You can use Counter to do mathematical operations.", "a = Counter(words)\nb = Counter(morewords)\n\nprint(a)\nprint(b)\n\n# Combine counts\nc = a + b\nc\n\n# Subtract counts\nd = a - b\nd" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
jasag/Phytoliths-recognition-system
code/notebooks/Phytoliths_Classifier/Phytoliths_Recognition.ipynb
bsd-3-clause
[ "<div Style=\"text-align: center;line-height: 30px;font-size:32px;font-weight: bold\"> Reconocimiento automático de fitolitos</div>\n\nEn este notebook comprobaremos el reconocimiento de fitolitos en una imagen ejemplo, para facilmente comprobar el portencial de nuestro clasificador.", "# Imports\nimport pickle\n\n%matplotlib inline \n#para dibujar en el propio notebook\nimport numpy as np #numpy como np\nimport matplotlib.pyplot as plt #matplotlib como plot\n\nfrom skimage import io\nfrom skimage.transform import rescale\nfrom skimage.color import rgb2gray\nfrom skimage.io import imshow\n\nfrom skimage.feature import daisy", "Obtenemos nuestro clasificador", "path = '../../rsc/obj/'\n\ncls_path = path + 'cls.sav'\ncluster_path = path + 'cluster.sav'\n\ncls = pickle.load(open(cls_path, 'rb'))\ncluster = pickle.load(open(cluster_path, 'rb'))", "Obtenemos la imagen ejemplo", "img_path = '../../rsc/img/Default/2017_5_17_17_54Image_746.jpg'\n# img_path = '../../rsc/img/Default/2017_5_17_18_17Image_803.jpg'\n# img_path = '../../rsc/img/Default/2017_5_17_16_38Image_483.jpg'\n# img_path = '../../rsc/img/Default/2017_5_17_18_9Image_7351.jpg'\n# img_path = '../../rsc/img/Default/2017_5_17_15_27Image_100.jpg'\nimage = rescale(io.imread(img_path), 0.5)\nimage = rgb2gray(image)\n\nimshow(image)", "Definimos algunas funciones necesarias", "def predict_image(imgTest):\n global cluster\n global cls\n \n num_centers = len(cluster.cluster_centers_)\n testInstances = []\n \n \n features = daisy(imgTest)\n numFils, numCols, sizeDesc = features.shape\n features = features.reshape((numFils*numCols,sizeDesc))\n \n \n \n pertenencias=cluster.predict(features)\n # extrae histograma\n bovw_representation, _ = np.histogram(pertenencias, bins=num_centers, range=(0,num_centers-1))\n # añade al conjunto de entrenamiento final\n testInstances.append(bovw_representation)\n \n testInstances = np.array(testInstances)\n \n return cls.predict_proba(testInstances)\n\ndef sliding_window(image, 
alto,ancho, stepSize):\n # slide a window across the image\n for y in range(0, image.shape[0], stepSize):\n for x in range(0, image.shape[1], stepSize):\n # yield the current window\n #print(alto,ancho)\n #print(image[y:y + alto, x:x + ancho].shape)\n yield (x, y, image[y:y + alto, x:x + ancho])\n \n \ndef predict_window(img):\n probs = predict_image(img)[0]\n #print(probs,probs[0]>0.5)\n #if probs[0]>0.5:\n # plt.imshow(img)\n \n return probs[1]\n\n\n#stepSize=60\nstepSize=40\n# stepSize=70\n# alto=280\n# ancho=280\nalto=150\nancho=150\nwindowSize = (alto,ancho)\n\ndef predict_sliding_window(image, alto,ancho, stepSize):\n #print(alto,ancho, stepSize)\n \n probs = []\n predichas = []\n \n i = 0\n # loop over the sliding window for each layer of the pyramid\n n_winds = 0 \n for (x, y, window) in sliding_window(image, alto, ancho, stepSize):\n #print(window.shape,alto,ancho)\n if window.shape[0] != alto or window.shape[1] != ancho:\n continue\n n_winds+=1\n \n print(\"hay \",n_winds,\" ventanas\")\n \n for (x, y, window) in sliding_window(image, alto,ancho, stepSize):\n # if the window does not meet our desired window size, ignore it\n if window.shape[0] != alto or window.shape[1] != ancho:\n continue \n \n \n \n i+=1 \n if i%10==0:\n print(\"Procesada ventana \"+str(i)+\" de \"+str(n_winds),end=\"\\r\")\n \n # THIS IS WHERE YOU WOULD PROCESS YOUR WINDOW, SUCH AS APPLYING A\n # MACHINE LEARNING CLASSIFIER TO CLASSIFY THE CONTENTS OF THE\n # WINDOW\n prob = predict_window(window)\n if prob>0.5:\n \n probs.append(prob) \n # x1 ,y1, x2, y2\n box = (x, y, x+ancho, y+alto)\n predichas.append(box)\n \n return probs,np.array(predichas)\n\ndef non_max_suppression(boxes, probs=None, overlapThresh=0.3):\n # if there are no boxes, return an empty list\n if len(boxes) == 0:\n return []\n\n # if the bounding boxes are integers, convert them to floats -- this\n # is important since we'll be doing a bunch of divisions\n if boxes.dtype.kind == \"i\":\n boxes = 
boxes.astype(\"float\")\n\n # initialize the list of picked indexes\n pick = []\n\n # grab the coordinates of the bounding boxes\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n\n # compute the area of the bounding boxes and grab the indexes to sort\n # (in the case that no probabilities are provided, simply sort on the\n # bottom-left y-coordinate)\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n idxs = y2\n\n # if probabilities are provided, sort on them instead\n if probs is not None:\n idxs = probs\n\n # sort the indexes\n idxs = np.argsort(idxs)\n\n # keep looping while some indexes still remain in the indexes list\n while len(idxs) > 0:\n # grab the last index in the indexes list and add the index value\n # to the list of picked indexes\n last = len(idxs) - 1\n i = idxs[last]\n pick.append(i)\n\n # find the largest (x, y) coordinates for the start of the bounding\n # box and the smallest (x, y) coordinates for the end of the bounding\n # box\n xx1 = np.maximum(x1[i], x1[idxs[:last]])\n yy1 = np.maximum(y1[i], y1[idxs[:last]])\n xx2 = np.minimum(x2[i], x2[idxs[:last]])\n yy2 = np.minimum(y2[i], y2[idxs[:last]])\n\n # compute the width and height of the bounding box\n w = np.maximum(0, xx2 - xx1 + 1)\n h = np.maximum(0, yy2 - yy1 + 1)\n\n # compute the ratio of overlap\n overlap = (w * h) / area[idxs[:last]]\n\n # delete all indexes from the index list that have overlap greater\n # than the provided overlap threshold\n idxs = np.delete(idxs, np.concatenate(([last],\n np.where(overlap > overlapThresh)[0])))\n\n # return only the bounding boxes that were picked\n return boxes[pick].astype(\"int\")\n\nprobs0,predichas0 = predict_sliding_window(image, alto,ancho, stepSize)\n\nprobs0 = np.array(probs0)\npredichas0 = np.array(predichas0)\npredichas0 = predichas0[probs0>0.995]\nprobs0 = probs0[probs0>0.995]\nboxes = non_max_suppression(predichas0, probs=probs0, overlapThresh=0.3)\nprint(boxes)\n\nimport matplotlib.patches as patches\n\nfig = 
plt.figure(figsize=(5, 5))\n\nax = fig.add_subplot(111)\nax.imshow(image, cmap=plt.get_cmap('gray'))\n\nfor box in boxes:\n # Create a Rectangle patch\n rect = patches.Rectangle((box[0],box[1]),\n box[2]-box[0],\n box[3]-box[1],\n linewidth=1,edgecolor='g',facecolor='none')\n\n # Add the patch to the Axes\n ax.add_patch(rect)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
IACS-CS-207/cs207-F17
lectures/L7/L7.ipynb
mit
[ "Lecture 7\nSoftware design, documentation, and testing\nDesign of a program\nFrom the Practice of Programming:\n\nThe essence of design is to balance competing goals and constraints. Although there may be many tradeoffs when one is writing a small self-contained system, the ramifications of particular choices remain within the system and affect only the individual programmer. But when code is to be used by others, decisions have wider repercussions.\n\nSoftware Design Desirables\n\nDocumentation\nnames (understandable names)\npre+post conditions or requirements\n\n\nMaintainability\nExtensibility\nModularity and Encapsulation\n\n\nPortability\nInstallability\nGenerality\nData Abstraction (change types, change data structures)\nFunctional Abstraction (the object model, overloading)\nRobustness\nProvability: Invariants, preconditions, postconditions\nUser Proofing, Adversarial Inputs \n\n\n\n\nEfficiency\nUse of appropriate algorithms and data structures\nOptimization (but no premature optimization)\n\n\n\nIssues to be aware of:\n\nInterfaces\n\nYour program is being designed to be used by someone: either an end user, another programmer, or even yourself. This interface is a contract between you and the user.\n\nHiding Information\n\nThere is information hiding between layers (a higher up layer can be more abstract). Encapsulation, abstraction, and modularization, are some of the techniques used here.\n\nResource Management\n\nResource management issues: who allocates storage for data structures. Generally we want resource allocation/deallocation to happen in the same layer.\n\nHow to Deal with Errors\n\nDo we return special values? Do we throw exceptions? Who handles them?\nInterface principles\nInterfaces should:\n\nhide implementation details\nhave a small set of operations exposed, the smallest possible, and these should be orthogonal. 
Be stingy with the user.\nbe transparent with the user in what goes on behind the scenes\nbe consistent internally: library functions should have similar signature, classes similar methods, and external programs should have the same cli flags\n\n Testing should deal with ALL of the issues above, and each layer ought to be tested separately . \nTesting\nThere are different kinds of tests inspired by the interface principles just described.\n\n\nacceptance tests verify that a program meets a customer's expectations. In a sense these are a test of the interface to the customer: does the program do everything you promised the customer it would do?\n\n\nunit tests are tests which test a unit of the program for use by another unit. These could test the interface for a client, but they must also test the internal functions that you want to use.\n\n\nExploratory testing, regression testing, and integration testing are done in both of these categories, with the latter trying to combine layers and subsystems, not necessarily at the level of an entire application. 
\nOne can also performance test, random and exploratorily test, and stress test a system (to create adversarial situations).\nDocumentation\nDocumentation is a contract between a user (client) and an implementor (library writer).\nWrite good documentation\n\nFollow standards of PEP 257\nClearly outline the inputs, outputs, default values, and expected behavior\nInclude basic usage examples when possible", "def quad_roots(a=1.0, b=2.0, c=0.0):\n \"\"\"Returns the roots of a quadratic equation: ax^2 + bx + c = 0.\n \n INPUTS\n =======\n a: float, optional, default value is 1\n Coefficient of quadratic term\n b: float, optional, default value is 2\n Coefficient of linear term\n c: float, optional, default value is 0\n Constant term\n \n RETURNS\n ========\n roots: 2-tuple of complex floats\n Has the form (root1, root2) unless a = 0 \n in which case a ValueError exception is raised\n \n EXAMPLES\n =========\n >>> quad_roots(1.0, 1.0, -12.0)\n ((3+0j), (-4+0j))\n \"\"\"\n import cmath # Can return complex numbers from square roots\n if a == 0:\n raise ValueError(\"The quadratic coefficient is zero. This is not a quadratic equation.\")\n else:\n sqrtdisc = cmath.sqrt(b * b - 4.0 * a * c)\n r1 = -b + sqrtdisc\n r2 = -b - sqrtdisc\n return (r1 / 2.0 / a, r2 / 2.0 / a)", "Documenting Invariants\n\nAn invariant is something that is true at some point in the code.\nInvariants and the contract are what we use to guide our implementation.\nPre-conditions and post-conditions are special cases of invariants.\nPre-conditions are true at function entry. They constrain the user.\nPost-conditions are true at function exit. 
They constrain the implementation.\n\nYou can change implementations, stuff under the hood, etc, but once the software is in the wild you can't change the pre-conditions and post-conditions since the client user is depending upon them.", "def quad_roots(a=1.0, b=2.0, c=0.0):\n \"\"\"Returns the roots of a quadratic equation: ax^2 + bx + c.\n \n INPUTS\n =======\n a: float, optional, default value is 1\n Coefficient of quadratic term\n b: float, optional, default value is 2\n Coefficient of linear term\n c: float, optional, default value is 0\n Constant term\n \n RETURNS\n ========\n roots: 2-tuple of complex floats\n Has the form (root1, root2) unless a = 0 \n in which case a ValueError exception is raised\n\n NOTES\n =====\n PRE: \n - a, b, c have numeric type\n - three or fewer inputs\n POST:\n - a, b, and c are not changed by this function\n - raises a ValueError exception if a = 0\n - returns a 2-tuple of roots\n\n EXAMPLES\n =========\n >>> quad_roots(1.0, 1.0, -12.0)\n ((3+0j), (-4+0j))\n \"\"\"\n import cmath # Can return complex numbers from square roots\n if a == 0:\n raise ValueError(\"The quadratic coefficient is zero. This is not a quadratic equation.\")\n else:\n sqrtdisc = cmath.sqrt(b * b - 4.0 * a * c)\n r1 = -b + sqrtdisc\n r2 = -b - sqrtdisc\n return (r1 / 2.0 / a, r2 / 2.0 / a)", "Accessing Documentation (1)\n\nDocumentation can be accessed by calling the __doc__ special method\nSimply calling function_name.__doc__ will give a pretty ugly output\nYou can make it cleaner by making use of splitlines()", "quad_roots.__doc__.splitlines()", "Accessing Documentation (2)\nA nice way to access the documentation is to use the pydoc module.", "import pydoc\npydoc.doc(quad_roots)", "Testing\nThere are different kinds of tests inspired by the interface principles just described.\n\n\nacceptance tests verify that a program meets a customer's expectations. 
In a sense these are a test of the interface to the customer: does the program do everything you promised the customer it would do?\n\n\nunit tests are tests which test a unit of the program for use by another unit. These could test the interface for a client, but they must also test the internal functions that you want to use.\n\n\nExploratory testing, regression testing, and integration testing are done in both of these categories, with the latter trying to combine layers and subsystems, not necessarily at the level of an entire application. \nOne can also performance test, random and exploratorily test, and stress test a system (to create adversarial situations).\nTesting of a program\nTest as you write your program.\nThis is so important that I repeat it.\nTest as you go.\nFrom The Practice of Programming:\n\nThe effort of testing as you go is minimal and pays off handsomely. Thinking about testing as you write a program will lead to better code, because that's when you know best what the code should do. If instead you wait until something breaks, you will probably have forgotten how the code works. Working under pressure, you will need to figure it out again, which takes time, and the fixes will be less thorough and more fragile because your refreshed understanding is likely to be incomplete.\n\nTest Driven Develoment\ndoctest\nThe doctest module allows us to test pieces of code that we put into our doc. string.\nThe doctests are a type of unit test, which document the interface of the function by example.\nDoctests are an example of a test harness. We write some tests and execute them all at once. Note that individual tests can be written and executed individually in an ad-hoc manner. However, that is especially inefficient.\nOf course, too many doctests clutter the documentation section.\nThe doctests should not cover every case; they should describe the various ways a class or function can be used. 
There are better ways to do more comprehensive testing.", "import doctest\ndoctest.testmod(verbose=True)", "Principles of Testing\n\nTest simple parts first\nTest code at its boundaries\nThe idea is that most errors happen at data boundaries such as empty input, single input item, exactly full array, wierd values, etc. If a piece of code works at the boundaries, its likely to work elsewhere...\nProgram defensively\n\"Program defensively. A useful technique is to add code to handle \"can't happen\" cases, situations where it is not logically possible for something to happen but (because of some failure elsewhere) it might anyway. As an example, a program processing grades might expect that there would be no negative or huge values but should check anyway.\n\n\nAutomate using a test harness\nTest incrementally\n\nTest simple parts first:\nA test for the quad_roots function:", "def test_quadroots():\n assert quad_roots(1.0, 1.0, -12.0) == ((3+0j), (-4+0j))\n\ntest_quadroots()", "Test at the boundaries\nHere we write a test to handle the crazy case in which the user passes strings in as the coefficients.", "def test_quadroots_types():\n try:\n quad_roots(\"\", \"green\", \"hi\")\n except TypeError as err:\n assert(type(err) == TypeError)\n\ntest_quadroots_types()", "We can also check to make sure the $a=0$ case is handled okay:", "def test_quadroots_zerocoeff():\n try:\n quad_roots(a=0.0)\n except ValueError as err:\n assert(type(err) == ValueError)\n\ntest_quadroots_zerocoeff()", "When you get an error\nIt could be that:\n\nyou messed up an implementation\nyou did not handle a case\nyour test was messed up (be careful of this)\n\nIf the error was not found in an existing test, create a new test that represents the problem before you do anything else. The test should capture the essence of the problem: this process itself is useful in uncovering bugs. Then this error may even suggest more tests.\nAutomate Using a Test Harness\nGreat! So we've written some ad-hoc tests. 
It's pretty clunky. We should use a test harness.\nAs mentioned already, doctest is a type of test harness. It has it's uses, but gets messy quickly.\nWe'll talk about pytest here.\nPreliminaries\n\nThe idea is that our code consists of several different pieces (or objects)\nThe objects are grouped based on how they are related to each other\ne.g. you may have a class that contains different statistical operations\nWe'll get into this idea much more in the coming weeks\nFor now, we can think of having related functions all in one file\nWe want to test each of those functions\nTests should include checking correctness of output, correctness of input, fringe cases, etc\n\nI will work in the Jupyter notebook for demo purposes.\nTo create and save a file in the Jupyter notebook, you type %%file file_name.py.\nI highly recommend that you actually write your code using a text editor (like vim) or an IDE like Sypder.\nThe toy examples that we've been working with in the class so far can be done in Jupyter, but a real project can be done more efficiently through other means.", "%%file roots.py\ndef quad_roots(a=1.0, b=2.0, c=0.0):\n \"\"\"Returns the roots of a quadratic equation: ax^2 + bx + c = 0.\n \n INPUTS\n =======\n a: float, optional, default value is 1\n Coefficient of quadratic term\n b: float, optional, default value is 2\n Coefficient of linear term\n c: float, optional, default value is 0\n Constant term\n \n RETURNS\n ========\n roots: 2-tuple of complex floats\n Has the form (root1, root2) unless a = 0 \n in which case a ValueError exception is raised\n \n EXAMPLES\n =========\n >>> quad_roots(1.0, 1.0, -12.0)\n ((3+0j), (-4+0j))\n \"\"\"\n import cmath # Can return complex numbers from square roots\n if a == 0:\n raise ValueError(\"The quadratic coefficient is zero. 
This is not a quadratic equation.\")\n else:\n sqrtdisc = cmath.sqrt(b * b - 4.0 * a * c)\n r1 = -b + sqrtdisc\n r2 = -b - sqrtdisc\n return (r1 / 2.0 / a, r2 / 2.0 / a)", "Let's put our tests into one file.", "%%file test_roots.py\nimport roots\n\ndef test_quadroots_result():\n assert roots.quad_roots(1.0, 1.0, -12.0) == ((3+0j), (-4+0j))\n\ndef test_quadroots_types():\n try:\n roots.quad_roots(\"\", \"green\", \"hi\")\n except TypeError as err:\n assert(type(err) == TypeError)\n\ndef test_quadroots_zerocoeff():\n try:\n roots.quad_roots(a=0.0)\n except ValueError as err:\n assert(type(err) == ValueError)\n\n!pytest", "Code Coverage\nIn some sense, it would be nice to somehow check that every line in a program has been covered by a test. If you could do this, you might know that a particular line has not contributed to making something wrong. But this is hard to do: it would be hard to use normal input data to force a program to go through particular statements. So we settle for testing the important lines. The pytest-cov module makes sure that this works.\nCoverage does not mean that every edge case has been tried, but rather, every critical statement has been.\nLet's add a new function to our roots file.", "%%file roots.py\ndef linear_roots(a=1.0, b=0.0):\n \"\"\"Returns the roots of a linear equation: ax+ b = 0.\n \n INPUTS\n =======\n a: float, optional, default value is 1\n Coefficient of linear term\n b: float, optional, default value is 0\n Coefficient of constant term\n \n RETURNS\n ========\n roots: 1-tuple of real floats\n Has the form (root) unless a = 0 \n in which case a ValueError exception is raised\n \n EXAMPLES\n =========\n >>> linear_roots(1.0, 2.0)\n -2.0\n \"\"\"\n if a == 0:\n raise ValueError(\"The linear coefficient is zero. 
This is not a linear equation.\")\n else:\n return ((-b / a))\n\ndef quad_roots(a=1.0, b=2.0, c=0.0):\n \"\"\"Returns the roots of a quadratic equation: ax^2 + bx + c = 0.\n \n INPUTS\n =======\n a: float, optional, default value is 1\n Coefficient of quadratic term\n b: float, optional, default value is 2\n Coefficient of linear term\n c: float, optional, default value is 0\n Constant term\n \n RETURNS\n ========\n roots: 2-tuple of complex floats\n Has the form (root1, root2) unless a = 0 \n in which case a ValueError exception is raised\n \n EXAMPLES\n =========\n >>> quad_roots(1.0, 1.0, -12.0)\n ((3+0j), (-4+0j))\n \"\"\"\n import cmath # Can return complex numbers from square roots\n if a == 0:\n raise ValueError(\"The quadratic coefficient is zero. This is not a quadratic equation.\")\n else:\n sqrtdisc = cmath.sqrt(b * b - 4.0 * a * c)\n r1 = -b + sqrtdisc\n r2 = -b - sqrtdisc\n return (r1 / 2.0 / a, r2 / 2.0 / a)", "Run the tests and check code coverage", "!pytest --cov", "Run the tests, report code coverage, and report missing lines.", "!pytest --cov --cov-report term-missing", "Run tests, including the doctests, report code coverage, and report missing lines.", "!pytest --doctest-modules --cov --cov-report term-missing", "Let's put some tests in for the linear roots function.", "%%file test_roots.py\nimport roots\n\ndef test_quadroots_result():\n assert roots.quad_roots(1.0, 1.0, -12.0) == ((3+0j), (-4+0j))\n\ndef test_quadroots_types():\n try:\n roots.quad_roots(\"\", \"green\", \"hi\")\n except TypeError as err:\n assert(type(err) == TypeError)\n\ndef test_quadroots_zerocoeff():\n try:\n roots.quad_roots(a=0.0)\n except ValueError as err:\n assert(type(err) == ValueError)\n\ndef test_linearoots_result():\n assert roots.linear_roots(2.0, -3.0) == 1.5\n\ndef test_linearroots_types():\n try:\n roots.linear_roots(\"ocean\", 6.0)\n except TypeError as err:\n assert(type(err) == TypeError)\n\ndef test_linearroots_zerocoeff():\n try:\n 
roots.linear_roots(a=0.0)\n except ValueError as err:\n assert(type(err) == ValueError)", "Now run the tests and check code coverage.", "!pytest --doctest-modules --cov --cov-report term-missing" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ioam/holoviews
examples/user_guide/Deploying_Bokeh_Apps.ipynb
bsd-3-clause
[ "Deploying Bokeh Apps", "import numpy as np\nimport holoviews as hv\nhv.extension('bokeh')", "Purpose\nHoloViews is an incredibly convenient way of working interactively and exploratively within a notebook or commandline context. However, once you have implemented a polished interactive dashboard or some other complex interactive visualization, you will often want to deploy it outside the notebook to share with others who may not be comfortable with the notebook interface. \nIn the simplest case, to visualize some HoloViews container or element obj, you can export it to a standalone HTML file for sharing using the save function of the Bokeh renderer:\nhv.save(obj, 'out.html')\nThis command will generate a file out.html that you can put on any web server, email directly to colleagues, etc.; it is fully self-contained and does not require any Python server to be installed or running. \nUnfortunately, a static approach like this cannot support any HoloViews object that uses DynamicMap (either directly or via operations that return DynamicMaps like decimate, datashade, and rasterize). Anything with DynamicMap requires a live, running Python server to dynamically select and provide the data for the various parameters that can be selected by the user. Luckily, when you need a live Python process during the visualization, the Bokeh server provides a very convenient way of deploying HoloViews plots and interactive dashboards in a scalable and flexible manner. 
The Bokeh server allows all the usual interactions that HoloViews lets you define and more including:\n\nresponding to plot events and tool interactions via Linked Streams\ngenerating and interacting with plots via the usual widgets that HoloViews supports for HoloMap and DynamicMap objects.\nusing periodic and timeout events to drive plot updates\ncombining HoloViews plots with custom Bokeh plots to quickly write highly customized apps.\n\nOverview\nIn this guide we will cover how we can deploy a Bokeh app from a HoloViews plot in a number of different ways:\n\n\nInline from within the Jupyter notebook\n\n\nStarting a server interactively and open it in a new browser window.\n\n\nFrom a standalone script file\n\n\nCombining HoloViews and Bokeh models to create a more customized app\n\n\nIf you have read a bit about HoloViews you will know that HoloViews objects are not themselves plots, instead they contain sufficient data and metadata allowing them to be rendered automatically in a notebook context. In other words, when a HoloViews object is evaluated a backend specific Renderer converts the HoloViews object into Bokeh models, a Matplotlib figure or a Plotly graph. This intermediate representation is then rendered as an image or as HTML with associated Javascript, which is what ends up being displayed.\nThe workflow\nThe most convenient way to work with HoloViews is to iteratively improve a visualization in the notebook. Once you have developed a visualization or dashboard that you would like to deploy you can use the BokehRenderer to export the visualization as illustrated above, or you can deploy it as a Bokeh server app. 
\nHere we will create a small interactive plot, using Linked Streams, which mirrors the points selected using box- and lasso-select tools in a second plot and computes some statistics:", "# Declare some points\npoints = hv.Points(np.random.randn(1000,2 ))\n\n# Declare points as source of selection stream\nselection = hv.streams.Selection1D(source=points)\n\n# Write function that uses the selection indices to slice points and compute stats\ndef selected_info(index):\n arr = points.array()[index]\n if index:\n label = 'Mean x, y: %.3f, %.3f' % tuple(arr.mean(axis=0))\n else:\n label = 'No selection'\n return points.clone(arr, label=label).opts(color='red')\n\n# Combine points and DynamicMap\nselected_points = hv.DynamicMap(selected_info, streams=[selection])\nlayout = points.opts(tools=['box_select', 'lasso_select']) + selected_points\n\nlayout", "<img src='https://assets.holoviews.org/gifs/examples/streams/bokeh/point_selection1d.gif'></img>\nWorking with the BokehRenderer\nWhen working with Bokeh server or wanting to manipulate a backend specific plot object you will have to use a HoloViews Renderer directly to convert the HoloViews object into the backend specific representation. Therefore we will start by getting a hold of a BokehRenderer:", "renderer = hv.renderer('bokeh')\nprint(renderer)", "python\nBokehRenderer()\nAll Renderer classes in HoloViews are so called ParameterizedFunctions; they provide both classmethods and instance methods to render an object. You can easily create a new Renderer instance using the .instance method:", "renderer = renderer.instance(mode='server')", "Renderers can also have different modes. In this case we will instantiate the renderer in 'server' mode, which tells the Renderer to render the HoloViews object to a format that can easily be deployed as a server app. 
Before going into more detail about deploying server apps we will quickly remind ourselves how the renderer turns HoloViews objects into Bokeh models.\nFigures\nThe BokehRenderer converts the HoloViews object to a HoloViews Plot, which holds the Bokeh models that will be rendered to screen. As a very simple example we can convert a HoloViews Image to a HoloViews plot:", "hvplot = renderer.get_plot(layout)\nprint(hvplot)", "&lt;LayoutPlot LayoutPlot01811&gt;\nUsing the state attribute on the HoloViews plot we can access the Bokeh Column model, which we can then work with directly.", "hvplot.state", "Column(id='1570', ...)\nIn the background this is how HoloViews converts any HoloViews object into Bokeh models, which can then be converted to embeddable or standalone HTML and be rendered in the browser. This conversion is usually done in the background using the figure_data method:", "html = renderer._figure_data(hvplot, 'html')", "Bokeh Documents\nIn Bokeh the Document is the basic unit at which Bokeh models (such as plots, layouts and widgets) are held and serialized. The serialized JSON representation is then sent to BokehJS on the client-side browser. 
When in 'server' mode the BokehRenderer will automatically return a server Document:", "renderer(layout)", "(&lt;bokeh.document.Document at 0x11afc7590&gt;,\n {'file-ext': 'html', 'mime_type': u'text/html'})\nWe can also easily use the server_doc method to get a Bokeh Document, which does not require you to make an instance in 'server' mode.", "doc = renderer.server_doc(layout)\ndoc.title = 'HoloViews App'", "In the background however, HoloViews uses the Panel library to render components to a Bokeh model which can be rendered in the notebook, to a file or on a server:", "import panel as pn\n\nmodel = pn.panel(layout).get_root()\nmodel", "For more information on the interaction between Panel and HoloViews see the the Panel documentation.\nDeploying with panel serve\nDeployment from a script with panel serve is one of the most common ways to deploy a Bokeh app. Any .py or .ipynb file that attaches a plot to Bokeh's curdoc can be deployed using panel serve. The easiest way to do this is using wrapping the HoloViews component in Panel using pn.panel(hvobj) and then calling the panel_obj.servable() method, which accepts any HoloViews object ensures that the plot is discoverable by Panel and the underlying Bokeh server. See below to see a full standalone script:\n```python\nimport numpy as np\nimport panel as pn\nimport holoviews as hv\nimport holoviews.plotting.bokeh\npoints = hv.Points(np.random.randn(1000,2 )).opts(tools=['box_select', 'lasso_select'])\nselection = hv.streams.Selection1D(source=points)\ndef selected_info(index):\n arr = points.array()[index]\n if index:\n label = 'Mean x, y: %.3f, %.3f' % tuple(arr.mean(axis=0))\n else:\n label = 'No selection'\n return points.clone(arr, label=label).opts(color='red')\nlayout = points + hv.DynamicMap(selected_info, streams=[selection])\npn.panel(layout).servable(title='HoloViews App')\n```\nIn just a few steps we can iteratively refine in the notebook to a deployable Panel app. 
Note also that we can also deploy an app directly from a notebook. By using .servable() in a notebook any regular .ipynb file can be made into a valid Panel/Bokeh app, which can be served with panel serve example.ipynb.\nIt is also possible to create a Bokeh Document more directly working with the underlying Bokeh representation instead. This in itself is sufficient to make the plot servable using bokeh serve:", "hv.renderer('bokeh').server_doc(layout)", "In addition to starting a server from a script we can also start up a server interactively, so let's do a quick deep dive into Bokeh Application and Server objects and how we can work with them from within HoloViews.\nBokeh Server\nTo start a Bokeh server directly from a notebook we can also use Panel, specifically we'll use the panel.serve function. We'll define a DynamicMap of a sine Curve varying by frequency, phase and an offset and then create a server instance using Panel:", "def sine(frequency, phase, amplitude):\n xs = np.linspace(0, np.pi*4)\n return hv.Curve((xs, np.sin(frequency*xs+phase)*amplitude)).opts(width=800)\n\nranges = dict(frequency=(1, 5), phase=(-np.pi, np.pi), amplitude=(-2, 2), y=(-2, 2))\ndmap = hv.DynamicMap(sine, kdims=['frequency', 'phase', 'amplitude']).redim.range(**ranges)\n\nserver = pn.serve(dmap, start=False, show=False)", "&lt;bokeh.server.server.Server object at 0x10b3a0510&gt;\nNext we can define a callback on the IOLoop that will open the server app in a new browser window and actually start the app (and if outside the notebook the IOLoop):", "server.start()\nserver.show('/')\n\n# Outside the notebook ioloop needs to be started\n# from tornado.ioloop import IOLoop\n# loop = IOLoop.current()\n# loop.start() ", "After running the cell above you should have noticed a new browser window popping up displaying our plot. 
Once you are done playing with it you can stop it with:", "server.stop()", "We can achieve the equivalent using the .show method on a Panel object:", "server = pn.panel(dmap).show()", "<img width='80%' src=\"https://assets.holoviews.org/gifs/guides/user_guide/Deploying_Bokeh_Apps/bokeh_server_new_window.png\"></img>\nWe will once again stop this Server before continuing:", "server.stop()", "Inlining apps in the notebook\nInstead of displaying our app in a new browser window we can also display an app inline in the notebook simply by using the .app method on Panel object. The server app will be killed whenever you rerun or delete the cell that contains the output. Additionally, if your Jupyter Notebook server is not running on the default address or port (localhost:8888) supply the websocket origin, which should match the first part of the URL of your notebook:", "pn.panel(dmap).app('localhost:8888')", "<img width='80%' src='https://assets.holoviews.org/gifs/guides/user_guide/Deploying_Bokeh_Apps/bokeh_server_inline_simple.gif'></img>\nPeriodic callbacks\nOne of the most important features of deploying apps is the ability to attach asynchronous, periodic callbacks, which update the plot. The simplest way of achieving this is to attach a Counter stream on the plot which is incremented on each callback. As a simple demo we'll simply compute a phase offset from the counter value, animating the sine wave:", "def sine(counter):\n phase = counter*0.1%np.pi*2\n xs = np.linspace(0, np.pi*4)\n return hv.Curve((xs, np.sin(xs+phase))).opts(width=800)\n\ncounter = hv.streams.Counter()\ndmap = hv.DynamicMap(sine, streams=[counter])\n\ndmap_pane = pn.panel(dmap)\n\ndmap_pane.app('localhost:8891')", "<img width='80%' src='https://assets.holoviews.org/gifs/guides/user_guide/Deploying_Bokeh_Apps/bokeh_server_periodic.gif'></img>\nOnce we have created a Panel object we can call the add_periodic_callback method to set up a periodic callback. 
The first argument to the method is the callback and the second argument period specified in milliseconds. As soon as we start this callback you should see the Curve above become animated.", "def update():\n counter.event(counter=counter.counter+1)\n\ncb = dmap_pane.add_periodic_callback(update, period=200)", "Once started we can stop and start it at will using the .stop and .start methods:", "cb.stop()", "Combining Bokeh Application and Flask Application\nWhile Panel and Bokeh are great ways to create an application often we want to leverage the simplicity of a Flask server. With Flask we can easily embed a HoloViews, Bokeh and Panel application in a regular website. The main idea for getting Bokeh and Flask to work together is to run both apps on ports and then use Flask to pull the Bokeh Serve session with pull_session from bokeh.client.session.", "def sine(frequency, phase, amplitude):\n xs = np.linspace(0, np.pi*4)\n return hv.Curve((xs, np.sin(frequency*xs+phase)*amplitude)).options(width=800)\n\nranges = dict(frequency=(1, 5), phase=(-np.pi, np.pi), amplitude=(-2, 2), y=(-2, 2))\ndmap = hv.DynamicMap(sine, kdims=['frequency', 'phase', 'amplitude']).redim.range(**ranges)\n\npn.serve(dmap, websocket_origin='localhost:5000', port=5006, show=False)", "We run load up our dynamic map into a Bokeh Application with the parameter allow_websocket_origin=[\"localhost:5000\"]\n```python\nfrom bokeh.client import pull_session\nfrom bokeh.embed import server_session\nfrom flask import Flask, render_template\nfrom flask import send_from_directory\napp = Flask(name)\nlocally creates a page\n@app.route('/')\ndef index():\n with pull_session(url=\"http://localhost:5006/\") as session:\n # generate a script to load the customized session\n script = server_session(session_id=session.id, url='http://localhost:5006')\n # use the script in the rendered page\n return render_template(\"embed.html\", script=script, template=\"Flask\")\nif name == 'main':\n # runs app in debug mode\n 
app.run(port=5000, debug=True)\n```\nNote that in a notebook context we cannot use pull_session but this example demonstrates how we can embed the Bokeh server inside a simple flask app.\nThis is an example of a basic flask app. To find out more about Flask a tutorial can be found on the Flask Quickstart Guide. \nBelow is an example of a basic Flask App that pulls from the Bokeh Application. The Bokeh Application is using Server from Bokeh and IOLoop from tornado to run the app. \n```python\nholoviews.py\nimport holoviews as hv\nimport panel as pn\nimport numpy as np\nhv.extension('bokeh')\ndef sine(frequency, phase, amplitude):\n xs = np.linspace(0, np.pi4)\n return hv.Curve((xs, np.sin(frequencyxs+phase)*amplitude)).options(width=800)\nif name == 'main':\n ranges = dict(frequency=(1, 5), phase=(-np.pi, np.pi), amplitude=(-2, 2), y=(-2, 2))\n dmap = hv.DynamicMap(sine, kdims=['frequency', 'phase', 'amplitude']).redim.range(**ranges)\n pn.serve(dmap, port=5006, allow_websocket_origin=[\"localhost:5000\"], show=False)\n```\n```python\nflaskApp.py\nfrom bokeh.client import pull_session\nfrom bokeh.embed import server_session\nfrom flask import Flask, render_template\nfrom flask import send_from_directory\napp = Flask(name)\nlocally creates a page\n@app.route('/')\ndef index():\n with pull_session(url=\"http://localhost:5006/\") as session:\n # generate a script to load the customized session\n script = server_session(session_id=session.id, url='http://localhost:5006')\n # use the script in the rendered page\n return render_template(\"embed.html\", script=script, template=\"Flask\")\nif name == 'main':\n # runs app in debug mode\n app.run(port=5000, debug=True)\n```\n```html\n<!-- embed.html -->\n\n<!doctype html>\n\n<html lang=\"en\">\n<head>\n <meta charset=\"utf-8\">\n <title>Embedding a Bokeh Server With Flask</title>\n</head>\n<body>\n <div>\n This Bokeh app below served by a Bokeh server that has been embedded\n in another web app framework. 
For more information see the section\n <a target=\"_blank\" href=\"https://bokeh.pydata.org/en/latest/docs/user_guide/server.html#embedding-bokeh-server-as-a-library\">Embedding Bokeh Server as a Library</a>\n in the User's Guide.\n </div>\n {{ script|safe }}\n</body>\n</html>\n```\nIf you wish to replicate navigate to the examples/gallery/apps/flask directory and follow the these steps:\n\nStep One: call python holoviews_app.py in the terminal (this will start the Panel/Bokeh server)\nStep Two: open a new terminal and call python flask_app.py (this will start the Flask application)\nStep Three: go to web browser and type localhost:5000 and the app will appear\n\nCombining HoloViews and Panel or Bokeh Plots/Widgets\nWhile HoloViews provides very convenient ways of creating an app it is not as fully featured as Bokeh itself is. Therefore we often want to extend a HoloViews based app with Panel or Bokeh plots and widgets. Here we will discover to achieve this with both Panel and then the equivalent using pure Bokeh.", "import holoviews as hv\nimport numpy as np\nimport panel as pn\n\n# Create the holoviews app again\ndef sine(phase):\n xs = np.linspace(0, np.pi*4)\n return hv.Curve((xs, np.sin(xs+phase))).opts(width=800)\n\nstream = hv.streams.Stream.define('Phase', phase=0.)()\ndmap = hv.DynamicMap(sine, streams=[stream])\n\nstart, end = 0, np.pi*2\nslider = pn.widgets.FloatSlider(start=start, end=end, value=start, step=0.2, name=\"Phase\")\n\n# Create a slider and play buttons\ndef animate_update():\n year = slider.value + 0.2\n if year > end:\n year = start\n slider.value = year\n\ndef slider_update(event):\n # Notify the HoloViews stream of the slider update \n stream.event(phase=event.new)\n\nslider.param.watch(slider_update, 'value')\n\ndef animate(event):\n if button.name == '► Play':\n button.name = '❚❚ Pause'\n callback.start()\n else:\n button.name = '► Play'\n callback.stop()\n\nbutton = pn.widgets.Button(name='► Play', width=60, 
align='end')\nbutton.on_click(animate)\ncallback = button.add_periodic_callback(animate_update, 50, start=False)\n\napp = pn.Column(\n dmap,\n pn.Row(slider, button)\n)\n\napp", "If instead we want to deploy this we could add .servable as discussed before or use pn.serve. Note however that when using pn.serve all sessions will share the same state therefore it is best to \nwrap the creation of the app in a function which we can then provide to pn.serve. For more detail on deploying Panel applications also see the Panel server deployment guide.\nNow we can reimplement the same example using Bokeh allowing us to compare and contrast the approaches:", "import numpy as np\nimport holoviews as hv\n\nfrom bokeh.io import show, curdoc\nfrom bokeh.layouts import layout\nfrom bokeh.models import Slider, Button\n\nrenderer = hv.renderer('bokeh').instance(mode='server')\n\n# Create the holoviews app again\ndef sine(phase):\n xs = np.linspace(0, np.pi*4)\n return hv.Curve((xs, np.sin(xs+phase))).opts(width=800)\n\nstream = hv.streams.Stream.define('Phase', phase=0.)()\ndmap = hv.DynamicMap(sine, streams=[stream])\n\n# Define valid function for FunctionHandler\n# when deploying as script, simply attach to curdoc\ndef modify_doc(doc):\n # Create HoloViews plot and attach the document\n hvplot = renderer.get_plot(dmap, doc)\n\n # Create a slider and play buttons\n def animate_update():\n year = slider.value + 0.2\n if year > end:\n year = start\n slider.value = year\n\n def slider_update(attrname, old, new):\n # Notify the HoloViews stream of the slider update \n stream.event(phase=new)\n \n start, end = 0, np.pi*2\n slider = Slider(start=start, end=end, value=start, step=0.2, title=\"Phase\")\n slider.on_change('value', slider_update)\n \n callback_id = None\n\n def animate():\n global callback_id\n if button.label == '► Play':\n button.label = '❚❚ Pause'\n callback_id = doc.add_periodic_callback(animate_update, 50)\n else:\n button.label = '► Play'\n 
doc.remove_periodic_callback(callback_id)\n button = Button(label='► Play', width=60)\n button.on_click(animate)\n \n # Combine the holoviews plot and widgets in a layout\n plot = layout([\n [hvplot.state],\n [slider, button]], sizing_mode='fixed')\n \n doc.add_root(plot)\n return doc\n\n# To display in the notebook\nshow(modify_doc, notebook_url='localhost:8888')\n\n# To display in a script\n# doc = modify_doc(curdoc()) ", "<img width='80%' src='https://assets.holoviews.org/gifs/guides/user_guide/Deploying_Bokeh_Apps/bokeh_server_play.gif'></img>\nAs you can see depending on your needs you have complete freedom whether to use just HoloViews and deploy your application, combine it Panel or even with pure Bokeh." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
robertoalotufo/ia898
master/tutorial_numpy_1_2.ipynb
mit
[ "Table of Contents\n<p><div class=\"lev1 toc-item\"><a href=\"#Fatiamento-em-narray-unidimensional\" data-toc-modified-id=\"Fatiamento-em-narray-unidimensional-1\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>Fatiamento em narray unidimensional</a></div><div class=\"lev1 toc-item\"><a href=\"#Inicializando-um-array-unidimensional\" data-toc-modified-id=\"Inicializando-um-array-unidimensional-2\"><span class=\"toc-item-num\">2&nbsp;&nbsp;</span>Inicializando um array unidimensional</a></div><div class=\"lev1 toc-item\"><a href=\"#Exemplo-simples-de-fatiamento\" data-toc-modified-id=\"Exemplo-simples-de-fatiamento-3\"><span class=\"toc-item-num\">3&nbsp;&nbsp;</span>Exemplo simples de fatiamento</a></div><div class=\"lev1 toc-item\"><a href=\"#Exemplo-de-fatiamento-com-indices-negativos\" data-toc-modified-id=\"Exemplo-de-fatiamento-com-indices-negativos-4\"><span class=\"toc-item-num\">4&nbsp;&nbsp;</span>Exemplo de fatiamento com indices negativos</a></div><div class=\"lev2 toc-item\"><a href=\"#Acessando-o-último-elemento-com-índice-negativo\" data-toc-modified-id=\"Acessando-o-último-elemento-com-índice-negativo-41\"><span class=\"toc-item-num\">4.1&nbsp;&nbsp;</span>Acessando o último elemento com índice negativo</a></div><div class=\"lev2 toc-item\"><a href=\"#Inversão-do-array-com-step-negativo-(step-=--1)\" data-toc-modified-id=\"Inversão-do-array-com-step-negativo-(step-=--1)-42\"><span class=\"toc-item-num\">4.2&nbsp;&nbsp;</span>Inversão do array com step negativo (step = -1)</a></div><div class=\"lev1 toc-item\"><a href=\"#Fatiamento-avançado\" data-toc-modified-id=\"Fatiamento-avançado-5\"><span class=\"toc-item-num\">5&nbsp;&nbsp;</span>Fatiamento avançado</a></div><div class=\"lev2 toc-item\"><a href=\"#Supressão-do-indice-limite-inferior\" data-toc-modified-id=\"Supressão-do-indice-limite-inferior-51\"><span class=\"toc-item-num\">5.1&nbsp;&nbsp;</span>Supressão do indice limite inferior</a></div><div class=\"lev2 toc-item\"><a 
href=\"#Supressão-do-indice-limite-superior\" data-toc-modified-id=\"Supressão-do-indice-limite-superior-52\"><span class=\"toc-item-num\">5.2&nbsp;&nbsp;</span>Supressão do indice limite superior</a></div><div class=\"lev2 toc-item\"><a href=\"#Supressão-do-indice-do-step\" data-toc-modified-id=\"Supressão-do-indice-do-step-53\"><span class=\"toc-item-num\">5.3&nbsp;&nbsp;</span>Supressão do indice do step</a></div><div class=\"lev2 toc-item\"><a href=\"#Todos-os-elementos-com-passo-unitário\" data-toc-modified-id=\"Todos-os-elementos-com-passo-unitário-54\"><span class=\"toc-item-num\">5.4&nbsp;&nbsp;</span>Todos os elementos com passo unitário</a></div><div class=\"lev1 toc-item\"><a href=\"#Documentação-Oficial-Numpy\" data-toc-modified-id=\"Documentação-Oficial-Numpy-6\"><span class=\"toc-item-num\">6&nbsp;&nbsp;</span>Documentação Oficial Numpy</a></div><div class=\"lev1 toc-item\"><a href=\"#Links-Interessantes\" data-toc-modified-id=\"Links-Interessantes-7\"><span class=\"toc-item-num\">7&nbsp;&nbsp;</span>Links Interessantes</a></div>\n\n# Fatiamento em narray unidimensional\n\nUm recurso importante do numpy é o fatiamento no qual é possível acessar um subconjunto do array de diversas formas. O fatiamento define os índices onde o array será acessado definindo o ponto inicial, final e o passo entre eles, nesta ordem: [inicial:final:passo].\n\n# Inicializando um array unidimensional", "import numpy as np\n\na = np.arange(20) # a é um vetor de dimensão 20\nprint('a = \\n', a)", "Exemplo simples de fatiamento\nPara a realização do fatiamento são utilizados 3 parâmetros, colocados no local do índice do array. Os 3 parâmetros são separados por dois pontos \":\". Todos os 3 parâmetros podem ser opcionais que ocorrem quando o valor inicial é 0, o valor final é o tamanho do array e o passo é 1. Lembrar que a ordem deles é: [inicial:final:passo]. Se o passo for 1 fica: [inicial:final]. 
Se o início for 0 fica: [:final] e se o final for o último fica: [inicio:] e se forem todos [:].\nO fatiamento é feito começando pelo primeiro valor, adicionando-se o passo até antes do último valor. Três aspectos são extremamente importantes de serem lembrados: O índice inicial começa em zero, o índice final nunca é atingido, o último índice utilizado é sempre o imediatamente anterior e o Numpy admite índices negativos, que é uma indexação do último (-1) até o primeiro elemento (-W).\nOs exemplos a seguir ajudam a fixar estes conceitos.\nO código abaixo acessa os elementos ímpares começando de 1 até 14:", "a = np.arange(20)\nprint('Resultado da operação a[1:15:2]')\nprint(a[1:15:2])", "Exemplo de fatiamento com indices negativos\nAcessando o último elemento com índice negativo\nO código abaixo acessa os elementos ímpares até antes do último elemento:", "a = np.arange(20)\nprint('Resultado da operação a[1:-1:2]')\nprint(a[1:-1:2])\nprint('Note que o fatiamento termina antes do último elemento (-1)')", "Inversão do array com step negativo (step = -1)", "a = np.arange(20)\nprint('Resultado da operação a[-3:2:-1]')\nprint(a[-3:2:-1])\nprint('Note que o fatiamento retorna o array invertido')\nprint('Antepenúltimo até o terceiro elemento com step = -1')", "Fatiamento avançado\nÉ possível realizar o fatiamento utilizando os 3 parâmetros explícitos ( o limite inferior, limite superior e o step), ou podemos suprimir algum desses parâmetros. Nestes casos a função toma o valor defaut: limite inferior = primeiro elemento, limite superior = último elemento e step = 1.\nÉ possível realizar o fatiamento utilizando os 3 parâmetros explícitos\n( o limite inferior, limite superior e o step), ou podemos suprimir algum \ndesses parâmetros. 
Nestes casos a função toma o valor defaut: limite \ninferior = primeiro elemento, limite superior = último elemento e step = 1.\n|Proposta inicial | Equivalente |\n|---------------------|-------------|\n|a[0:len(a):1] | a[:] |\n|a[0:10:1] | a[:10] |\n|a[0:10:2] | a[:10:2] |\n|a[2:len(a):1] | a[2::] |\n|a[2:len(a):2] | a[2::2] |\nSupressão do indice limite inferior\nQuando o índice do limite inferior é omitido, é subentendido que é 0:", "a = np.arange(20)\nprint('Resultado da operação a[:15:2]')\nprint(a[:15:2])\nprint('Note que o fatiamento inicia do primeiro elemento')\nprint('Primeiro elemento até antes do 15o com passo duplo')", "Supressão do indice limite superior\nQuando o índice do limite superior é omitido, fica implícito que é o último elemento:", "a = np.arange(20)\nprint('Resultado da operação a[1::2]')\nprint(a[1::2])\nprint('Note que o fatiamento termina último elemento')\nprint('Primeiro elemento até o último com passo duplo')", "Supressão do indice do step\nO índice do step é opcional e quando não é indicado, seu valor é 1:", "a = np.arange(20)\nprint('Resultado da operação a[1:15]')\nprint(a[1:15])\nprint('Note que o fatiamento tem step unitário')\nprint('Primeiro elemento até antes do 15o com passo um')", "Todos os elementos com passo unitário", "a = np.arange(20)\nprint('Resultado da operação a[:]')\nprint(a[:])\nprint('Todos os elementos com passo unitário')", "Documentação Oficial Numpy\nScipy.org: indexação\nLinks Interessantes\nScipy Lectures: Indexação e fatiamento" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
GoogleCloudPlatform/ml-design-patterns
07_responsible_ai/explainability.ipynb
apache-2.0
[ "Explainability design pattern\nIn the Explainability design pattern, we look at approaches to understanding how and why models make predictions, with the goal of improving user trust in ML systems.", "from sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf", "Explaining simpler models\nHere we'll use the learned coefficients from a linear regression model as an explainability approach. Note: be cautious when drawing conclusions from learned weights, see the Explainability section in the book for more details.", "!gsutil cp gs://ml-design-patterns/auto-mpg.csv .\n\ndata = pd.read_csv('auto-mpg.csv', na_values='?')\ndata = data.dropna()\n\ndata = data.drop(columns=['car name'])\n\ndata = pd.get_dummies(data, columns=['origin'])\n\ndata.head()\n\nlabels = data['mpg']\ndata = data.drop(columns=['mpg', 'cylinders'])\n\nx,y = data,labels\nx_train,x_test,y_train,y_test = train_test_split(x,y)", "Train a Scikit-learn linear regression model on the data and print the learned coefficients", "model = LinearRegression().fit(x_train, y_train)\n\ncoefficients = model.coef_\ncoefdf = pd.DataFrame(coefficients, index=data.columns.tolist(), columns=['Learned coefficients'])\n\ncoefdf", "Feature attributions with SHAP\nUsing the same dataset, we'll train a deep neural net with TensorFlow and use the SHAP library to get feature attributions.", "model = tf.keras.Sequential([\n tf.keras.layers.Dense(16, activation='relu', input_shape=[len(x_train.iloc[0])]),\n tf.keras.layers.Dense(16, activation='relu'),\n tf.keras.layers.Dense(1)\n])\n\noptimizer = tf.keras.optimizers.RMSprop(0.001)\n\nmodel.compile(loss='mse',\n optimizer=optimizer,\n metrics=['mae', 'mse'])\n\nmodel.fit(x_train, y_train, epochs=1000)\n\n!pip install shap\n\nimport shap\n\n# Create an explainer object and get feature attributions for the first 10 examples in our test dataset\nexplainer = 
shap.DeepExplainer(model, x_train[:200])\nshap_values = explainer.shap_values(x_test.values[:10])\n\n# Print the feature attributions for the first example in our test set\nshap_values[0][0]\n\n# This is the baseline value shap is using\nexplainer.expected_value.numpy()\n\nshap.initjs()\nshap.force_plot(explainer.expected_value[0].numpy(), shap_values[0][0,:], x_test.iloc[0,:])\n\nshap.summary_plot(shap_values, feature_names=data.columns.tolist(), class_names=['MPG'])", "Feature attributions with Explainable AI\nThis part is coming soon :) In the mean time , see the docs.\nCopyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
kubeflow/pipelines
samples/core/dataflow/dataflow.ipynb
apache-2.0
[ "GCP Dataflow Component Sample\nA Kubeflow Pipeline component that prepares data by submitting an Apache Beam job (authored in Python) to Cloud Dataflow for execution. The Python Beam code is run with Cloud Dataflow Runner.\nIntended use\nUse this component to run a Python Beam code to submit a Cloud Dataflow job as a step of a Kubeflow pipeline. \nRuntime arguments\nName | Description | Optional | Data type| Accepted values | Default |\n:--- | :----------| :----------| :----------| :----------| :---------- |\npython_file_path | The path to the Cloud Storage bucket or local directory containing the Python file to be run. | | GCSPath | | |\nproject_id | The ID of the Google Cloud Platform (GCP) project containing the Cloud Dataflow job.| | String | | |\nregion | The Google Cloud Platform (GCP) region to run the Cloud Dataflow job.| | String | | |\nstaging_dir | The path to the Cloud Storage directory where the staging files are stored. A random subdirectory will be created under the staging directory to keep the job information.This is done so that you can resume the job in case of failure. staging_dir is passed as the command line arguments (staging_location and temp_location) of the Beam code. | Yes | GCSPath | | None |\nrequirements_file_path | The path to the Cloud Storage bucket or local directory containing the pip requirements file. | Yes | GCSPath | | None |\nargs | The list of arguments to pass to the Python file. | No | List | A list of string arguments | None |\nwait_interval | The number of seconds to wait between calls to get the status of the job. 
| Yes | Integer | | 30 |\nInput data schema\nBefore you use the component, the following files must be ready in a Cloud Storage bucket:\n- A Beam Python code file.\n- A requirements.txt file which includes a list of dependent packages.\nThe Beam Python code should follow the Beam programming guide as well as the following additional requirements to be compatible with this component:\n- It accepts the command line arguments --project, --region, --temp_location, --staging_location, which are standard Dataflow Runner options.\n- It enables info logging before the start of a Cloud Dataflow job in the Python code. This is important to allow the component to track the status and ID of the job that is created. For example, calling logging.getLogger().setLevel(logging.INFO) before any other code.\nOutput\nName | Description\n:--- | :----------\njob_id | The id of the Cloud Dataflow job that is created.\nCautions & requirements\nTo use the components, the following requirements must be met:\n- Cloud Dataflow API is enabled.\n- The component is running under a secret Kubeflow user service account in a Kubeflow Pipeline cluster. For example:\ncomponent_op(...)\nThe Kubeflow user service account is a member of:\n- roles/dataflow.developer role of the project.\n- roles/storage.objectViewer role of the Cloud Storage Objects python_file_path and requirements_file_path.\n- roles/storage.objectCreator role of the Cloud Storage Object staging_dir. 
\nDetailed description\nThe component does several things during the execution:\n- Downloads python_file_path and requirements_file_path to local files.\n- Starts a subprocess to launch the Python program.\n- Monitors the logs produced from the subprocess to extract the Cloud Dataflow job information.\n- Stores the Cloud Dataflow job information in staging_dir so the job can be resumed in case of failure.\n- Waits for the job to finish.\nSetup", "project = 'Input your PROJECT ID'\nregion = 'Input GCP region' # For example, 'us-central1'\noutput = 'Input your GCS bucket name' # No ending slash", "Install Pipeline SDK", "!python3 -m pip install 'kfp>=0.1.31' --quiet", "Load the component using KFP SDK", "import kfp.deprecated.components as comp\n\ndataflow_python_op = comp.load_component_from_url(\n 'https://raw.githubusercontent.com/kubeflow/pipelines/1.7.0-rc.3/components/gcp/dataflow/launch_python/component.yaml')\n\nhelp(dataflow_python_op)", "Use the wordcount python sample\nIn this sample, we run a wordcount sample code in a Kubeflow Pipeline. The output will be stored in a Cloud Storage bucket. 
Here is the sample code:", "!gsutil cat gs://ml-pipeline/sample-pipeline/word-count/wc.py", "Example pipeline that uses the component", "import kfp.deprecated as kfp\nfrom kfp.deprecated import dsl, Client\nimport json\n@dsl.pipeline(\n name='dataflow-launch-python-pipeline',\n description='Dataflow launch python pipeline'\n)\ndef pipeline(\n python_file_path = 'gs://ml-pipeline/sample-pipeline/word-count/wc.py',\n project_id = project,\n region = region,\n staging_dir = output,\n requirements_file_path = 'gs://ml-pipeline/sample-pipeline/word-count/requirements.txt',\n wait_interval = 30\n):\n dataflow_python_op(\n python_file_path = python_file_path, \n project_id = project_id, \n region = region, \n staging_dir = staging_dir, \n requirements_file_path = requirements_file_path, \n args = json.dumps(['--output', f'{staging_dir}/wc/wordcount.out']),\n wait_interval = wait_interval)", "Submit the pipeline for execution", "Client().create_run_from_pipeline_func(pipeline, arguments={})", "Inspect the output", "!gsutil cat $output/wc/wordcount.out", "References\n\nComponent python code\nComponent docker file\nSample notebook\nDataflow Python Quickstart" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
matthewzhenggong/fiwt
workspace_py/RigRollId-Copy5.ipynb
lgpl-3.0
[ "Parameter Estimation of RIG Roll Experiments\nSetup and descriptions\n\nWithout ACM model\nTurn on wind tunnel\nOnly 1DoF for RIG roll movement\nUse small-amplitude aileron command of CMP as inputs (in degrees)\n$$U = \\delta_{a,cmp}(t)$$\nConsider RIG roll angle and its derivative as States (in radians)\n$$X = \\begin{pmatrix} \\phi_{rig} \\ \\dot{\\phi}_{rig} \\end{pmatrix}$$\nObserve RIG roll angle and its derivative as Outputs (in degrees)\n$$Z = \\begin{pmatrix} \\phi_{rig} \\ \\dot{\\phi}_{rig} \\end{pmatrix}$$\nUse output error method based on most-likehood(ML) to estimate\n$$ \\theta = \\begin{pmatrix} C_{l,\\delta_a,cmp} \\ C_{lp,cmp} \\end{pmatrix} $$\n\nStartup computation engines", "%run matt_startup\n%run -i matt_utils\n\nbutton_qtconsole()\n\n#import other needed modules in all used engines\n#with dview.sync_imports():\n# import os", "Data preparation\nLoad raw data", "filename = 'FIWT_Exp015_20150601145005.dat.npz'\n\ndef loadData():\n # Read and parse raw data\n global exp_data\n exp_data = np.load(filename)\n\n # Select colums\n global T_cmp, da_cmp\n T_cmp = exp_data['data33'][:,0]\n da_cmp = np.average(exp_data['data33'][:,3:11:2], axis=1)\n\n global T_rig, phi_rig\n T_rig = exp_data['data44'][:,0]\n phi_rig = exp_data['data44'][:,2]\n\nloadData()\n\ntext_loadData()", "Check time sequence and inputs/outputs\nClick 'Check data' button to show the raw data. 
\nClick on curves to select time point and push into queue; click 'T/s' text to pop up last point in the queue; and click 'Output' text to print time sequence table.", "def checkInputOutputData():\n\n #check inputs/outputs\n fig, ax = plt.subplots(2,1,True)\n ax[0].plot(T_cmp,da_cmp,'r',picker=1)\n ax[1].plot(T_rig,phi_rig, 'b', picker=2)\n ax[0].set_ylabel('$\\delta \\/ / \\/ ^o$')\n ax[1].set_ylabel('$\\phi \\/ / \\/ ^o/s$')\n ax[1].set_xlabel('$T \\/ / \\/ s$', picker=True)\n ax[0].set_title('Output', picker=True)\n \n\n fig.canvas.mpl_connect('pick_event', onPickTime)\n fig.show()\n display(fig)\n\nbutton_CheckData()", "Input $\\delta_T$ and focused time ranges\nFor each section,\n* Select time range and shift it to start from zero;\n* Resample Time, Inputs, Outputs in unique $\\delta_T$;\n* Smooth Input/Observe data if flag bit0 is set;\n* Take derivatives of observe data if flag bit1 is set.", "\n# Pick up focused time ranges\ntime_marks = [[1501.28, 1505.50, \"doublet u1\"],\n [1507.40, 1511.80, \"doublet u2\"],\n [1513.55, 1517.87, \"doublet u3\"],\n [1519.70, 1523.50, \"doublet u4\"],\n [1537.60, 1541.64, \"doublet d1\"],\n [1543.76, 1547.90, \"doublet d2\"],\n [1549.91, 1554.20, \"doublet d3\"],\n [1555.86, 1560.00, \"doublet d4\"],\n [1609.30, 1615.49, \"3-2-1-1 u1\"], \n [1617.89, 1624.25, \"3-2-1-1 u2\"],\n [1626.49, 1633.45, \"3-2-1-1 u3\"],\n [1634.99, 1642.38, \"3-2-1-1 u4\"],\n [1651.40, 1657.50, \"3-2-1-1 d1\"],\n [1659.90, 1666.68, \"3-2-1-1 d2\"],\n [1668.50, 1674.69, \"3-2-1-1 d3\"],\n [1677.00, 1683.88, \"3-2-1-1 d4\"],\n [1748.59, 1809.05, \"linear sweep u1\"], \n [1825.89, 1885.96, \"linear sweep d1\"], \n [1905.86, 1965.17, \"exp sweep u1\"], \n ]\n\n\n# Decide DT,U,Z and their processing method\nDT=0.01\nprocess_set = {\n 'U':[(T_cmp, da_cmp,0),],\n 'Z':[(T_rig, phi_rig,3),],\n 'cutoff_freq': 10 #Hz\n }\n\nU_names = ['$\\delta_{a,cmp} \\, / \\, ^o$',]\nY_names = Z_names = ['$\\phi_{a,rig} \\, / \\, ^o$',\n '$\\dot{\\phi}_{a,rig} \\, / \\, 
^o/s$',]\n\ndisplay_data_prepare()", "Resample and filter data in sections", "resample(True);", "Define dynamic model to be estimated\n$$\\left{\\begin{matrix}\n\\ddot{\\phi}{rig} = \\frac{M{x,rig}}{I_{xx,rig}} \\\nM_{x,rig} = M_{x,a} + M_{x,f} + M_{x,cg} \\\nM_{x,a} = \\frac{1}{2} \\rho V^2S_cb_c \\left ( C_{la,cmp}\\delta_{a,cmp} + C{lp,cmp} \\frac{b_c}{2V} \\dot{\\phi}{rig} \\right ) \\\nM{x,f} = -F_c \\, sign(\\dot{\\phi}{rig}) - f\\dot{\\phi}{rig} \\\nM_{x,cg} = -m_T g l_{zT} \\sin \\left ( \\phi - \\phi_0 \\right ) \\\n\\end{matrix}\\right.$$", "%%px --local\n#update common const parameters in all engines\n\n#problem size\nNx = 2\nNu = 1\nNy = 2\nNpar = 9\n\n#reference\nS_c = 0.1254 #S_c(m2) \nb_c = 0.7 #b_c(m)\ng = 9.81 #g(m/s2)\nV = 30 #V(m/s)\n\n#other parameters\nv_th = 0.5/57.3 #v_th(rad/s)\nv_th2 = 0.5/57.3 #v_th(rad/s)\n\n#for short\nqbarSb = 0.5*1.225*V*V*S_c*b_c\nb2v = b_c/(2*V)\n\ndef x0(Z,T,U,params):\n return Z[0,:]/57.3\n\ndef xdot(X,t,U,params):\n Cla_cmp = params[0]\n Clp_cmp = params[1]\n Ixx = params[2]\n F_c = params[3]\n f = params[4]\n m_T = params[5]\n l_z_T = params[6]\n kBrk = params[7]\n phi0 = params[8]\n\n phi = X[0]\n phi_dot = X[1]\n \n idx = int(t/DT)\n da_cmp = U[idx,0]*0.01745329\n \n moments = -(m_T*l_z_T)*g*math.sin(phi-phi0)\n\n abs_phi_dot = abs(phi_dot)\n F = f*phi_dot\n if abs_phi_dot > v_th+v_th2:\n F += math.copysign(F_c, phi_dot)\n elif abs_phi_dot > v_th:\n F += math.copysign(F_c*(kBrk-(kBrk-1)*(abs_phi_dot-v_th)/v_th2), phi_dot)\n else:\n F += phi_dot/v_th*(F_c*kBrk)\n moments -= F\n \n moments += qbarSb*(Cla_cmp*da_cmp + Clp_cmp*phi_dot*b2v)\n \n phi_dot2 = moments/Ixx\n return [phi_dot, phi_dot2]\n\ndef obs(X,T,U,params):\n return X*57.3\n\n\n\ndisplay(HTML('<b>Constant Parameters</b>'))\ntable = ListTable()\ntable.append(['Name','Value','unit'])\ntable.append(['$S_c$',S_c,r'$m^2$'])\ntable.append(['$b_c$',b_c,'$m$'])\ntable.append(['$g$',g,r'$ms^{-2}$'])\ntable.append(['$V$',V,r'$ms^{-1}$'])\ndisplay(table)\n", 
"Initial guess\n\nInput default values and ranges for parameters\nSelect sections for trainning\nAdjust parameters based on simulation results\nDecide start values of parameters for optimization", "#initial guess\nparam0 = [\n -0.3, #Cla_cmp(1/rad)\n -0.5, #Clp_cmp\n 0.199141909329, #Ixx(kg*m2)\n 0.0580817418532, #F_c(N*m)\n 0.0407466009837, #f(N*m/(rad/s))\n 7.5588, #m_T(kg)\n 0.0444, #l_z_T(m)\n 1.01, #kBrk\n 0, #phi0(rad)\n]\n\nparam_name = ['$Cla_{cmp}$',\n '$Clp_{cmp}$',\n '$I_{xx,rig}$', \n '$F_c$',\n '$f$',\n '$m_T$',\n '$l_{zT}$',\n '$k_{Brk}$',\n '$phi_0$']\nparam_unit = ['$rad^{-1}$',\n '$1$',\n '$kg\\,m^2$',\n '$Nm$',\n r'$\\frac{Nm}{rad/s}$',\n 'kg',\n 'm',\n '1',\n '$rad$']\nNparID = 4\nopt_idx = [0,1,2,8]\nopt_param0 = [param0[i] for i in opt_idx]\npar_del = [0.3*1e-3, 0.3*1e-3, 0.2*1e-3, 0.0174]\nbounds = [(-1,-1e-6),(-2,-1e-6), (1e-6,0.5), (-0.1,0.1)]\n'''\nNparID = 3\nopt_idx = [0,1,7]\nopt_param0 = [param0[i] for i in opt_idx]\npar_del = [0.3*1e-3, 0.3*1e-3, 0.0174]\nbounds = [(-1,-1e-6),(-2,-1e-6), (-0.1,0.1)]\n'''\n\n\ndisplay_default_params()\n\n#select sections for training\nsection_idx = range(8)\ndisplay_data_for_train()\n\n#push parameters to engines\npush_opt_param()\n\n\n# select 2 section from training data\nidx = random.sample(section_idx, 2)\n\ninteract_guess();", "Optimize using ML", "display_preopt_params()\n\nif True:\n InfoMat = None\n method = 'trust-ncg'\n def hessian(opt_params, index):\n global InfoMat\n return InfoMat\n dview['enable_infomat']=True\n options={'gtol':1}\n opt_bounds = None\nelse:\n method = 'L-BFGS-B'\n hessian = None\n dview['enable_infomat']=False\n options={'ftol':1e-2,'maxfun':10}\n opt_bounds = bounds\n\ncnt = 0\ntmp_rslt = None\nT0 = time.time()\nprint('#cnt, Time, |R|')\n\n%time res = sp.optimize.minimize(fun=costfunc, x0=opt_param0, \\\n args=(opt_idx,), method=method, jac=True, hess=hessian, \\\n bounds=opt_bounds, options=options)\n", "Show and test results", "display_opt_params()\n\n# show 
result\nidx = random.sample(range(8), 2) \\\n + random.sample(range(8,16), 2) \\\n + random.sample(range(16,19), 2)\ndisplay_data_for_test();\n\nupdate_guess();\n\ntoggle_inputs()\nbutton_qtconsole()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
clarka34/exploring-ship-logbooks
scripts/classifier-notebook.ipynb
mit
[ "Notebook used to develop code\n\noutput from classification is data frame with slave_logs (maybe rename that column?) indicating:\ncliwoc_data (unclassified) = 0\ncliwoc_data (no slaves) = 1\ncliwoc_data (slaves) = 2\nslave_data = 3\nclassified as slave log = 4\nclassified as non slave log = 5", "classifier_algorithm = \"Decision Tree\"\n\nimport collections\nimport exploringShipLogbooks\n\nimport numpy as np\nimport os.path as op\nimport pandas as pd\nimport exploringShipLogbooks.wordcount as wc\n\nfrom fuzzywuzzy import fuzz\nfrom sklearn import preprocessing\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn import tree\n\nfrom exploringShipLogbooks.basic_utils import clean_data\nfrom exploringShipLogbooks.basic_utils import encode_data_df\nfrom exploringShipLogbooks.basic_utils import extract_logbook_data\nfrom exploringShipLogbooks.fuzz_replacement import fuzzy_wuzzy_classification\nfrom exploringShipLogbooks.basic_utils import isolate_columns\nfrom exploringShipLogbooks.basic_utils import isolate_training_data\n\nfrom exploringShipLogbooks.config import *", "Load and clean data\nLoad CLIWOC ship logs", "# extract data from zip file\ncliwoc_data = extract_logbook_data('CLIWOC15.csv')\n\nlabel_encoding = preprocessing.LabelEncoder().fit(cliwoc_data['LogbookIdent']).classes_\ncliwoc_data['LogbookIdent'] = preprocessing.LabelEncoder().fit_transform(cliwoc_data['LogbookIdent'])", "Find definite slave data in CLIWOC data set\n\nThese logs will be used to test the classifier", "# extract logs that mention slaves\nslave_mask = wc.count_key_words(cliwoc_data, text_columns, slave_words)\nprint('Found ', len(slave_mask[slave_mask]), ' logs that mention slaves')", "Clean CLIWOC data", "# find indices of ship names that are \"non-slave\" ships before dropping ship name column\nnon_slave_log_locations = isolate_training_data(cliwoc_data, {'ShipName': non_slave_ships})\nprint('Found ', len(non_slave_log_locations[non_slave_log_locations==True]), ' logs that are 
non-slave ships')\n\ncliwoc_data['slave_logs'] = np.zeros(len(cliwoc_data))\nslave_log_locations = cliwoc_data['LogbookIdent'].isin(list(cliwoc_data['LogbookIdent']\n [slave_mask].unique()))", "cliwoc_data (unclassified) = 0\ncliwoc_data (no slaves) = 1\ncliwoc_data (slaves) = 2\nslave_data = 3", "cliwoc_data.loc[non_slave_log_locations,'slave_logs'] = 1\ncliwoc_data.loc[slave_log_locations,'slave_logs'] = 2\n\ncliwoc_data = cliwoc_data.sort_values('LogbookIdent', ascending=True)\ncliwoc_data_all = cliwoc_data.set_index('LogbookIdent', drop= False).copy()\ncliwoc_data = cliwoc_data.set_index('LogbookIdent', drop = False)\ncliwoc_data = cliwoc_data.drop_duplicates('LogbookIdent')\n\n\n# uncomment this if looking at ship names for manual review\n#desired_columns.append('ShipName')\n\n# remove undesired columns\ncliwoc_data = isolate_columns(cliwoc_data, desired_columns)", "Load Slave Voyages data", "data_path = op.join(exploringShipLogbooks.__path__[0], 'data')\nfile_name = data_path + '/tastdb-exp-2010'\nslave_voyage_logs = pd.read_pickle(file_name)\n\nyear_ind = ~(slave_voyage_logs['yeardep'].isnull())\nslave_voyage_logs = slave_voyage_logs[year_ind]\n\ncliwoc_ind = (slave_voyage_logs['yeardep']>cliwoc_data['Year'].min()) & (slave_voyage_logs['yeardep']<cliwoc_data['Year'].max())\nslave_voyage_logs = slave_voyage_logs[cliwoc_ind]", "Clean Slave voyages data", "slave_voyage_desired_cols = list(slave_voyage_conversions.keys())\nslave_voyage_logs = isolate_columns(slave_voyage_logs, slave_voyage_desired_cols)\n\nslave_voyage_logs.rename(columns=slave_voyage_conversions, inplace=True)\n#slave_voyage_logs.columns = ['Nationality', 'ShipType', 'VoyageFrom', 'VoyageTo', 'Year']\n\nslave_voyage_logs['slave_logs'] = 3\nslave_voyage_indices = range(len(slave_voyage_logs)) + (cliwoc_data.tail(1).index[0]+1)\nslave_voyage_logs = slave_voyage_logs.set_index(slave_voyage_indices)", "Join data sets", "all_data = pd.concat([cliwoc_data, slave_voyage_logs])\n#all_data = 
cliwoc_data.append(slave_voyage_logs)\nall_data = clean_data(all_data)\n\n# cleanup\n#del cliwoc_data, slave_voyage_logs\n\nall_data.head()", "Test of fuzzywuzzy method", "all_data_test = all_data.copy()\n\nfuzz_columns = ['Nationality', 'ShipType', 'VoyageFrom', 'VoyageTo']\n\nfor col in fuzz_columns:\n all_data = fuzzy_wuzzy_classification(all_data, col)\n", "Encode data\n\nMust encode data before separating, otherwise values that do not occur in a subset will be encoded differently", "from sklearn.preprocessing import LabelEncoder\nclass MultiColumnLabelEncoder:\n def __init__(self,columns = None):\n self.columns = columns # array of column names to encode\n\n def fit(self,X,y=None):\n return self # not relevant here\n\n def transform(self,X):\n '''\n Transforms columns of X specified in self.columns using\n LabelEncoder(). If no columns specified, transforms all\n columns in X.\n '''\n output = X.copy()\n if self.columns is not None:\n for col in self.columns:\n if is_instance(X[col][0], str):\n output[col] = LabelEncoder().fit_transform(output[col])\n else:\n output[col] = X[col]\n else:\n for colname,col in output.iteritems():\n output[colname] = LabelEncoder().fit_transform(col)\n return output\n\n def fit_transform(self,X,y=None):\n return self.fit(X,y).transform(X)\n\nif classifier_algorithm == \"Decision Tree\":\n all_data = MultiColumnLabelEncoder().fit_transform(all_data)\nelif classifier_algorithm == \"Naive Bayes\":\n all_data = encode_data_df(all_data)\n all_data['no_data'] = all_data['nan'].apply(lambda x: x.any(), axis=1).astype(int)\n all_data = all_data.drop('nan', axis=1)\nelse:\n raise KeyError(\"Please enter a valid classification type (Decision Trees or Naive Bayes)\")\n", "Extract training data, and create list of classes", "unclassified_logs = all_data[all_data['slave_logs']==0]\n#unclassified_logs = unclassified_logs.drop('slave_logs', axis=1)\n\nvalidation_set_1 = all_data[all_data['slave_logs']==2]\n#validation_set_1 = 
validation_set_1.drop('slave_logs', axis=1)\n\n# reserve first 20% of slave_voyage_logs as validation set\nvalidation_set_2_indices = range(slave_voyage_indices.min(),\n slave_voyage_indices.min() + round(len(slave_voyage_indices)*.2))\nvalidation_set_2 = all_data.iloc[validation_set_2_indices]\n#validation_set_2 = validation_set_2.drop('slave_logs', axis=1)\n\ntraining_logs_pos = all_data.drop(validation_set_2_indices)\ntraining_logs_pos = training_logs_pos[training_logs_pos['slave_logs']==3]\n#training_logs_pos = training_logs_pos.drop('slave_logs', axis=1)\n\n# note! This relies on cliwoc data being first in all_data\n# could make more robust later\ntraining_logs_neg = all_data[all_data['slave_logs']==1]\n#training_logs_neg = training_logs_neg.drop('slave_logs', axis=1)\n\n# cleanup\n#del all_data", "left this code so we can check if there are any null values in each \n dataframe", "def finding_null_values(df):\n return df.isnull().sum()[df.isnull().sum()>0]\n\nrepeat_multiplier = round(len(training_logs_pos)/len(training_logs_neg))\n\n# create list of classes for training data (0 is for non-slave, 1 is for slave)\n# index matches training_data\nclasses = np.zeros(len(training_logs_neg)).repeat(repeat_multiplier)\n#classes = np.append(classes, np.ones(len(training_logs_pos)))\nclasses = np.append(classes, np.ones(len(training_logs_pos)))\n\n# join training data\nneg_rep = pd.concat([training_logs_neg]*repeat_multiplier)\ntraining_data = pd.concat([neg_rep, training_logs_pos], ignore_index = True)\n\n# convert to numpy array\ncolumns = list(training_data.columns)\ncolumns.remove('slave_logs')\ntraining_data = training_data.as_matrix(columns)\n", "Fit training data to classifier\n\nnote! first column of numpy array is index! 
do not include in classification!", "if classifier_algorithm == \"Decision Tree\":\n classifier = MultinomialNB(alpha = 1.0, class_prior = None, fit_prior = True)\n classifier.fit(training_data[::,1::], classes)\nelif classifier_algorithm == \"Naive Bayes\":\n classifier = tree.DecisionTreeClassifier()\n classifier.fit(training_data[::,1::], classes)\nelse: \n raise KeyError(\"Please enter a valid classification type (Decision Trees or Naive Bayes)\")", "Test classifier\n\ncheck if slave logs from cliwoc data were classified correctly (want mostly classified as 1)\ncompare first column with slave_index", "def validation_test(classifier, validation_set, expected_class):\n \"\"\"\n input classifer object, validation set (data frame), and expected class \n of validation set (i.e. 1 or 0). Prints successful classification rate.\n \"\"\"\n columns = list(validation_set.columns)\n columns.remove('slave_logs')\n validation_set = validation_set.as_matrix(columns)\n predictions = classifier.predict(validation_set[::,1::])\n \n counts = collections.Counter(predictions)\n percent_correct = (counts[expected_class]/(len(predictions))* 100)\n \n print('Validation set was classified as', expected_class,\n round(percent_correct,2), '% of the time')\n\ndef predict_class(classifier, data_subset):\n \"\"\"\n Predict class of data, and append predictions to data frame\n \"\"\"\n try:\n # drop old predictions before reclassifying (if they exist)\n data_subset = data_subset.drop('predictions', axis = 1)\n data_to_classify = data_subset.copy()\n except:\n data_to_classify = data_subset.copy()\n pass\n \n # convert to numpy and classify\n columns = list(data_to_classify.columns)\n columns.remove('slave_logs')\n data_matrix = data_to_classify.as_matrix(columns)\n predictions = classifier.predict(data_matrix[::,1::])\n \n # revalue slave_log ID column to indicate classification\n data_to_classify['slave_logs'] = predictions + 4\n \n # print statstics\n counts = 
collections.Counter(predictions)\n \n for key in counts:\n percent = (counts[key]/(len(predictions))* 100)\n print(round(percent, 2), 'of data was classified as ', key)\n \n # update slave_log columns\n\n return data_to_classify\n\nprint('Testing validation data from slave logs data set')\nvalidation_test(classifier, validation_set_2, 1)\n\nprint('Testing validation data from cliwoc data set:')\nvalidation_test(classifier, validation_set_1, 1)\n\nunclassified_logs = predict_class(classifier, unclassified_logs)\n\nunclassified_logs.head()", "try decision trees plotting\n\n Following lines of code do not currently work, we need to install graphviz", "# export PDF with decision tree\nfrom sklearn.externals.six import StringIO \nimport os\nimport pydot \n\ndot_data = StringIO()\ntree.export_graphviz(new_classifier, out_file=dot_data) \ngraph = pydot.graph_from_dot_data(dot_data.getvalue()) \ngraph.write_pdf(\"test.pdf\") " ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ibab/tensorprob
examples/example1_particle_decays.ipynb
mit
[ "Example 1: Mixture of normal and exponential distributions\nThis example is inspired by particle physics:\nThe dataset consists of the reconstructed masses of possible particle candidates.\nBackground decays follow an exponential distribution, while signal decays follow a normal distribution.", "%matplotlib inline\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom tensorprob import Model, Parameter, Normal, Exponential, Mix2, ScipyLBFGSBOptimizer\n# We use the matplotlib_hep library to easily create high energy physics plots\nfrom matplotlib_hep import histpoints\nplt.rcParams['figure.figsize'] = (10.0, 6.0)", "We model our distribution as a mixture of a normal distribution (parameters mu and sigma and mixture weight f) and an exponential distribution (parameter lamb and mixture weight 1 -f).\nThis model can be translated into TensorProb as follows:", "with Model() as model:\n mu = Parameter()\n sigma = Parameter(lower=0)\n lamb = Parameter(lower=0)\n f = Parameter(lower=0.0, upper=1)\n\n X = Mix2(f,\n Normal(mu, sigma, lower=0, upper=50),\n Exponential(lamb, lower=0, upper=50),\n lower=0,\n upper=50,\n )", "We declare X as an observed variable and set suitable initial parameter values:", "model.observed(X)\nmodel.initialize({\n mu: 25,\n sigma: 2,\n lamb: 0.03,\n f: 0.2\n})", "The dataset is generated with numpy:", "np.random.seed(0)\nexp_data = np.random.exponential(40, 10000)\nexp_data = exp_data[(0 < exp_data) & (exp_data < 50)]\nnorm_data = np.random.normal(20, 2, 500)\ndata = np.concatenate([exp_data, norm_data])", "Now we perform a fit of the model using the default optimizer:", "result = model.fit(data)\nprint(result)", "The fit converged successfully and we can visualize the distribution:", "xs = np.linspace(0, 50, 200)\nx, N, w = histpoints(data, bins=60, color='k', ms=3, capsize=0)\nplt.plot(xs, w * model.pdf(xs), 'b-', lw=2)\nplt.xlabel('mass')\nplt.ylabel('candidates')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
AllenDowney/ThinkBayes2
examples/btp01soln.ipynb
mit
[ "Bayes Theorem Problems\nThis notebook presents code and exercises from Think Bayes, second edition.\nCopyright 2016 Allen B. Downey\nMIT License: https://opensource.org/licenses/MIT", "from __future__ import print_function, division\n\n% matplotlib inline\nimport warnings\nwarnings.filterwarnings('ignore')\n\nimport numpy as np\n\nfrom thinkbayes2 import Hist, Pmf, Cdf, Suite, Beta\nimport thinkplot", "The sock problem\nYuzhong Huang\nThere are two drawers of socks. The first drawer has 40 white socks and 10 black socks; the second drawer has 20 white socks and 30 black socks. We randomly get 2 socks from a drawer, and it turns out to be a pair(same color) but we don't know the color of these socks. What is the chance that we picked the first drawer.", "# Solution\n\npmf = Pmf(['drawer 1', 'drawer 2'])\npmf['drawer 1'] *= (40/50)**2 + (10/50)**2\npmf['drawer 2'] *= (30/50)**2 + (20/50)**2\npmf.Normalize()\npmf.Print()\n\n# Solution\n\npmf = Pmf(['drawer 1', 'drawer 2'])\npmf['drawer 1'] *= (40/50)*(39/49) + (10/50)*(9/49)\npmf['drawer 2'] *= (30/50)*(29/49) + (20/50)*(19/49)\npmf.Normalize()\npmf.Print()\n\n# Solution\n\nclass Socks(Suite):\n \n def Likelihood(self, data, hypo):\n \"\"\"Probability of data under hypo.\n \n data: 'pair' or 'no pair'\n hypo: tuple, number of (white, black) socks\n \"\"\"\n white, black = hypo\n total = white + black\n like = white/total*(white-1)/(total-1) + black/total*(black-1)/(total-1)\n if data == 'pair':\n return like\n else:\n return 1-like \n\n# Solution\n\nn = 50\nsocks = Socks()\nfor white in range(n+1):\n socks[white, n-white] = 1\nsocks.Normalize()\nthinkplot.Pdf(socks)\n\n# Solution\n\nsocks.Update('pair')\nthinkplot.Pdf(socks)\nthinkplot.Config(ylim=[0,0.03])", "Chess-playing twins\nAllen Downey\nTwo identical twins are members of my chess club, but they never show up on the same day; in fact, they strictly alternate the days they show up. 
I can't tell them apart except that one is a better player than the other: Avery beats me 60% of the time and I beat Blake 70% of the time. If I play one twin on Monday and win, and the other twin on Tuesday and lose, which twin did I play on which day?", "# Solution\n\npmf = Pmf(['AB', 'BA'])\npmf['AB'] = 0.4 * 0.3\npmf['BA'] = 0.7 * 0.6\npmf.Normalize()\npmf.Print()\n\n# Solution\n\nclass Chess(Suite):\n \n prob_I_beat = dict(A=0.4, B=0.7)\n \n def Likelihood(self, data, hypo):\n \"\"\"Probability of data under hypo.\n \n data: sequence of 'W' and 'L'\n hypo: sequence of 'A' and 'B'\n \"\"\"\n total = 1\n for outcome, twin in zip(data, hypo):\n like = self.prob_I_beat[twin]\n if outcome == 'W':\n total *= like\n else:\n total *= 1-like\n return total\n\n# Solution\n\nchess = Chess(['AB', 'BA'])\nchess.Update('WL')\nchess.Print()", "1984\nby Katerina Zoltan\nThe place: Airstrip One. The reason: thoughtcrime. The time: ???\nJohn's parents were taken by the Thought Police and erased from all records. John is being initiated into the Youth League and must pass a test. He is asked whether his parents are good comrades. It is not clear what John's admission officer knows:\n\nHe may know that John's parents have been erased and that John did not give them away.\nHe may know only that John's parents have been erased.\nHe may not know that John's parents have been erased.\n\nIt is a well known fact that children who have parents that are 'good comrades' have twice the chances of passing the test. However, if the admission officer knows that their parents committed thoughtcrime (but not that they have been erased yet), a child that gave his parents away has three times the chances of getting in than a child who did not give them away.\nAnd if the admission officer knows the specifics of the arrest, a child that denies that the records are false and their parents existed has a 1/3 chance of getting in, while one who pretends that his parents never existed has a 2/3 chance. 
Lying to an admission officer that knows the parents have been erased will ensure that the child does not get in. Telling an admission officer that your parents do not exist when he does not know this will give you a 1/3 chance of getting in.\nThere is a 60% chance the admission officer knows nothing, a 25% chance that he knows the parents have been erased, and a 15% chance that the officer knows all of the details. John says that he never had parents and is admitted into the Youth League. What did his admission officer know?", "# Solution\n\nofficer = {'everything':0.15, 'something':0.25, 'nothing':0.6}\n\nclass ThoughtPolice(Suite):\n\n def Likelihood(self, data, hypo):\n if data == 'gave away':\n if hypo == 'everything':\n return 0\n elif hypo == 'something':\n return 1\n else:\n return 3\n elif data == 'none':\n if hypo == 'everything':\n return 2\n elif hypo == 'something':\n return 1\n else:\n return 1\n else: # data == 'good comrades'\n if hypo == 'everything':\n return 0\n elif hypo == 'something':\n return 0\n else:\n return 2\n \npmf = ThoughtPolice(officer)\npmf.Print()\n\n# Solution\n\npmf.Update('none')\npmf.Print()", "Where Am I? - The Robot Localization Problem\nby Kathryn Hite\nBayes's Theorem proves to be extremely useful when building mobile robots that need to know where they are within an environment at any given time. Because of the error in motion and sensor systems, a robot's knowledge of its location in the world is based on probabilities. Let's look at a simplified example that could feasibly be scaled up to create a working localization model.\nPart A: We have a robot that exists within a very simple environement. The map for this environment is a row of 6 grid cells that are colored either green or red and each labeled $x_1$, $x_2$, etc. 
In real life, a larger form of this grid environment could make up what is known as an occupancy grid, or a map of the world with places that the robot can go represented as green cells and obstacles as red cells.\n|G|R|R|G|G|G|\n|-|-|-|-|-|-|\n|$x_1$|$x_2$|$x_3$|$x_4$|$x_5$|$x_6$|\nThe robot has a sensor that can detect color with an 80% chance of being accurate.\nGiven that the robot gets dropped in the environment and senses red, what is the probability of it being in each of the six locations?", "# Solution\n\ncolors = 'GRRGGG'\nlocs = range(len(colors))\ndata = 'R'\n\npmf = Pmf(locs)\nfor hypo in pmf:\n if colors[hypo] == data:\n pmf[hypo] *= 0.8\n else:\n pmf[hypo] *= 0.2\npmf.Normalize()\npmf.Print()\n\n# Solution\n\nclass Robot(Suite):\n \n colors = 'GRRGGG'\n \n def Likelihood(self, data, hypo):\n \"\"\"\n \n data: 'R' or 'G'\n hypo: index of starting location\n \"\"\"\n if self.colors[hypo] == data:\n return 0.8\n else:\n return 0.2\n\n# Solution\n\nrobot = Robot(locs)\nrobot.Update('R')\nrobot.Print()", "Part B: This becomes an extremely useful tool as we begin to move around the map. Let's try to get a more accurate knowledge of where the robot falls in the world by telling it to move forward one cell.\nThe robot moves forward one cell from its previous position and the sensor reads green, again with an 80% accuracy rate. 
Update the probability of the robot having started in each location.", "# Solution\n\nclass Robot2(Suite):\n \n colors = 'GRRGGG'\n \n def Likelihood(self, data, hypo):\n \"\"\"\n \n data: tuple (offset, 'R' or 'G')\n hypo: index of starting location\n \"\"\"\n offset, color = data\n index = (hypo + offset) % len(self.colors)\n if self.colors[index] == color:\n return 0.8\n else:\n return 0.2\n\n# Solution\n\nrobot = Robot2(locs)\nrobot.Update((0, 'R'))\nrobot.Print()\n\n# Solution\n\nrobot.Update((1, 'G'))\nrobot.Print()", "Red Dice problems\nSuppose I have a six-sided die that is red on 2 sides and blue on 4 sides, and another die that's the other way around, red on 4 sides and blue on 2.\nI choose a die at random and roll it, and I tell you it came up red. What is the probability that I rolled the second die (red on 4 sides)?", "# Solution\n\nfrom fractions import Fraction\n\nd1 = Pmf({'Red':Fraction(2), 'Blue':Fraction(4)}, label='d1 (bluish) ')\nd1.Print()\n\n# Solution\n\nd2 = Pmf({'Red':Fraction(4), 'Blue':Fraction(2)}, label='d2 (reddish)')\nd2.Print()\n\n# Solution\n\ndice = Pmf({d1:Fraction(1), d2:Fraction(1)})\ndice.Print()\n\n# Solution\n\nclass Dice(Suite):\n def Likelihood(self, data, hypo):\n \"\"\"\n data: 'Red' or 'Blue'\n hypo: a Die object\n \"\"\"\n return hypo[data]\n\n# Solution\n\nprior = Dice({d1:Fraction(1), d2:Fraction(1)})\nprior.Print()\n\n# Solution\n\nposterior = prior.Copy()\nposterior.Update('Red')\nposterior.Print()", "Scenario B\nSuppose I roll the same die again. What is the probability I get red?", "# Solution\n\nfrom thinkbayes2 import MakeMixture\n\npredictive = MakeMixture(posterior)\npredictive.Print()", "Scenario A\nInstead of rolling the same die, suppose I choosing a die at random and roll it. What is the probability that I get red?", "# Solution\n\nfrom thinkbayes2 import MakeMixture\n\npredictive = MakeMixture(prior)\npredictive.Print()", "Scenario C\nNow let's run a different experiment. 
Suppose I choose a die and roll it. If the outcome is red, I report the outcome. Otherwise I choose a die again and roll again, and repeat until I get red.\nWhat is the probability that the last die I rolled is the reddish one?", "# Solution\n\n# On each roll, there are four possible results, with these probabilities:\n\n# d1, red 1/2 * 1/3\n# d1, blue 1/2 * 2/3\n# d2, red 1/2 * 2/3\n# d2, blue 1/2 * 1/3\n\n#On the last roll, I tell you that the outcome is red, so we are left with two possibilities:\n\n# d1, red 1/2 * 1/3\n# d2, red 1/2 * 2/3\n\n# The likelihood ratio is 2 to 1, so we can use that to update the prior:\n\n# Solution\n\nposterior = prior.Copy()\nposterior[d1] *= 1\nposterior[d2] *= 2\nposterior.Normalize()\nposterior.Print()", "Scenario D\nFinally, suppose I choose a die and roll it over and over until I get red, then report the outcome. What is the probability that the die I rolled is the reddish one?", "# Solution\n\n# In this case, the likelihood of the data is the same regardless of\n# which die I rolled, so the posterior is the same as the prior.\n\nposterior = prior.Copy()\nposterior.Print()\n\n# Solution\n\n#In summary, each of the four scenarios yields a different pair of posterior\n# and predictive distributions.\n\n# Scenario Posterior probability of d2 Predictive probability of red\n# A 2/3 1/2\n# B 2/3 5/9\n# C 2/3 1\n# D 1/2 1", "The bus problem\nAllen Downey\nTwo buses routes run past my house, headed for Arlington and Billerica. In theory, the Arlington bus runs every 20 minutes and the Billerica bus every 30 minutes, but by the time they get to me, the time between buses is well-modeled by exponential distributions with means 20 and 30.\nPart 1: Suppose I see a bus outside my house, but I can't read the destination. What is the probability that it is an Arlington bus?\nPart 2: Suppose I see a bus go by, but I don't see the destination, and 3 minutes later I see another bus. 
What is the probability that the second bus is going to Arlington?", "# Solution\n\ndef generate_times(lam, n=10):\n gaps = np.random.exponential(lam, n)\n times = np.cumsum(gaps)\n for time in times:\n yield time\n\n# Solution\n\nfor time in generate_times(20, 10):\n print(time)\n\n# Solution\n\ndef generate_buses(names, lams, n):\n buses = [generate_times(lam, n) for lam in lams]\n times = [next(bus) for bus in buses]\n\n while True:\n i = np.argmin(times) \n yield(names[i], times[i])\n times[i] = next(buses[i])\n\n# Solution\n\nnext(generate_buses('AB', [20, 30], 10))\n\n# Solution\n\nres = []\nfor bus, time in generate_buses('AB', [20, 30], 1000):\n res.append((bus, time))\n\n# Solution\n\nbuses, times = zip(*res)\n\n# Solution\n\nhist = Hist(buses)\nhist['A'] / hist.Total()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ES-DOC/esdoc-jupyterhub
notebooks/snu/cmip6/models/sandbox-1/aerosol.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Aerosol\nMIP Era: CMIP6\nInstitute: SNU\nSource ID: SANDBOX-1\nTopic: Aerosol\nSub-Topics: Transport, Emissions, Concentrations, Optical Radiative Properties, Model. \nProperties: 69 (37 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:54:38\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'snu', 'sandbox-1', 'aerosol')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties\n2. Key Properties --&gt; Software Properties\n3. Key Properties --&gt; Timestep Framework\n4. Key Properties --&gt; Meteorological Forcings\n5. Key Properties --&gt; Resolution\n6. Key Properties --&gt; Tuning Applied\n7. Transport\n8. Emissions\n9. Concentrations\n10. Optical Radiative Properties\n11. Optical Radiative Properties --&gt; Absorption\n12. Optical Radiative Properties --&gt; Mixtures\n13. Optical Radiative Properties --&gt; Impact Of H2o\n14. Optical Radiative Properties --&gt; Radiative Scheme\n15. Optical Radiative Properties --&gt; Cloud Interactions\n16. Model \n1. Key Properties\nKey properties of the aerosol model\n1.1. Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of aerosol model.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of aerosol model code", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.3. Scheme Scope\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nAtmospheric domains covered by the aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.scheme_scope') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"troposhere\" \n# \"stratosphere\" \n# \"mesosphere\" \n# \"mesosphere\" \n# \"whole atmosphere\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.4. Basic Approximations\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBasic approximations made in the aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.basic_approximations') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.5. Prognostic Variables Form\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nPrognostic variables in the aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"3D mass/volume ratio for aerosols\" \n# \"3D number concenttration for aerosols\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.6. 
Number Of Tracers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of tracers in the aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.number_of_tracers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "1.7. Family Approach\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nAre aerosol calculations generalized into families of species?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.family_approach') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Software Properties\nSoftware properties of aerosol code\n2.1. Repository\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nLocation of code for this component.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.2. Code Version\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCode version identifier.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.3. Code Languages\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nCode language(s).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3. 
Key Properties --&gt; Timestep Framework\nPhysical properties of seawater in ocean\n3.1. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMathematical method deployed to solve the time evolution of the prognostic variables", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses atmospheric chemistry time stepping\" \n# \"Specific timestepping (operator splitting)\" \n# \"Specific timestepping (integrated)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "3.2. Split Operator Advection Timestep\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTimestep for aerosol advection (in seconds)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.3. Split Operator Physical Timestep\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTimestep for aerosol physics (in seconds).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.4. Integrated Timestep\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTimestep for the aerosol model (in seconds)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.5. 
Integrated Scheme Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSpecify the type of timestep scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Explicit\" \n# \"Implicit\" \n# \"Semi-implicit\" \n# \"Semi-analytic\" \n# \"Impact solver\" \n# \"Back Euler\" \n# \"Newton Raphson\" \n# \"Rosenbrock\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Meteorological Forcings\n**\n4.1. Variables 3D\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nThree dimensionsal forcing variables, e.g. U, V, W, T, Q, P, conventive mass flux", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.2. Variables 2D\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTwo dimensionsal forcing variables, e.g. land-sea mask definition", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.3. Frequency\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nFrequency with which meteological forcings are applied (in seconds).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "5. Key Properties --&gt; Resolution\nResolution in the aersosol model grid\n5.1. 
Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.2. Canonical Horizontal Resolution\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nExpression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.3. Number Of Horizontal Gridpoints\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTotal number of horizontal (XY) points (or degrees of freedom) on computational grid.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "5.4. Number Of Vertical Levels\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nNumber of vertical levels resolved on computational grid.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "5.5. Is Adaptive Grid\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDefault is False. Set true if grid resolution changes during execution.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6. Key Properties --&gt; Tuning Applied\nTuning methodology for aerosol model\n6.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics retained. &amp;Document the relative weight given to climate performance metrics versus process oriented metrics, &amp;and on the possible conflicts with parameterization level tuning. In particular describe any struggle &amp;with a parameter value that required pushing it to its limits to solve a particular model deficiency.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.2. Global Mean Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList set of metrics of the global mean state used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.3. Regional Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList of regional metrics of mean state used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.4. 
Trend Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList observed trend metrics used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7. Transport\nAerosol transport\n7.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of transport in atmosperic aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMethod for aerosol transport modeling", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses Atmospheric chemistry transport scheme\" \n# \"Specific transport scheme (eulerian)\" \n# \"Specific transport scheme (semi-lagrangian)\" \n# \"Specific transport scheme (eulerian and semi-lagrangian)\" \n# \"Specific transport scheme (lagrangian)\" \n# TODO - please enter value(s)\n", "7.3. Mass Conservation Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nMethod used to ensure mass conservation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses Atmospheric chemistry transport scheme\" \n# \"Mass adjustment\" \n# \"Concentrations positivity\" \n# \"Gradients monotonicity\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "7.4. 
Convention\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nTransport by convention", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.convention') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses Atmospheric chemistry transport scheme\" \n# \"Convective fluxes connected to tracers\" \n# \"Vertical velocities connected to tracers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8. Emissions\nAtmospheric aerosol emissions\n8.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of emissions in atmosperic aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nMethod used to define aerosol species (several methods allowed because the different species may not use the same method).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Prescribed (climatology)\" \n# \"Prescribed CMIP6\" \n# \"Prescribed above surface\" \n# \"Interactive\" \n# \"Interactive above surface\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.3. Sources\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nSources of the aerosol species are taken into account in the emissions scheme", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.emissions.sources') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Vegetation\" \n# \"Volcanos\" \n# \"Bare ground\" \n# \"Sea surface\" \n# \"Lightning\" \n# \"Fires\" \n# \"Aircraft\" \n# \"Anthropogenic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.4. Prescribed Climatology\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify the climatology type for aerosol emissions", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.prescribed_climatology') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Interannual\" \n# \"Annual\" \n# \"Monthly\" \n# \"Daily\" \n# TODO - please enter value(s)\n", "8.5. Prescribed Climatology Emitted Species\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of aerosol species emitted and prescribed via a climatology", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.6. Prescribed Spatially Uniform Emitted Species\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of aerosol species emitted and prescribed as spatially uniform", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.7. Interactive Emitted Species\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of aerosol species emitted and specified via an interactive method", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.8. Other Emitted Species\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of aerosol species emitted and specified via an &quot;other method&quot;", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.other_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.9. Other Method Characteristics\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCharacteristics of the &quot;other method&quot; used for aerosol emissions", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.other_method_characteristics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9. Concentrations\nAtmospheric aerosol concentrations\n9.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of concentrations in atmosperic aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.2. Prescribed Lower Boundary\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of species prescribed at the lower boundary.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.3. Prescribed Upper Boundary\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of species prescribed at the upper boundary.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.4. Prescribed Fields Mmr\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of species prescribed as mass mixing ratios.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.5. Prescribed Fields Mmr\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of species prescribed as AOD plus CCNs.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Optical Radiative Properties\nAerosol optical and radiative properties\n10.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of optical and radiative properties", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11. Optical Radiative Properties --&gt; Absorption\nAbsortion properties in aerosol scheme\n11.1. Black Carbon\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAbsorption mass coefficient of black carbon at 550nm (if non-absorbing enter 0)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.2. 
Dust\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAbsorption mass coefficient of dust at 550nm (if non-absorbing enter 0)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.3. Organics\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAbsorption mass coefficient of organics at 550nm (if non-absorbing enter 0)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "12. Optical Radiative Properties --&gt; Mixtures\n**\n12.1. External\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there external mixing with respect to chemical composition?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "12.2. Internal\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there internal mixing with respect to chemical composition?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "12.3. Mixing Rule\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf there is internal mixing with respect to chemical composition then indicate the mixinrg rule", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13. Optical Radiative Properties --&gt; Impact Of H2o\n**\n13.1. Size\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes H2O impact size?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "13.2. Internal Mixture\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes H2O impact internal mixture?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "14. Optical Radiative Properties --&gt; Radiative Scheme\nRadiative scheme for aerosol\n14.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of radiative scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14.2. Shortwave Bands\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of shortwave bands", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "14.3. 
Longwave Bands\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of longwave bands", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "15. Optical Radiative Properties --&gt; Cloud Interactions\nAerosol-cloud interactions\n15.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of aerosol-cloud interactions", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.2. Twomey\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the Twomey effect included?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "15.3. Twomey Minimum Ccn\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf the Twomey effect is included, then what is the minimum CCN number?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "15.4. Drizzle\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the scheme affect drizzle?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "15.5. Cloud Lifetime\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the scheme affect cloud lifetime?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "15.6. Longwave Bands\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of longwave bands", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "16. Model\nAerosol model\n16.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of atmosperic aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "16.2. Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nProcesses included in the Aerosol model.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.model.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Dry deposition\" \n# \"Sedimentation\" \n# \"Wet deposition (impaction scavenging)\" \n# \"Wet deposition (nucleation scavenging)\" \n# \"Coagulation\" \n# \"Oxidation (gas phase)\" \n# \"Oxidation (in cloud)\" \n# \"Condensation\" \n# \"Ageing\" \n# \"Advection (horizontal)\" \n# \"Advection (vertical)\" \n# \"Heterogeneous chemistry\" \n# \"Nucleation\" \n# TODO - please enter value(s)\n", "16.3. Coupling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nOther model components coupled to the Aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.coupling') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Radiation\" \n# \"Land surface\" \n# \"Heterogeneous chemistry\" \n# \"Clouds\" \n# \"Ocean\" \n# \"Cryosphere\" \n# \"Gas phase chemistry\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.4. Gas Phase Precursors\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of gas phase aerosol precursors.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.gas_phase_precursors') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"DMS\" \n# \"SO2\" \n# \"Ammonia\" \n# \"Iodine\" \n# \"Terpene\" \n# \"Isoprene\" \n# \"VOC\" \n# \"NOx\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.5. Scheme Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nType(s) of aerosol scheme used by the aerosols model (potentially multiple: some species may be covered by one type of aerosol scheme and other species covered by another type).", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.model.scheme_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Bulk\" \n# \"Modal\" \n# \"Bin\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.6. Bulk Scheme Species\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of species covered by the bulk scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.bulk_scheme_species') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Sulphate\" \n# \"Nitrate\" \n# \"Sea salt\" \n# \"Dust\" \n# \"Ice\" \n# \"Organic\" \n# \"Black carbon / soot\" \n# \"SOA (secondary organic aerosols)\" \n# \"POM (particulate organic matter)\" \n# \"Polar stratospheric ice\" \n# \"NAT (Nitric acid trihydrate)\" \n# \"NAD (Nitric acid dihydrate)\" \n# \"STS (supercooled ternary solution aerosol particule)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
tensorflow/docs-l10n
site/en-snapshot/guide/keras/preprocessing_layers.ipynb
apache-2.0
[ "Copyright 2020 The TensorFlow Authors.", "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "Working with preprocessing layers\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/guide/keras/preprocessing_layers\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/snapshot-keras/site/en/guide/keras/preprocessing_layers.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/keras-team/keras-io/blob/master/guides/preprocessing_layers.py\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/keras/preprocessing_layers.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>\n\nKeras preprocessing\nThe Keras preprocessing layers API allows developers to build Keras-native input\nprocessing pipelines. 
These input processing pipelines can be used as independent\npreprocessing code in non-Keras workflows, combined directly with Keras models, and\nexported as part of a Keras SavedModel.\nWith Keras preprocessing layers, you can build and export models that are truly\nend-to-end: models that accept raw images or raw structured data as input; models that\nhandle feature normalization or feature value indexing on their own.\nAvailable preprocessing\nText preprocessing\n\ntf.keras.layers.TextVectorization: turns raw strings into an encoded\n representation that can be read by an Embedding layer or Dense layer.\n\nNumerical features preprocessing\n\ntf.keras.layers.Normalization: performs feature-wise normalize of\n input features.\ntf.keras.layers.Discretization: turns continuous numerical features\n into integer categorical features.\n\nCategorical features preprocessing\n\ntf.keras.layers.CategoryEncoding: turns integer categorical features\n into one-hot, multi-hot, or count dense representations.\ntf.keras.layers.Hashing: performs categorical feature hashing, also known as\n the \"hashing trick\".\ntf.keras.layers.StringLookup: turns string categorical values an encoded\n representation that can be read by an Embedding layer or Dense layer.\ntf.keras.layers.IntegerLookup: turns integer categorical values into an\n encoded representation that can be read by an Embedding layer or Dense\n layer.\n\nImage preprocessing\nThese layers are for standardizing the inputs of an image model.\n\ntf.keras.layers.Resizing: resizes a batch of images to a target size.\ntf.keras.layers.Rescaling: rescales and offsets the values of a batch of\n image (e.g. go from inputs in the [0, 255] range to inputs in the [0, 1]\n range.\ntf.keras.layers.CenterCrop: returns a center crop of a batch of images.\n\nImage data augmentation\nThese layers apply random augmentation transforms to a batch of images. 
They\nare only active during training.\n\ntf.keras.layers.RandomCrop\ntf.keras.layers.RandomFlip\ntf.keras.layers.RandomTranslation\ntf.keras.layers.RandomRotation\ntf.keras.layers.RandomZoom\ntf.keras.layers.RandomHeight\ntf.keras.layers.RandomWidth\ntf.keras.layers.RandomContrast\n\nThe adapt() method\nSome preprocessing layers have an internal state that can be computed based on\na sample of the training data. The list of stateful preprocessing layers is:\n\nTextVectorization: holds a mapping between string tokens and integer indices\nStringLookup and IntegerLookup: hold a mapping between input values and integer\nindices.\nNormalization: holds the mean and standard deviation of the features.\nDiscretization: holds information about value bucket boundaries.\n\nCrucially, these layers are non-trainable. Their state is not set during training; it\nmust be set before training, either by initializing them from a precomputed constant,\nor by \"adapting\" them on data.\nYou set the state of a preprocessing layer by exposing it to training data, via the\nadapt() method:", "import numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras import layers\n\ndata = np.array([[0.1, 0.2, 0.3], [0.8, 0.9, 1.0], [1.5, 1.6, 1.7],])\nlayer = layers.Normalization()\nlayer.adapt(data)\nnormalized_data = layer(data)\n\nprint(\"Features mean: %.2f\" % (normalized_data.numpy().mean()))\nprint(\"Features std: %.2f\" % (normalized_data.numpy().std()))", "The adapt() method takes either a Numpy array or a tf.data.Dataset object. 
In the\ncase of StringLookup and TextVectorization, you can also pass a list of strings:", "data = [\n \"ξεῖν᾽, ἦ τοι μὲν ὄνειροι ἀμήχανοι ἀκριτόμυθοι\",\n \"γίγνοντ᾽, οὐδέ τι πάντα τελείεται ἀνθρώποισι.\",\n \"δοιαὶ γάρ τε πύλαι ἀμενηνῶν εἰσὶν ὀνείρων:\",\n \"αἱ μὲν γὰρ κεράεσσι τετεύχαται, αἱ δ᾽ ἐλέφαντι:\",\n \"τῶν οἳ μέν κ᾽ ἔλθωσι διὰ πριστοῦ ἐλέφαντος,\",\n \"οἵ ῥ᾽ ἐλεφαίρονται, ἔπε᾽ ἀκράαντα φέροντες:\",\n \"οἱ δὲ διὰ ξεστῶν κεράων ἔλθωσι θύραζε,\",\n \"οἵ ῥ᾽ ἔτυμα κραίνουσι, βροτῶν ὅτε κέν τις ἴδηται.\",\n]\nlayer = layers.TextVectorization()\nlayer.adapt(data)\nvectorized_text = layer(data)\nprint(vectorized_text)", "In addition, adaptable layers always expose an option to directly set state via\nconstructor arguments or weight assignment. If the intended state values are known at\nlayer construction time, or are calculated outside of the adapt() call, they can be set\nwithout relying on the layer's internal computation. For instance, if external vocabulary\nfiles for the TextVectorization, StringLookup, or IntegerLookup layers already\nexist, those can be loaded directly into the lookup tables by passing a path to the\nvocabulary file in the layer's constructor arguments.\nHere's an example where we instantiate a StringLookup layer with precomputed vocabulary:", "vocab = [\"a\", \"b\", \"c\", \"d\"]\ndata = tf.constant([[\"a\", \"c\", \"d\"], [\"d\", \"z\", \"b\"]])\nlayer = layers.StringLookup(vocabulary=vocab)\nvectorized_data = layer(data)\nprint(vectorized_data)", "Preprocessing data before the model or inside the model\nThere are two ways you could be using preprocessing layers:\nOption 1: Make them part of the model, like this:\npython\ninputs = keras.Input(shape=input_shape)\nx = preprocessing_layer(inputs)\noutputs = rest_of_the_model(x)\nmodel = keras.Model(inputs, outputs)\nWith this option, preprocessing will happen on device, synchronously with the rest of the\nmodel execution, meaning that it will benefit from GPU acceleration.\nIf you're 
training on GPU, this is the best option for the Normalization layer, and for\nall image preprocessing and data augmentation layers.\nOption 2: apply it to your tf.data.Dataset, so as to obtain a dataset that yields\nbatches of preprocessed data, like this:\npython\ndataset = dataset.map(lambda x, y: (preprocessing_layer(x), y))\nWith this option, your preprocessing will happen on CPU, asynchronously, and will be\nbuffered before going into the model.\nIn addition, if you call dataset.prefetch(tf.data.AUTOTUNE) on your dataset,\nthe preprocessing will happen efficiently in parallel with training:\npython\ndataset = dataset.map(lambda x, y: (preprocessing_layer(x), y))\ndataset = dataset.prefetch(tf.data.AUTOTUNE)\nmodel.fit(dataset, ...)\nThis is the best option for TextVectorization, and all structured data preprocessing\nlayers. It can also be a good option if you're training on CPU\nand you use image preprocessing layers.\nWhen running on TPU, you should always place preprocessing layers in the tf.data pipeline\n(with the exception of Normalization and Rescaling, which run fine on TPU and are commonly\nused as the first layer is an image model).\nBenefits of doing preprocessing inside the model at inference time\nEven if you go with option 2, you may later want to export an inference-only end-to-end\nmodel that will include the preprocessing layers. The key benefit to doing this is that\nit makes your model portable and it helps reduce the\ntraining/serving skew.\nWhen all data preprocessing is part of the model, other people can load and use your\nmodel without having to be aware of how each feature is expected to be encoded &\nnormalized. Your inference model will be able to process raw images or raw structured\ndata, and will not require users of the model to be aware of the details of e.g. the\ntokenization scheme used for text, the indexing scheme used for categorical features,\nwhether image pixel values are normalized to [-1, +1] or to [0, 1], etc. 
This is\nespecially powerful if you're exporting\nyour model to another runtime, such as TensorFlow.js: you won't have to\nreimplement your preprocessing pipeline in JavaScript.\nIf you initially put your preprocessing layers in your tf.data pipeline,\nyou can export an inference model that packages the preprocessing.\nSimply instantiate a new model that chains\nyour preprocessing layers and your training model:\npython\ninputs = keras.Input(shape=input_shape)\nx = preprocessing_layer(inputs)\noutputs = training_model(x)\ninference_model = keras.Model(inputs, outputs)\nQuick recipes\nImage data augmentation\nNote that image data augmentation layers are only active during training (similarly to\nthe Dropout layer).", "from tensorflow import keras\nfrom tensorflow.keras import layers\n\n# Create a data augmentation stage with horizontal flipping, rotations, zooms\ndata_augmentation = keras.Sequential(\n [\n layers.RandomFlip(\"horizontal\"),\n layers.RandomRotation(0.1),\n layers.RandomZoom(0.1),\n ]\n)\n\n# Load some data\n(x_train, y_train), _ = keras.datasets.cifar10.load_data()\ninput_shape = x_train.shape[1:]\nclasses = 10\n\n# Create a tf.data pipeline of augmented images (and their labels)\ntrain_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))\ntrain_dataset = train_dataset.batch(16).map(lambda x, y: (data_augmentation(x), y))\n\n\n# Create a model and train it on the augmented image data\ninputs = keras.Input(shape=input_shape)\nx = layers.Rescaling(1.0 / 255)(inputs) # Rescale inputs\noutputs = keras.applications.ResNet50( # Add the rest of the model\n weights=None, input_shape=input_shape, classes=classes\n)(x)\nmodel = keras.Model(inputs, outputs)\nmodel.compile(optimizer=\"rmsprop\", loss=\"sparse_categorical_crossentropy\")\nmodel.fit(train_dataset, steps_per_epoch=5)", "You can see a similar setup in action in the example\nimage classification from scratch.\nNormalizing numerical features", "# Load some data\n(x_train, y_train), _ = 
keras.datasets.cifar10.load_data()\nx_train = x_train.reshape((len(x_train), -1))\ninput_shape = x_train.shape[1:]\nclasses = 10\n\n# Create a Normalization layer and set its internal state using the training data\nnormalizer = layers.Normalization()\nnormalizer.adapt(x_train)\n\n# Create a model that include the normalization layer\ninputs = keras.Input(shape=input_shape)\nx = normalizer(inputs)\noutputs = layers.Dense(classes, activation=\"softmax\")(x)\nmodel = keras.Model(inputs, outputs)\n\n# Train the model\nmodel.compile(optimizer=\"adam\", loss=\"sparse_categorical_crossentropy\")\nmodel.fit(x_train, y_train)", "Encoding string categorical features via one-hot encoding", "# Define some toy data\ndata = tf.constant([[\"a\"], [\"b\"], [\"c\"], [\"b\"], [\"c\"], [\"a\"]])\n\n# Use StringLookup to build an index of the feature values and encode output.\nlookup = layers.StringLookup(output_mode=\"one_hot\")\nlookup.adapt(data)\n\n# Convert new test data (which includes unknown feature values)\ntest_data = tf.constant([[\"a\"], [\"b\"], [\"c\"], [\"d\"], [\"e\"], [\"\"]])\nencoded_data = lookup(test_data)\nprint(encoded_data)", "Note that, here, index 0 is reserved for out-of-vocabulary values\n(values that were not seen during adapt()).\nYou can see the StringLookup in action in the\nStructured data classification from scratch\nexample.\nEncoding integer categorical features via one-hot encoding", "# Define some toy data\ndata = tf.constant([[10], [20], [20], [10], [30], [0]])\n\n# Use IntegerLookup to build an index of the feature values and encode output.\nlookup = layers.IntegerLookup(output_mode=\"one_hot\")\nlookup.adapt(data)\n\n# Convert new test data (which includes unknown feature values)\ntest_data = tf.constant([[10], [10], [20], [50], [60], [0]])\nencoded_data = lookup(test_data)\nprint(encoded_data)", "Note that index 0 is reserved for missing values (which you should specify as the value\n0), and index 1 is reserved for out-of-vocabulary values 
(values that were not seen\nduring adapt()). You can configure this by using the mask_token and oov_token\nconstructor arguments of IntegerLookup.\nYou can see the IntegerLookup in action in the example\nstructured data classification from scratch.\nApplying the hashing trick to an integer categorical feature\nIf you have a categorical feature that can take many different values (on the order of\n10e3 or higher), where each value only appears a few times in the data,\nit becomes impractical and ineffective to index and one-hot encode the feature values.\nInstead, it can be a good idea to apply the \"hashing trick\": hash the values to a vector\nof fixed size. This keeps the size of the feature space manageable, and removes the need\nfor explicit indexing.", "# Sample data: 10,000 random integers with values between 0 and 100,000\ndata = np.random.randint(0, 100000, size=(10000, 1))\n\n# Use the Hashing layer to hash the values to the range [0, 64]\nhasher = layers.Hashing(num_bins=64, salt=1337)\n\n# Use the CategoryEncoding layer to multi-hot encode the hashed values\nencoder = layers.CategoryEncoding(num_tokens=64, output_mode=\"multi_hot\")\nencoded_data = encoder(hasher(data))\nprint(encoded_data.shape)", "Encoding text as a sequence of token indices\nThis is how you should preprocess text to be passed to an Embedding layer.", "# Define some text data to adapt the layer\nadapt_data = tf.constant(\n [\n \"The Brain is wider than the Sky\",\n \"For put them side by side\",\n \"The one the other will contain\",\n \"With ease and You beside\",\n ]\n)\n\n# Create a TextVectorization layer\ntext_vectorizer = layers.TextVectorization(output_mode=\"int\")\n# Index the vocabulary via `adapt()`\ntext_vectorizer.adapt(adapt_data)\n\n# Try out the layer\nprint(\n \"Encoded text:\\n\", text_vectorizer([\"The Brain is deeper than the sea\"]).numpy(),\n)\n\n# Create a simple model\ninputs = keras.Input(shape=(None,), dtype=\"int64\")\nx = 
layers.Embedding(input_dim=text_vectorizer.vocabulary_size(), output_dim=16)(inputs)\nx = layers.GRU(8)(x)\noutputs = layers.Dense(1)(x)\nmodel = keras.Model(inputs, outputs)\n\n# Create a labeled dataset (which includes unknown tokens)\ntrain_dataset = tf.data.Dataset.from_tensor_slices(\n ([\"The Brain is deeper than the sea\", \"for if they are held Blue to Blue\"], [1, 0])\n)\n\n# Preprocess the string inputs, turning them into int sequences\ntrain_dataset = train_dataset.batch(2).map(lambda x, y: (text_vectorizer(x), y))\n# Train the model on the int sequences\nprint(\"\\nTraining model...\")\nmodel.compile(optimizer=\"rmsprop\", loss=\"mse\")\nmodel.fit(train_dataset)\n\n# For inference, you can export a model that accepts strings as input\ninputs = keras.Input(shape=(1,), dtype=\"string\")\nx = text_vectorizer(inputs)\noutputs = model(x)\nend_to_end_model = keras.Model(inputs, outputs)\n\n# Call the end-to-end model on test data (which includes unknown tokens)\nprint(\"\\nCalling end-to-end model on test string...\")\ntest_data = tf.constant([\"The one the other will absorb\"])\ntest_output = end_to_end_model(test_data)\nprint(\"Model output:\", test_output)", "You can see the TextVectorization layer in action, combined with an Embedding mode,\nin the example\ntext classification from scratch.\nNote that when training such a model, for best performance, you should always\nuse the TextVectorization layer as part of the input pipeline.\nEncoding text as a dense matrix of ngrams with multi-hot encoding\nThis is how you should preprocess text to be passed to a Dense layer.", "# Define some text data to adapt the layer\nadapt_data = tf.constant(\n [\n \"The Brain is wider than the Sky\",\n \"For put them side by side\",\n \"The one the other will contain\",\n \"With ease and You beside\",\n ]\n)\n# Instantiate TextVectorization with \"multi_hot\" output_mode\n# and ngrams=2 (index all bigrams)\ntext_vectorizer = layers.TextVectorization(output_mode=\"multi_hot\", 
ngrams=2)\n# Index the bigrams via `adapt()`\ntext_vectorizer.adapt(adapt_data)\n\n# Try out the layer\nprint(\n \"Encoded text:\\n\", text_vectorizer([\"The Brain is deeper than the sea\"]).numpy(),\n)\n\n# Create a simple model\ninputs = keras.Input(shape=(text_vectorizer.vocabulary_size(),))\noutputs = layers.Dense(1)(inputs)\nmodel = keras.Model(inputs, outputs)\n\n# Create a labeled dataset (which includes unknown tokens)\ntrain_dataset = tf.data.Dataset.from_tensor_slices(\n ([\"The Brain is deeper than the sea\", \"for if they are held Blue to Blue\"], [1, 0])\n)\n\n# Preprocess the string inputs, turning them into int sequences\ntrain_dataset = train_dataset.batch(2).map(lambda x, y: (text_vectorizer(x), y))\n# Train the model on the int sequences\nprint(\"\\nTraining model...\")\nmodel.compile(optimizer=\"rmsprop\", loss=\"mse\")\nmodel.fit(train_dataset)\n\n# For inference, you can export a model that accepts strings as input\ninputs = keras.Input(shape=(1,), dtype=\"string\")\nx = text_vectorizer(inputs)\noutputs = model(x)\nend_to_end_model = keras.Model(inputs, outputs)\n\n# Call the end-to-end model on test data (which includes unknown tokens)\nprint(\"\\nCalling end-to-end model on test string...\")\ntest_data = tf.constant([\"The one the other will absorb\"])\ntest_output = end_to_end_model(test_data)\nprint(\"Model output:\", test_output)", "Encoding text as a dense matrix of ngrams with TF-IDF weighting\nThis is an alternative way of preprocessing text before passing it to a Dense layer.", "# Define some text data to adapt the layer\nadapt_data = tf.constant(\n [\n \"The Brain is wider than the Sky\",\n \"For put them side by side\",\n \"The one the other will contain\",\n \"With ease and You beside\",\n ]\n)\n# Instantiate TextVectorization with \"tf-idf\" output_mode\n# (multi-hot with TF-IDF weighting) and ngrams=2 (index all bigrams)\ntext_vectorizer = layers.TextVectorization(output_mode=\"tf-idf\", ngrams=2)\n# Index the bigrams and learn 
the TF-IDF weights via `adapt()`\n\nwith tf.device(\"CPU\"):\n # A bug that prevents this from running on GPU for now.\n text_vectorizer.adapt(adapt_data)\n\n# Try out the layer\nprint(\n \"Encoded text:\\n\", text_vectorizer([\"The Brain is deeper than the sea\"]).numpy(),\n)\n\n# Create a simple model\ninputs = keras.Input(shape=(text_vectorizer.vocabulary_size(),))\noutputs = layers.Dense(1)(inputs)\nmodel = keras.Model(inputs, outputs)\n\n# Create a labeled dataset (which includes unknown tokens)\ntrain_dataset = tf.data.Dataset.from_tensor_slices(\n ([\"The Brain is deeper than the sea\", \"for if they are held Blue to Blue\"], [1, 0])\n)\n\n# Preprocess the string inputs, turning them into int sequences\ntrain_dataset = train_dataset.batch(2).map(lambda x, y: (text_vectorizer(x), y))\n# Train the model on the int sequences\nprint(\"\\nTraining model...\")\nmodel.compile(optimizer=\"rmsprop\", loss=\"mse\")\nmodel.fit(train_dataset)\n\n# For inference, you can export a model that accepts strings as input\ninputs = keras.Input(shape=(1,), dtype=\"string\")\nx = text_vectorizer(inputs)\noutputs = model(x)\nend_to_end_model = keras.Model(inputs, outputs)\n\n# Call the end-to-end model on test data (which includes unknown tokens)\nprint(\"\\nCalling end-to-end model on test string...\")\ntest_data = tf.constant([\"The one the other will absorb\"])\ntest_output = end_to_end_model(test_data)\nprint(\"Model output:\", test_output)\n", "Important gotchas\nWorking with lookup layers with very large vocabularies\nYou may find yourself working with a very large vocabulary in a TextVectorization, a StringLookup layer,\nor an IntegerLookup layer. Typically, a vocabulary larger than 500MB would be considered \"very large\".\nIn such case, for best performance, you should avoid using adapt().\nInstead, pre-compute your vocabulary in advance\n(you could use Apache Beam or TF Transform for this)\nand store it in a file. 
Then load the vocabulary into the layer at construction\ntime by passing the filepath as the vocabulary argument.\nUsing lookup layers on a TPU pod or with ParameterServerStrategy.\nThere is an outstanding issue that causes performance to degrade when using\na TextVectorization, StringLookup, or IntegerLookup layer while\ntraining on a TPU pod or on multiple machines via ParameterServerStrategy.\nThis is slated to be fixed in TensorFlow 2.7." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
lknelson/text-analysis-2017
05-TextExploration/00-IntroductionToTopicModeling_ExerciseSolutions.ipynb
bsd-3-clause
[ "Introduction to Topic Modeling\nToday we'll implement the most basic, and the original, topic modeling algorithm, LDA, using Python's scikit-learn. The other major topic modeling package is Gensim. \nLearning Goals\n\nImplement a basic topic modeling algorithm and learn how to tweak it\nLearn how to use different methods to calculate topic prevalence\nLearn how to create some simple graphs with this output\nThink though how and why you might use topic modeling in a text analysis project\n\nOutline\n<ol start=\"0\">\n <li>[The Pandas Dataframe: Music Reviews](#df)</li>\n <li>[Fit an LDA Topic Model using scikit-learn](#fit)</li>\n <li>[Document by Topic Distribution](#dtd)</li>\n <li>[Words Aligned with each Topic](#words)</li>\n <li>[Topic Prevalence](#prev)</li>\n <li>[Topics Over Time](#time)</li>\n</ol>\n\nKey Terms\n\n\nTopic Modeling:\n\nA statistical model to uncover abstract topics within a text. It uses the co-occurrence fo words within documents, compared to their distribution across documents, to uncover these abstract themes. The output is a list of weighted words, which indicate the subject of each topic, and a weight distribution across topics for each document.\n\n\n\nLDA:\n\nLatent Dirichlet Allocation. A implementation of topic modeling that assumes a Dirichlet prior. It does not take document order into account, unlike other topic modeling algorithms.\n\n\n\nFurther Resources\nMore detailed description of implementing LDA using scikit-learn.\n<a id='df'></a>\n0. The Pandas Dataframe: Music Reviews\nFirst, we read our music reviews corpus, which is stored as a .csv file on our hard drive, into a Pandas dataframe.", "import pandas\nimport numpy as np\nimport matplotlib.pyplot as plt\ndf_lit = pandas.read_csv(\"../Data/childrens_lit.csv.bz2\", sep='\\t', index_col=0, encoding = 'utf-8', compression='bz2')\n\n#drop rows where the text is missing.\ndf_lit = df_lit.dropna(subset=['text'])\n\n#view the dataframe\ndf_lit", "<a id='fit'></a>\n1. 
Fit a Topic Model, using LDA\nNow we're ready to fit the model. This requires the use of CountVecorizer, which we've already used, and the scikit-learn function LatentDirichletAllocation.\nSee here for more information about this function.", "####Adopted From: \n#Author: Olivier Grisel <olivier.grisel@ensta.org>\n# Lars Buitinck\n# Chyi-Kwei Yau <chyikwei.yau@gmail.com>\n# License: BSD 3 clause\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.decomposition import LatentDirichletAllocation\n\nn_samples = 2000\nn_topics = 4\nn_top_words = 50\n\n##This is a function to print out the top words for each topic in a pretty way.\n#Don't worry too much about understanding every line of this code.\ndef print_top_words(model, feature_names, n_top_words):\n for topic_idx, topic in enumerate(model.components_):\n print(\"\\nTopic #%d:\" % topic_idx)\n print(\" \".join([feature_names[i]\n for i in topic.argsort()[:-n_top_words - 1:-1]]))\n print()\n\n# Vectorize our text using CountVectorizer\nprint(\"Extracting tf features for LDA...\")\ntf_vectorizer = CountVectorizer(max_df=0.80, min_df=50,\n max_features=None,\n stop_words='english'\n )\n\ntf = tf_vectorizer.fit_transform(df_lit.text)\n\nprint(\"Fitting LDA models with tf features, \"\n \"n_samples=%d and n_topics=%d...\"\n % (n_samples, n_topics))\n\n#define the lda function, with desired options\n#Check the documentation, linked above, to look through the options\nlda = LatentDirichletAllocation(n_topics=n_topics, max_iter=20,\n learning_method='online',\n learning_offset=80.,\n total_samples=n_samples,\n random_state=0)\n#fit the model\nlda.fit(tf)\n\n#print the top words per topic, using the function defined above.\n#Unlike R, which has a built-in function to print top words, we have to write our own for scikit-learn\n#I think this demonstrates the different aims of the two packages: R is for social scientists, Python for computer scientists\n\nprint(\"\\nTopics in LDA 
model:\")\ntf_feature_names = tf_vectorizer.get_feature_names()\nprint_top_words(lda, tf_feature_names, n_top_words)\n\n####Exercise:\n###Copy and paste the above code and fit a new model, lda_new, by changing some of the parameters. How does this change the output.\n###Suggestions:\n## 1. Change the number of topics. \n## 2. Do not remove stop words. \n## 3. Change other options, either in the vectorize stage or the LDA model\n\nlda_new = LatentDirichletAllocation(n_topics=10, max_iter=20,\n learning_method='online',\n learning_offset=80.,\n total_samples=n_samples,\n random_state=0)\n#fit the model\nlda_new.fit(tf)", "<a id='dtd'></a>\n2. Document by Topic Distribution\nOne thing we may want to do with the output is find the most representative texts for each topic. A simple way to do this (but not memory efficient), is to merge the topic distribution back into the Pandas dataframe.\nFirst get the topic distribution array.", "topic_dist = lda.transform(tf)\ntopic_dist", "Merge back in with the original dataframe.", "topic_dist_df = pandas.DataFrame(topic_dist)\ndf_w_topics = topic_dist_df.join(df_lit)\ndf_w_topics", "Now we can sort the dataframe for the topic of interest, and view the top documents for the topics.\nBelow we sort the documents first by Topic 0 (looking at the top words for this topic I think it's about family, health, and domestic activities), and next by Topic 1 (again looking at the top words I think this topic is about children playing outside in nature). These topics may be a family/nature split?\nLook at the titles for the two different topics. Look at the gender of the author. 
Hypotheses?", "print(df_w_topics[['title', 'author gender', 0]].sort_values(by=[0], ascending=False))\n\nprint(df_w_topics[['title', 'author gender', 1]].sort_values(by=[1], ascending=False))\n\n#EX: What is the average topic weight by author gender, for each topic?\n### Grapth these results\n#Hint: You can use the python 'range' function and a for-loop\n\ngrouped_mean=df_w_topics.groupby('author gender').mean()\ngrouped_mean[[0,1,2,3]].plot(kind='bar')\nplt.show()\n", "<a id='words'></a>\n3. Words Aligned with each Topic\nFollowing DiMaggio et al., we can calculate the total number of words aligned with each topic, and compare by author gender.", "#first create word count column\n\ndf_w_topics['word_count'] = df_w_topics['text'].apply(lambda x: len(str(x).split()))\ndf_w_topics['word_count']\n\n#multiple topic weight by word count\n\ndf_w_topics['0_wc'] = df_w_topics[0] * df_w_topics['word_count']\ndf_w_topics['0_wc']\n\n#create a for loop to do this for every topic\n\ntopic_columns = range(0, n_topics)\ncol_list = []\nfor num in topic_columns:\n col = \"%d_wc\" % num\n col_list.append(col)\n #Solution\n df_w_topics[col] = df_w_topics[num] * df_w_topics['word_count']\n \ndf_w_topics\n\n#EX: What is the total number of words aligned with each topic, by author gender?\n \n###Solution\ngrouped = df_w_topics.groupby(\"author gender\")\ngrouped.sum()\n\n#EX: What is the proportion of total words aligned with each topic, by author gender?\nwc_columns = ['0_wc', '1_wc', '2_wc', '3_wc']\nfor n in wc_columns:\n print(n)\n print(grouped[n].sum()/grouped['word_count'].sum())", "Question: Why might we want to do one calculation over the other? Take average topic weight per documents versus the average number of words aligned with each topic?\nThis brings us to...\n<a id='prev'></a>\n4. Topic Prevalence", "###EX: \n# Find the most prevalent topic in the corpus.\n# Find the least prevalent topic in the corpus. \n# Hint: How do we define prevalence? 
What are different ways of measuring this,\n# and the benefits/drawbacks of each? \n\nfor e in col_list:\n print(e)\n print(df_w_topics[e].sum()/df_w_topics['word_count'].sum())\n\nfor e in topic_columns:\n print(e)\n print(df_w_topics[e].mean())", "<a id='time'></a>\n4. Prevalence over time\nWe can do the same as above, but by year, to graph the prevalence of each topic over time.", "grouped_year = df_w_topics.groupby('year')\nfig3 = plt.figure()\nchrt = 0\nfor e in col_list:\n chrt += 1 \n ax2 = fig3.add_subplot(2,3, chrt)\n (grouped_year[e].sum()/grouped_year['word_count'].sum()).plot(kind='line', title=e)\n \nfig3.tight_layout()\nplt.show()", "Topic 2 I interpret to be about battles in France. What is going on between 1800 and 1804 in France that might make this topic increasingly popular over this time period?" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ES-DOC/esdoc-jupyterhub
notebooks/miroc/cmip6/models/sandbox-1/ocnbgchem.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Ocnbgchem\nMIP Era: CMIP6\nInstitute: MIROC\nSource ID: SANDBOX-1\nTopic: Ocnbgchem\nSub-Topics: Tracers. \nProperties: 65 (37 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-20 15:02:41\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'miroc', 'sandbox-1', 'ocnbgchem')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties\n2. Key Properties --&gt; Time Stepping Framework --&gt; Passive Tracers Transport\n3. Key Properties --&gt; Time Stepping Framework --&gt; Biology Sources Sinks\n4. Key Properties --&gt; Transport Scheme\n5. Key Properties --&gt; Boundary Forcing\n6. Key Properties --&gt; Gas Exchange\n7. Key Properties --&gt; Carbon Chemistry\n8. Tracers\n9. Tracers --&gt; Ecosystem\n10. Tracers --&gt; Ecosystem --&gt; Phytoplankton\n11. Tracers --&gt; Ecosystem --&gt; Zooplankton\n12. Tracers --&gt; Disolved Organic Matter\n13. Tracers --&gt; Particules\n14. Tracers --&gt; Dic Alkalinity \n1. Key Properties\nOcean Biogeochemistry key properties\n1.1. Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of ocean biogeochemistry model", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocnbgchem.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of ocean biogeochemistry model code (PISCES 2.0,...)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.3. Model Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of ocean biogeochemistry model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.model_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Geochemical\" \n# \"NPZD\" \n# \"PFT\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.4. Elemental Stoichiometry\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe elemental stoichiometry (fixed, variable, mix of the two)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Fixed\" \n# \"Variable\" \n# \"Mix of both\" \n# TODO - please enter value(s)\n", "1.5. Elemental Stoichiometry Details\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe which elements have fixed/variable stoichiometry", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.6. 
Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of all prognostic tracer variables in the ocean biogeochemistry component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.prognostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.7. Diagnostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of all diagnotic tracer variables in the ocean biogeochemistry component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.diagnostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.8. Damping\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe any tracer damping used (such as artificial correction or relaxation to climatology,...)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.damping') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Time Stepping Framework --&gt; Passive Tracers Transport\nTime stepping method for passive tracers transport in ocean biogeochemistry\n2.1. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime stepping framework for passive tracers", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"use ocean model transport time step\" \n# \"use specific time step\" \n# TODO - please enter value(s)\n", "2.2. 
Timestep If Not From Ocean\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTime step for passive tracers (if different from ocean)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.timestep_if_not_from_ocean') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3. Key Properties --&gt; Time Stepping Framework --&gt; Biology Sources Sinks\nTime stepping framework for biology sources and sinks in ocean biogeochemistry\n3.1. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime stepping framework for biology sources and sinks", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"use ocean model transport time step\" \n# \"use specific time step\" \n# TODO - please enter value(s)\n", "3.2. Timestep If Not From Ocean\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTime step for biology sources and sinks (if different from ocean)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.timestep_if_not_from_ocean') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Transport Scheme\nTransport scheme in ocean biogeochemistry\n4.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of transport scheme", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Offline\" \n# \"Online\" \n# TODO - please enter value(s)\n", "4.2. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTransport scheme used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Use that of ocean model\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "4.3. Use Different Scheme\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDecribe transport scheme if different than that of ocean model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.use_different_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5. Key Properties --&gt; Boundary Forcing\nProperties of biogeochemistry boundary forcing\n5.1. Atmospheric Deposition\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how atmospheric deposition is modeled", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.atmospheric_deposition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"from file (climatology)\" \n# \"from file (interannual variations)\" \n# \"from Atmospheric Chemistry model\" \n# TODO - please enter value(s)\n", "5.2. River Input\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how river input is modeled", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.river_input') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"from file (climatology)\" \n# \"from file (interannual variations)\" \n# \"from Land Surface model\" \n# TODO - please enter value(s)\n", "5.3. Sediments From Boundary Conditions\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList which sediments are speficied from boundary condition", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_boundary_conditions') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.4. Sediments From Explicit Model\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList which sediments are speficied from explicit sediment model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_explicit_model') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6. Key Properties --&gt; Gas Exchange\n*Properties of gas exchange in ocean biogeochemistry *\n6.1. CO2 Exchange Present\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs CO2 gas exchange modeled ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6.2. CO2 Exchange Type\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe CO2 gas exchange", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"OMIP protocol\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "6.3. O2 Exchange Present\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs O2 gas exchange modeled ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6.4. O2 Exchange Type\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe O2 gas exchange", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"OMIP protocol\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "6.5. DMS Exchange Present\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs DMS gas exchange modeled ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6.6. DMS Exchange Type\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify DMS gas exchange scheme type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.7. 
N2 Exchange Present\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs N2 gas exchange modeled ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6.8. N2 Exchange Type\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify N2 gas exchange scheme type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.9. N2O Exchange Present\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs N2O gas exchange modeled ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6.10. N2O Exchange Type\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify N2O gas exchange scheme type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.11. CFC11 Exchange Present\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs CFC11 gas exchange modeled ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6.12. 
CFC11 Exchange Type\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify CFC11 gas exchange scheme type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.13. CFC12 Exchange Present\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs CFC12 gas exchange modeled ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6.14. CFC12 Exchange Type\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify CFC12 gas exchange scheme type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.15. SF6 Exchange Present\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs SF6 gas exchange modeled ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6.16. SF6 Exchange Type\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify SF6 gas exchange scheme type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.17. 
13CO2 Exchange Present\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs 13CO2 gas exchange modeled ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6.18. 13CO2 Exchange Type\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify 13CO2 gas exchange scheme type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.19. 14CO2 Exchange Present\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs 14CO2 gas exchange modeled ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6.20. 14CO2 Exchange Type\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify 14CO2 gas exchange scheme type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.21. Other Gases\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify any other gas exchange", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.other_gases') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7. 
Key Properties --&gt; Carbon Chemistry\nProperties of carbon chemistry biogeochemistry\n7.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how carbon chemistry is modeled", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"OMIP protocol\" \n# \"Other protocol\" \n# TODO - please enter value(s)\n", "7.2. PH Scale\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf NOT OMIP protocol, describe pH scale.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.pH_scale') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Sea water\" \n# \"Free\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "7.3. Constants If Not OMIP\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf NOT OMIP protocol, list carbon chemistry constants.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.constants_if_not_OMIP') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8. Tracers\nOcean biogeochemistry tracers\n8.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of tracers in ocean biogeochemistry", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Sulfur Cycle Present\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs sulfur cycle modeled ?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocnbgchem.tracers.sulfur_cycle_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "8.3. Nutrients Present\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList nutrient species present in ocean biogeochemistry model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.nutrients_present') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Nitrogen (N)\" \n# \"Phosphorous (P)\" \n# \"Silicium (S)\" \n# \"Iron (Fe)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.4. Nitrous Species If N\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nIf nitrogen present, list nitrous species.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.nitrous_species_if_N') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Nitrates (NO3)\" \n# \"Amonium (NH4)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.5. Nitrous Processes If N\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nIf nitrogen present, list nitrous processes.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.nitrous_processes_if_N') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Dentrification\" \n# \"N fixation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9. Tracers --&gt; Ecosystem\nEcosystem properties in ocean biogeochemistry\n9.1. Upper Trophic Levels Definition\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDefinition of upper trophic level (e.g. based on size) ?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_definition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.2. Upper Trophic Levels Treatment\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDefine how upper trophic level are treated", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_treatment') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Tracers --&gt; Ecosystem --&gt; Phytoplankton\nPhytoplankton properties in ocean biogeochemistry\n10.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of phytoplankton", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Generic\" \n# \"PFT including size based (specify both below)\" \n# \"Size based only (specify below)\" \n# \"PFT only (specify below)\" \n# TODO - please enter value(s)\n", "10.2. Pft\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nPhytoplankton functional types (PFT) (if applicable)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.pft') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Diatoms\" \n# \"Nfixers\" \n# \"Calcifiers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "10.3. Size Classes\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nPhytoplankton size classes (if applicable)", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.size_classes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Microphytoplankton\" \n# \"Nanophytoplankton\" \n# \"Picophytoplankton\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "11. Tracers --&gt; Ecosystem --&gt; Zooplankton\nZooplankton properties in ocean biogeochemistry\n11.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of zooplankton", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Generic\" \n# \"Size based (specify below)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "11.2. Size Classes\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nZooplankton size classes (if applicable)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.size_classes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Microzooplankton\" \n# \"Mesozooplankton\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "12. Tracers --&gt; Disolved Organic Matter\nDisolved organic matter properties in ocean biogeochemistry\n12.1. Bacteria Present\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there bacteria representation ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.bacteria_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "12.2. 
Lability\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe treatment of lability in dissolved organic matter", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.lability') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Labile\" \n# \"Semi-labile\" \n# \"Refractory\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13. Tracers --&gt; Particules\nParticulate carbon properties in ocean biogeochemistry\n13.1. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is particulate carbon represented in ocean biogeochemistry?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.particules.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Diagnostic\" \n# \"Diagnostic (Martin profile)\" \n# \"Diagnostic (Balast)\" \n# \"Prognostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.2. Types If Prognostic\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nIf prognostic, type(s) of particulate matter taken into account", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.particules.types_if_prognostic') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"POC\" \n# \"PIC (calcite)\" \n# \"PIC (aragonite\" \n# \"BSi\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.3. Size If Prognostic\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf prognostic, describe if a particule size spectrum is used to represent distribution of particules in water volume", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_prognostic') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"No size spectrum used\" \n# \"Full size spectrum\" \n# \"Discrete size classes (specify which below)\" \n# TODO - please enter value(s)\n", "13.4. Size If Discrete\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf prognostic and discrete size, describe which size classes are used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_discrete') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13.5. Sinking Speed If Prognostic\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf prognostic, method for calculation of sinking speed of particules", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.particules.sinking_speed_if_prognostic') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Function of particule size\" \n# \"Function of particule type (balast)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14. Tracers --&gt; Dic Alkalinity\nDIC and alkalinity properties in ocean biogeochemistry\n14.1. Carbon Isotopes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhich carbon isotopes are modelled (C13, C14)?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.carbon_isotopes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"C13\" \n# \"C14)\" \n# TODO - please enter value(s)\n", "14.2. Abiotic Carbon\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs abiotic carbon modelled ?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.abiotic_carbon') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "14.3. Alkalinity\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is alkalinity modelled ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.alkalinity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Prognostic\" \n# \"Diagnostic)\" \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
saezlab/kinact
doc/networkin_example.ipynb
gpl-3.0
[ "Example application for NetworKIN-based analysis\nHere, the utility function provided in kinact that enable the usage of NetworKIN will be introduced shortly. We start again by loading the example data from de Graaf et al. included in the package.", "import kinact\ndata_log2, data_p_value = kinact.get_example_data()\nprint data_log2.head()", "NetworKIN uses as input two different files\n+ fasta_file:\n A file containing all sequences of the proteins of interest\n+ site_file:\n A file listing all phosphosites in the format: ID tab position tab residue\nWith the function prepare_networkin_files, the needed files with the right layout are produced in a specified directory, based on a list of phosphosites in the format Uniprot-Accession-ID_ResiduePosition.", "kinact.networkin.prepare_networkin_files(phospho_sites=data_log2.index.tolist(), \n output_dir='./networkin_example_files/', \n organism='human')", "Usage of NetworKIN\nWeb-Interface\nNetworKIN can be used via the high-throughput version of the web interface. In order to do so, select 'Human - UniProt' or 'Yeast - Uniprot' from the drop-down menu and paste the contents of the file 'site_file.txt' into the dedicated field. It is possible, that several phosphosites cannot be matched correctly due to different versions of the UniProt database (these will have to be removed manually). After clicking the 'Submit'-Button, NetworKIN will try to map the UniProt Identifiers to STRING in order to integrate contextual information for the prediction. On the next page, possible problems with the matching will be displayed and the user will be prompted to select isoforms or homologs. After clicking 'Next' at the bottom of the page, NetworKIN will predict likely upstream kinases.\nOn the page displaying the results, there is a 'Save' button. 
Select 'Full Dataset' and save the file as output.txt.\nLocally\nNetworKIN can also be used locally on your machine, which may be easier depending on the number of phosphosites in your dataset. In order to do so, download the NetworKIN release, the NetPhorest release, and the blast algorithm (important: blast to has be the version 2.2.17, which can be found here) from the dedicated websites. Now, NetPhorest has to be compiled, using a gcc compiler version 3.x., like this:\ncd \"NetPhorest-directory\"\ncc -03 -o netphorest netphorest.c -lm\nThe prediction can then be performed with the following command:\npython \"path to NetworKIN.py\" -n \"path to netphorest\" -b \"path to blast\" \"Taxon Identifier for organism of interest\" fasta_file site_file\ne.g.:\npython ./NetworKIN.py -n ../netphorest/netphorest -b ../blast-2.2.17/bin/blastall 9606 ./fasta_file.txt ./site_file.txt &gt; output.txt\nThe output file can then be used to create the adjacency matrix with a dedicated function in kinact.", "adjacency_matrix = kinact.networkin.get_kinase_targets_from_networkin('./networkin_example_files/output.txt', \n add_omnipath=False, score_cut_off=1)\n\nscores, p_values = kinact.networkin.weighted_mean(data_fc=data_log2['5min'], \n interactions=adjacency_matrix, \n mP=data_log2.values.mean(), \n delta=data_log2.values.std())\nprint scores.sort_values(ascending=False).head()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
aschaffn/phys202-2015-work
assignments/assignment08/InterpolationEx02.ipynb
mit
[ "Interpolation Exercise 2", "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nsns.set_style('white')\n\nfrom scipy.interpolate import griddata", "Sparse 2d interpolation\nIn this example the values of a scalar field $f(x,y)$ are known at a very limited set of points in a square domain:\n\nThe square domain covers the region $x\\in[-5,5]$ and $y\\in[-5,5]$.\nThe values of $f(x,y)$ are zero on the boundary of the square at integer spaced points.\nThe value of $f$ is known at a single interior point: $f(0,0)=1.0$.\nThe function $f$ is not known at any other points.\n\nCreate arrays x, y, f:\n\nx should be a 1d array of the x coordinates on the boundary and the 1 interior point.\ny should be a 1d array of the y coordinates on the boundary and the 1 interior point.\nf should be a 1d array of the values of f at the corresponding x and y coordinates.\n\nYou might find that np.hstack is helpful.", "# YOUR CODE HERE\nraise NotImplementedError()", "The following plot should show the points on the boundary and the single point in the interior:", "plt.scatter(x, y);\n\nassert x.shape==(41,)\nassert y.shape==(41,)\nassert f.shape==(41,)\nassert np.count_nonzero(f)==1", "Use meshgrid and griddata to interpolate the function $f(x,y)$ on the entire square domain:\n\nxnew and ynew should be 1d arrays with 100 points between $[-5,5]$.\nXnew and Ynew should be 2d versions of xnew and ynew created by meshgrid.\nFnew should be a 2d array with the interpolated values of $f(x,y)$ at the points (Xnew,Ynew).\nUse cubic spline interpolation.", "# YOUR CODE HERE\nraise NotImplementedError()\n\nassert xnew.shape==(100,)\nassert ynew.shape==(100,)\nassert Xnew.shape==(100,100)\nassert Ynew.shape==(100,100)\nassert Fnew.shape==(100,100)", "Plot the values of the interpolated scalar field using a contour plot. 
Customize your plot to make it effective and beautiful.", "# YOUR CODE HERE\nraise NotImplementedError()\n\nassert True # leave this to grade the plot" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
delsner/dl-exploration
notebooks/07 - LSTM classifier tensorflow.ipynb
mit
[ "LSTM sentiment classifier", "import math\nimport pickle as p\nimport tensorflow as tf\nimport numpy as np\nimport json", "Quickstart: simple NN in tensorflow", "n_input_nodes = 2\nn_output_nodes = 1\n\nx = tf.placeholder(tf.float32, (None, n_input_nodes))\nW = tf.Variable(tf.ones((n_input_nodes, n_output_nodes)), dtype=tf.float32)\nb = tf.Variable(tf.zeros(n_output_nodes), dtype=tf.float32)\n\nz = tf.matmul(x, W) + b\nout = tf.sigmoid(z)\n\ntest_input = [[0.5, 0.5]]\nwith tf.Session() as session:\n init = tf.global_variables_initializer()\n session.run(init)\n feed_dict = {x: test_input}\n output = session.run([out], feed_dict=feed_dict)\n print(output[0])", "LSTMs for Tweet Sentiment Classification\nsee https://github.com/nicholaslocascio/bcs-lstm/blob/master/Lab.ipynb\nSentiment classification will be done based on words, not on characters!\nModel Parameters", "# set variables\ntweet_size = 20\nhidden_size = 100\nvocab_size = 7597 # amount of words in our vocabulary\nbatch_size = 64\n\n# this just makes sure that all our following operations will be placed in the right graph.\ntf.reset_default_graph()\n\n# create a session variable that we can run later.\nsession = tf.Session()", "Placeholders for input", "# batch_size x tweet_size (each word in tweet) x one_hot_vector of size vocab_size\ntweets = tf.placeholder(dtype=tf.float32, shape=[None, tweet_size, vocab_size])\n# 1d vector of size batch_size as we predict one value per tweet in batch\nlabels = tf.placeholder(dtype=tf.float32, shape=[None])", "Build LSTM layers\nWe want to feed the input sequence, word by word, into an LSTM layer, or multiple LSTM layers (we could also call this an LSTM encoder). At each \"timestep\", we feed in the next word, and the LSTM updates its cell state. 
The final LSTM cell state can then be fed through a final classification layer(s) to get our sentiment prediction.", "# create 2 LSTM cells -> creates a layer of LSTM cells not just a single one\nlstm_cell_1 = tf.contrib.rnn.LSTMCell(hidden_size)\nlstm_cell_2 = tf.contrib.rnn.LSTMCell(hidden_size)\n\n# create multiple LSTM layers by wrapping the two lstm cells in MultiRNNCell\nmulti_lstm_cells = tf.contrib.rnn.MultiRNNCell([lstm_cell_1, lstm_cell_2], state_is_tuple=True)\n\n# define operation that runs LSTM graph across time, on the data\n_, final_state = tf.nn.dynamic_rnn(multi_lstm_cells, tweets, dtype=tf.float32)", "Classification layer\nOnce we have the final state of the LSTM layers after feeding in the tweet word by word we can take it and feed it into a classfication layer.", "# function to create a weight matrix + bias parameters and matrix multiplication\ndef linear(input_, output_size, name, init_bias=0.0):\n shape = input_.get_shape().as_list()\n with tf.variable_scope(name):\n W = tf.get_variable(\n name='weights',\n shape=[shape[-1], output_size],\n dtype=tf.float32,\n initializer=tf.random_normal_initializer(\n stddev=1.0 / math.sqrt(shape[-1])))\n if init_bias is None:\n return tf.matmul(input_, W)\n with tf.variable_scope(name):\n b = tf.get_variable(\n name='bias',\n shape=[output_size],\n initializer=tf.constant_initializer(init_bias))\n return tf.matmul(input_, W) + b\n\n'''\n {Quick note that we need to feed in final_state[-1][-1] into linear since \n final_state is actually a tuple consisting of the cell state \n (used internally for the cell to keep track of things) \n as well as the hidden state (the output of the cell), and one of these \n tuples for each layer. 
We want the hidden state for the last layer, so we use \n final_state[-1][-1]}''';\n# pass final state into linear function to get output\nsentiment = linear(final_state[-1][-1], 1, 'output')\n\n# define loss (cross-entropy) -> output of classfication layer (logit) needs to be transformed to probability in [0,1] -> use sigmoid\nsentiment = tf.squeeze(sentiment, [1])\n\n# gives loss for each example in batch\nloss = tf.nn.sigmoid_cross_entropy_with_logits(logits=sentiment, labels=labels)\n\n# take mean of all losses\nloss = tf.reduce_mean(loss)\n\n# round probilities to get 1 or 0 classfication\nprob = tf.nn.sigmoid(sentiment)\nprediction = tf.to_float(tf.greater_equal(prob, 0.5))\n\n# calculate sum of errors based on which predictions were actually correct\npred_err = tf.to_float(tf.not_equal(prediction, labels))\npred_err = tf.reduce_sum(pred_err)\n\n\n# train model - define optimizer (adam)\noptimizer = tf.train.AdamOptimizer().minimize(loss)\n\n# initialize variables\ntf.global_variables_initializer().run(session=session)\n\ndef one_hot(raw_data, vocab_size):\n data = np.zeros((len(raw_data), 20, vocab_size))\n for tweet_index in range(len(raw_data)):\n tweet = raw_data[tweet_index]\n for word_index in range(20):\n word_id = tweet[word_index]\n data[tweet_index, word_index, word_id] = 1\n return data\n\n# load data and separate into tweets and labels\ntrain_data = json.load(open('trainTweets_preprocessed.json', 'r'))\ntrain_data = list(\n map(lambda row: (np.array(row[0], dtype=np.int32), str(row[1])),\n train_data))\ntrain_tweets = np.array([t[0] for t in train_data])\ntrain_labels = np.array([int(t[1]) for t in train_data])\n\ntest_data = json.load(open('testTweets_preprocessed.json', 'r'))\ntest_data = list(\n map(lambda row: (np.array(row[0], dtype=np.int32), str(row[1])),\n test_data))\n\nprint(train_tweets[:5])\nprint(train_labels[:5])\n# we are just taking the first 1000 things from the test set for faster evaluation\ntest_data = 
test_data[0:1000]\ntest_tweets = np.array([t[0] for t in test_data])\none_hot_test_tweets = one_hot(test_tweets, vocab_size)\ntest_labels = np.array([int(t[1]) for t in test_data])\n\n# we'll train with batches of size 64. This means that we run \n# our model on 64 examples and then do gradient descent based on the loss\n# over those 64 examples.\nnum_steps = 1000\n\nfor step in range(num_steps):\n # get data for a batch\n offset = (step * batch_size) % (len(train_data) - batch_size)\n batch_tweets = one_hot(train_tweets[offset:(offset + batch_size)],\n vocab_size)\n batch_labels = train_labels[offset:(offset + batch_size)]\n\n # put this data into a dictionary that we feed in when we run\n # the graph. this data fills in the placeholders we made in the graph.\n data = {tweets: batch_tweets, labels: batch_labels}\n\n # run the 'optimizer', 'loss', and 'pred_err' operations in the graph\n _, loss_value_train, error_value_train = session.run(\n [optimizer, loss, pred_err], feed_dict=data)\n\n # print stuff every 50 steps to see how we are doing\n if (step % 50 == 0):\n print(\"Minibatch train loss at step\", step, \":\", loss_value_train)\n print(\"Minibatch train error: %.3f%%\" % error_value_train)\n\n # get test evaluation\n test_loss = []\n test_error = []\n for batch_num in range(int(len(test_data) / batch_size)):\n test_offset = (batch_num * batch_size) % (\n len(test_data) - batch_size)\n test_batch_tweets = one_hot_test_tweets[test_offset:(\n test_offset + batch_size)]\n test_batch_labels = test_labels[test_offset:(\n test_offset + batch_size)]\n data_testing = {\n tweets: test_batch_tweets,\n labels: test_batch_labels\n }\n loss_value_test, error_value_test = session.run(\n [loss, pred_err], feed_dict=data_testing)\n test_loss.append(loss_value_test)\n test_error.append(error_value_test)\n\n print(\"Test loss: %.3f\" % np.mean(test_loss))\n print(\"Test error: %.3f%%\" % np.mean(test_error))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
GoogleCloudPlatform/training-data-analyst
courses/machine_learning/deepdive2/tensorflow_extended/labs/components_keras.ipynb
apache-2.0
[ "Interacting with TFX Keras Components\nThis notebook will interactively walk through each built-in component of TensorFlow Extended (TFX).\nIt covers every step in an end-to-end machine learning pipeline, from data ingestion to pushing a model to serving.\nWhen you're done, the contents of this notebook can be automatically exported as TFX pipeline source code, which you can orchestrate with Apache Airflow and Apache Beam.\nNote: This notebook demonstrates the use of native Keras models in TFX pipelines. TFX only supports the TensorFlow 2 version of Keras.\nLearning objectives\n\nSet up pipeline paths.\nDownload example data.\nRun TFX components interactively.\n\nIntroduction\nThis notebook demonstrates how to use TFX in a Jupyter/Colab environment. Here, you walk through the Chicago Taxi example in an interactive notebook.\nWorking in an interactive notebook is a useful way to become familiar with the structure of a TFX pipeline. It's also useful when doing development of your own pipelines as a lightweight development environment, but you should be aware that there are differences in the way interactive notebooks are orchestrated, and how they access metadata artifacts.\nOrchestration\nIn a production deployment of TFX, you will use an orchestrator such as Apache Airflow, Kubeflow Pipelines, or Apache Beam to orchestrate a pre-defined pipeline graph of TFX components. In an interactive notebook, the notebook itself is the orchestrator, running each TFX component as you execute the notebook cells.\nMetadata\nIn a production deployment of TFX, you will access metadata through the ML Metadata (MLMD) API. MLMD stores metadata properties in a database such as MySQL or SQLite, and stores the metadata payloads in a persistent store such as on your filesystem. 
In an interactive notebook, both properties and payloads are stored in an ephemeral SQLite database in the /tmp directory on the Jupyter notebook or Colab server.\nSetup\nFirst, you install and import the necessary packages, set up paths, and download data.\nEach learning objective will correspond to a #TODO in this student lab notebook -- try to complete this notebook first and then review the solution notebook.\nUpgrade Pip\nTo avoid upgrading Pip in a system when running locally, check to make sure that you're running in Colab. Local systems can of course be upgraded separately.", "import sys\nif 'google.colab' in sys.modules:\n !pip install --upgrade pip", "Install TFX\nNote: In Google Colab, because of package updates, the first time you run this cell you must restart the runtime (Runtime > Restart runtime ...).", "# Install the TensorFlow Extended library\n!pip install -U tfx", "Restart the kernel\nPlease ignore any incompatibility warnings and errors. Restart the kernel to use updated packages. 
(On the Notebook menu, select Kernel > Restart Kernel > Restart).\nImport packages\nYou import necessary packages, including standard TFX component classes.", "import os\nimport pprint\nimport tempfile\nimport urllib\n\nimport absl\nimport tensorflow as tf\nimport tensorflow_model_analysis as tfma\ntf.get_logger().propagate = False\npp = pprint.PrettyPrinter()\n\nfrom tfx import v1 as tfx\nfrom tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext\n\n%load_ext tfx.orchestration.experimental.interactive.notebook_extensions.skip", "Let's check the library versions.", "print('TensorFlow version: {}'.format(tf.__version__))\nprint('TFX version: {}'.format(tfx.__version__))", "Set up pipeline paths", "# This is the root directory for your TFX pip package installation.\n_tfx_root = tfx.__path__[0]\n\n# This is the directory containing the TFX Chicago Taxi Pipeline example.\n_taxi_root = os.path.join(_tfx_root, 'examples/chicago_taxi_pipeline')\n\n# This is the path where your model will be pushed for serving.\n# TODO: Your code goes here\n\n# Set up logging.\nabsl.logging.set_verbosity(absl.logging.INFO)", "Download example data\nYou download the example dataset for use in your TFX pipeline.\nThe dataset you're using is the Taxi Trips dataset released by the City of Chicago. 
The columns in this dataset are:\n<table>\n<tr><td>pickup_community_area</td><td>fare</td><td>trip_start_month</td></tr>\n<tr><td>trip_start_hour</td><td>trip_start_day</td><td>trip_start_timestamp</td></tr>\n<tr><td>pickup_latitude</td><td>pickup_longitude</td><td>dropoff_latitude</td></tr>\n<tr><td>dropoff_longitude</td><td>trip_miles</td><td>pickup_census_tract</td></tr>\n<tr><td>dropoff_census_tract</td><td>payment_type</td><td>company</td></tr>\n<tr><td>trip_seconds</td><td>dropoff_community_area</td><td>tips</td></tr>\n</table>\n\nWith this dataset, you will build a model that predicts the tips of a trip.", "_data_root = tempfile.mkdtemp(prefix='tfx-data')\nDATA_PATH = 'https://raw.githubusercontent.com/tensorflow/tfx/master/tfx/examples/chicago_taxi_pipeline/data/simple/data.csv'\n_data_filepath = # TODO: Your code goes here\nurllib.request.urlretrieve(DATA_PATH, _data_filepath)", "Take a quick look at the CSV file.", "# print first ten lines of the file\n!head {_data_filepath}", "Disclaimer: This site provides applications using data that has been modified for use from its original source, www.cityofchicago.org, the official website of the City of Chicago. The City of Chicago makes no claims as to the content, accuracy, timeliness, or completeness of any of the data provided at this site. The data provided at this site is subject to change at any time. It is understood that the data provided at this site is being used at one’s own risk.\nCreate the InteractiveContext\nLast, you create an InteractiveContext, which will allow you to run TFX components interactively in this notebook.", "# Here, you create an InteractiveContext using default parameters. This will\n# use a temporary directory with an ephemeral ML Metadata database instance.\n# To use your own pipeline root or database, the optional properties\n# `pipeline_root` and `metadata_connection_config` may be passed to\n# InteractiveContext. 
Calls to InteractiveContext are no-ops outside of the\n# notebook.\ncontext = InteractiveContext()", "Run TFX components interactively\nIn the cells that follow, you create TFX components one-by-one, run each of them, and visualize their output artifacts.\nExampleGen\nThe ExampleGen component is usually at the start of a TFX pipeline. It will:\n\nSplit data into training and evaluation sets (by default, 2/3 training + 1/3 eval)\nConvert data into the tf.Example format (learn more here)\nCopy data into the _tfx_root directory for other components to access\n\nExampleGen takes as input the path to your data source. In your case, this is the _data_root path that contains the downloaded CSV.\nNote: In this notebook, you can instantiate components one-by-one and run them with InteractiveContext.run(). By contrast, in a production setting, you would specify all the components upfront in a Pipeline to pass to the orchestrator (see the Building a TFX Pipeline Guide).\nEnabling the Cache\nWhen using the InteractiveContext in a notebook to develop a pipeline you can control when individual components will cache their outputs. Set enable_cache to True when you want to reuse the previous output artifacts that the component generated. Set enable_cache to False when you want to recompute the output artifacts for a component, if you are making changes to the code for example.", "example_gen = tfx.components.CsvExampleGen(input_base=_data_root)\ncontext.run(example_gen, enable_cache=True)", "Let's examine the output artifacts of ExampleGen. 
This component produces two artifacts, training examples and evaluation examples:", "artifact = example_gen.outputs['examples'].get()[0]\nprint(artifact.split_names, artifact.uri)", "You can also take a look at the first three training examples:", "# Get the URI of the output artifact representing the training examples, which is a directory\ntrain_uri = os.path.join(example_gen.outputs['examples'].get()[0].uri, 'Split-train')\n\n# Get the list of files in this directory (all compressed TFRecord files)\ntfrecord_filenames = [os.path.join(train_uri, name)\n for name in os.listdir(train_uri)]\n\n# Create a `TFRecordDataset` to read these files\ndataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type=\"GZIP\")\n\n# Iterate over the first 3 records and decode them.\nfor tfrecord in dataset.take(3):\n serialized_example = tfrecord.numpy()\n example = tf.train.Example()\n example.ParseFromString(serialized_example)\n pp.pprint(example)", "Now that ExampleGen has finished ingesting the data, the next step is data analysis.\nStatisticsGen\nThe StatisticsGen component computes statistics over your dataset for data analysis, as well as for use in downstream components. It uses the TensorFlow Data Validation library.\nStatisticsGen takes as input the dataset you just ingested using ExampleGen.", "statistics_gen = tfx.components.StatisticsGen(\n examples=example_gen.outputs['examples'])\ncontext.run(statistics_gen, enable_cache=True)", "After StatisticsGen finishes running, you can visualize the outputted statistics. Try playing with the different plots!", "context.show(statistics_gen.outputs['statistics'])", "SchemaGen\nThe SchemaGen component generates a schema based on your data statistics. (A schema defines the expected bounds, types, and properties of the features in your dataset.) It also uses the TensorFlow Data Validation library.\nNote: The generated schema is best-effort and only tries to infer basic properties of the data. 
It is expected that you review and modify it as needed.\nSchemaGen will take as input the statistics that you generated with StatisticsGen, looking at the training split by default.", "schema_gen = tfx.components.SchemaGen(\n statistics=statistics_gen.outputs['statistics'],\n infer_feature_shape=False)\ncontext.run(schema_gen, enable_cache=True)", "After SchemaGen finishes running, you can visualize the generated schema as a table.", "context.show(schema_gen.outputs['schema'])", "Each feature in your dataset shows up as a row in the schema table, alongside its properties. The schema also captures all the values that a categorical feature takes on, denoted as its domain.\nTo learn more about schemas, see the SchemaGen documentation.\nExampleValidator\nThe ExampleValidator component detects anomalies in your data, based on the expectations defined by the schema. It also uses the TensorFlow Data Validation library.\nExampleValidator will take as input the statistics from StatisticsGen, and the schema from SchemaGen.", "example_validator = tfx.components.ExampleValidator(\n statistics=statistics_gen.outputs['statistics'],\n schema=schema_gen.outputs['schema'])\ncontext.run(example_validator, enable_cache=True)", "After ExampleValidator finishes running, you can visualize the anomalies as a table.", "context.show(example_validator.outputs['anomalies'])", "In the anomalies table, you can see that there are no anomalies. This is what you'd expect, since this the first dataset that you've analyzed and the schema is tailored to it. You should review this schema -- anything unexpected means an anomaly in the data. Once reviewed, the schema can be used to guard future data, and anomalies produced here can be used to debug model performance, understand how your data evolves over time, and identify data errors.\nTransform\nThe Transform component performs feature engineering for both training and serving. 
It uses the TensorFlow Transform library.\nTransform will take as input the data from ExampleGen, the schema from SchemaGen, as well as a module that contains user-defined Transform code.\nLet's see an example of user-defined Transform code below (for an introduction to the TensorFlow Transform APIs, see the tutorial). First, you define a few constants for feature engineering:\nNote: The %%writefile cell magic will save the contents of the cell as a .py file on disk. This allows the Transform component to load your code as a module.", "_taxi_constants_module_file = 'taxi_constants.py'\n\n%%writefile {_taxi_constants_module_file}\n\nNUMERICAL_FEATURES = ['trip_miles', 'fare', 'trip_seconds']\n\nBUCKET_FEATURES = [\n 'pickup_latitude', 'pickup_longitude', 'dropoff_latitude',\n 'dropoff_longitude'\n]\n# Number of buckets used by tf.transform for encoding each feature.\nFEATURE_BUCKET_COUNT = 10\n\nCATEGORICAL_NUMERICAL_FEATURES = [\n 'trip_start_hour', 'trip_start_day', 'trip_start_month',\n 'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area',\n 'dropoff_community_area'\n]\n\nCATEGORICAL_STRING_FEATURES = [\n 'payment_type',\n 'company',\n]\n\n# Number of vocabulary terms used for encoding categorical features.\nVOCAB_SIZE = 1000\n\n# Count of out-of-vocab buckets in which unrecognized categorical are hashed.\nOOV_SIZE = 10\n\n# Keys\nLABEL_KEY = 'tips'\nFARE_KEY = 'fare'\n\ndef t_name(key):\n \"\"\"\n Rename the feature keys so that they don't clash with the raw keys when\n running the Evaluator component.\n Args:\n key: The original feature key\n Returns:\n key with '_xf' appended\n \"\"\"\n return key + '_xf'", "Next, you write a preprocessing_fn that takes in raw data as input, and returns transformed features that your model can train on:", "_taxi_transform_module_file = 'taxi_transform.py'\n\n%%writefile {_taxi_transform_module_file}\n\nimport tensorflow as tf\nimport tensorflow_transform as tft\n\n# Imported files such as taxi_constants are 
normally cached, so changes are\n# not honored after the first import. Normally this is good for efficiency, but\n# during development when you may be iterating code it can be a problem. To\n# avoid this problem during development, reload the file.\nimport taxi_constants\nimport sys\nif 'google.colab' in sys.modules: # Testing to see if you're doing development\n import importlib\n importlib.reload(taxi_constants)\n\n_NUMERICAL_FEATURES = taxi_constants.NUMERICAL_FEATURES\n_BUCKET_FEATURES = taxi_constants.BUCKET_FEATURES\n_FEATURE_BUCKET_COUNT = taxi_constants.FEATURE_BUCKET_COUNT\n_CATEGORICAL_NUMERICAL_FEATURES = taxi_constants.CATEGORICAL_NUMERICAL_FEATURES\n_CATEGORICAL_STRING_FEATURES = taxi_constants.CATEGORICAL_STRING_FEATURES\n_VOCAB_SIZE = taxi_constants.VOCAB_SIZE\n_OOV_SIZE = taxi_constants.OOV_SIZE\n_FARE_KEY = taxi_constants.FARE_KEY\n_LABEL_KEY = taxi_constants.LABEL_KEY\n\n\ndef _make_one_hot(x, key):\n \"\"\"Make a one-hot tensor to encode categorical features.\n Args:\n X: A dense tensor\n key: A string key for the feature in the input\n Returns:\n A dense one-hot tensor as a float list\n \"\"\"\n integerized = tft.compute_and_apply_vocabulary(x,\n top_k=_VOCAB_SIZE,\n num_oov_buckets=_OOV_SIZE,\n vocab_filename=key, name=key)\n depth = (\n tft.experimental.get_vocabulary_size_by_name(key) + _OOV_SIZE)\n one_hot_encoded = tf.one_hot(\n integerized,\n depth=tf.cast(depth, tf.int32),\n on_value=1.0,\n off_value=0.0)\n return tf.reshape(one_hot_encoded, [-1, depth])\n\n\ndef _fill_in_missing(x):\n \"\"\"Replace missing values in a SparseTensor.\n Fills in missing values of `x` with '' or 0, and converts to a dense tensor.\n Args:\n x: A `SparseTensor` of rank 2. 
Its dense shape should have size at most 1\n in the second dimension.\n Returns:\n A rank 1 tensor where missing values of `x` have been filled in.\n \"\"\"\n if not isinstance(x, tf.sparse.SparseTensor):\n return x\n\n default_value = '' if x.dtype == tf.string else 0\n return tf.squeeze(\n tf.sparse.to_dense(\n tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]),\n default_value),\n axis=1)\n\n\ndef preprocessing_fn(inputs):\n \"\"\"tf.transform's callback function for preprocessing inputs.\n Args:\n inputs: map from feature keys to raw not-yet-transformed features.\n Returns:\n Map from string feature key to transformed feature operations.\n \"\"\"\n outputs = {}\n for key in _NUMERICAL_FEATURES:\n # If sparse make it dense, setting nan's to 0 or '', and apply zscore.\n outputs[taxi_constants.t_name(key)] = tft.scale_to_z_score(\n _fill_in_missing(inputs[key]), name=key)\n\n for key in _BUCKET_FEATURES:\n outputs[taxi_constants.t_name(key)] = tf.cast(tft.bucketize(\n _fill_in_missing(inputs[key]), _FEATURE_BUCKET_COUNT, name=key),\n dtype=tf.float32)\n\n for key in _CATEGORICAL_STRING_FEATURES:\n outputs[taxi_constants.t_name(key)] = _make_one_hot(_fill_in_missing(inputs[key]), key)\n\n for key in _CATEGORICAL_NUMERICAL_FEATURES:\n outputs[taxi_constants.t_name(key)] = _make_one_hot(tf.strings.strip(\n tf.strings.as_string(_fill_in_missing(inputs[key]))), key)\n\n # Was this passenger a big tipper?\n taxi_fare = _fill_in_missing(inputs[_FARE_KEY])\n tips = _fill_in_missing(inputs[_LABEL_KEY])\n outputs[_LABEL_KEY] = tf.where(\n tf.math.is_nan(taxi_fare),\n tf.cast(tf.zeros_like(taxi_fare), tf.int64),\n # Test if the tip was > 20% of the fare.\n tf.cast(\n tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64))\n\n return outputs", "Now, you pass in this feature engineering code to the Transform component and run it to transform your data.", "transform = tfx.components.Transform(\n examples=example_gen.outputs['examples'],\n 
schema=schema_gen.outputs['schema'],\n module_file=os.path.abspath(_taxi_transform_module_file))\ncontext.run(transform, enable_cache=True)", "Let's examine the output artifacts of Transform. This component produces two types of outputs:\n\ntransform_graph is the graph that can perform the preprocessing operations (this graph will be included in the serving and evaluation models).\ntransformed_examples represents the preprocessed training and evaluation data.", "transform.outputs", "Take a peek at the transform_graph artifact. It points to a directory containing three subdirectories.", "train_uri = transform.outputs['transform_graph'].get()[0].uri\nos.listdir(train_uri)", "The transformed_metadata subdirectory contains the schema of the preprocessed data. The transform_fn subdirectory contains the actual preprocessing graph. The metadata subdirectory contains the schema of the original data.\nYou can also take a look at the first three transformed examples:", "# Get the URI of the output artifact representing the transformed examples, which is a directory\ntrain_uri = os.path.join(transform.outputs['transformed_examples'].get()[0].uri, 'Split-train')\n\n# Get the list of files in this directory (all compressed TFRecord files)\ntfrecord_filenames = [os.path.join(train_uri, name)\n for name in os.listdir(train_uri)]\n\n# Create a `TFRecordDataset` to read these files\ndataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type=\"GZIP\")\n\n# Iterate over the first 3 records and decode them.\nfor tfrecord in dataset.take(3):\n serialized_example = tfrecord.numpy()\n example = tf.train.Example()\n example.ParseFromString(serialized_example)\n pp.pprint(example)", "After the Transform component has transformed your data into features, and the next step is to train a model.\nTrainer\nThe Trainer component will train a model that you define in TensorFlow. 
Default Trainer support Estimator API, to use Keras API, you need to specify Generic Trainer by setup custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor) in Trainer's contructor.\nTrainer takes as input the schema from SchemaGen, the transformed data and graph from Transform, training parameters, as well as a module that contains user-defined model code.\nLet's see an example of user-defined model code below (for an introduction to the TensorFlow Keras APIs, see the tutorial):", "_taxi_trainer_module_file = 'taxi_trainer.py'\n\n%%writefile {_taxi_trainer_module_file}\n\nfrom typing import Dict, List, Text\n\nimport os\nimport glob\nfrom absl import logging\n\nimport datetime\nimport tensorflow as tf\nimport tensorflow_transform as tft\n\nfrom tfx import v1 as tfx\nfrom tfx_bsl.public import tfxio\nfrom tensorflow_transform import TFTransformOutput\n\n# Imported files such as taxi_constants are normally cached, so changes are\n# not honored after the first import. Normally this is good for efficiency, but\n# during development when you may be iterating code it can be a problem. 
To\n# avoid this problem during development, reload the file.\nimport taxi_constants\nimport sys\nif 'google.colab' in sys.modules: # Testing to see if you're doing development\n import importlib\n importlib.reload(taxi_constants)\n\n_LABEL_KEY = taxi_constants.LABEL_KEY\n\n_BATCH_SIZE = 40\n\n\ndef _input_fn(file_pattern: List[Text],\n data_accessor: tfx.components.DataAccessor,\n tf_transform_output: tft.TFTransformOutput,\n batch_size: int = 200) -> tf.data.Dataset:\n \"\"\"Generates features and label for tuning/training.\n\n Args:\n file_pattern: List of paths or patterns of input tfrecord files.\n data_accessor: DataAccessor for converting input to RecordBatch.\n tf_transform_output: A TFTransformOutput.\n batch_size: representing the number of consecutive elements of returned\n dataset to combine in a single batch\n\n Returns:\n A dataset that contains (features, indices) tuple where features is a\n dictionary of Tensors, and indices is a single Tensor of label indices.\n \"\"\"\n return data_accessor.tf_dataset_factory(\n file_pattern,\n tfxio.TensorFlowDatasetOptions(\n batch_size=batch_size, label_key=_LABEL_KEY),\n tf_transform_output.transformed_metadata.schema)\n\ndef _get_tf_examples_serving_signature(model, tf_transform_output):\n \"\"\"Returns a serving signature that accepts `tensorflow.Example`.\"\"\"\n\n # You need to track the layers in the model in order to save it.\n model.tft_layer_inference = tf_transform_output.transform_features_layer()\n\n @tf.function(input_signature=[\n tf.TensorSpec(shape=[None], dtype=tf.string, name='examples')\n ])\n def serve_tf_examples_fn(serialized_tf_example):\n \"\"\"Returns the output to be used in the serving signature.\"\"\"\n raw_feature_spec = tf_transform_output.raw_feature_spec()\n # Remove label feature since these will not be present at serving time.\n raw_feature_spec.pop(_LABEL_KEY)\n raw_features = tf.io.parse_example(serialized_tf_example, raw_feature_spec)\n transformed_features = 
model.tft_layer_inference(raw_features)\n logging.info('serve_transformed_features = %s', transformed_features)\n\n outputs = model(transformed_features)\n return {'outputs': outputs}\n\n return serve_tf_examples_fn\n\n\ndef _get_transform_features_signature(model, tf_transform_output):\n \"\"\"Returns a serving signature that applies tf.Transform to features.\"\"\"\n\n # You need to track the layers in the model in order to save it.\n model.tft_layer_eval = tf_transform_output.transform_features_layer()\n\n @tf.function(input_signature=[\n tf.TensorSpec(shape=[None], dtype=tf.string, name='examples')\n ])\n def transform_features_fn(serialized_tf_example):\n \"\"\"Returns the transformed_features to be fed as input to evaluator.\"\"\"\n raw_feature_spec = tf_transform_output.raw_feature_spec()\n raw_features = tf.io.parse_example(serialized_tf_example, raw_feature_spec)\n transformed_features = model.tft_layer_eval(raw_features)\n logging.info('eval_transformed_features = %s', transformed_features)\n return transformed_features\n\n return transform_features_fn\n\n\ndef export_serving_model(tf_transform_output, model, output_dir):\n \"\"\"Exports a keras model for serving.\n Args:\n tf_transform_output: Wrapper around output of tf.Transform.\n model: A keras model to export for serving.\n output_dir: A directory where the model will be exported to.\n \"\"\"\n # The layer has to be saved to the model for keras tracking purpases.\n model.tft_layer = tf_transform_output.transform_features_layer()\n\n signatures = {\n 'serving_default':\n _get_tf_examples_serving_signature(model, tf_transform_output),\n 'transform_features':\n _get_transform_features_signature(model, tf_transform_output),\n }\n\n model.save(output_dir, save_format='tf', signatures=signatures)\n\n\ndef _build_keras_model(tf_transform_output: TFTransformOutput\n ) -> tf.keras.Model:\n \"\"\"Creates a DNN Keras model for classifying taxi data.\n\n Args:\n tf_transform_output: [TFTransformOutput], the 
outputs from Transform\n\n Returns:\n A keras Model.\n \"\"\"\n feature_spec = tf_transform_output.transformed_feature_spec().copy()\n feature_spec.pop(_LABEL_KEY)\n\n inputs = {}\n for key, spec in feature_spec.items():\n if isinstance(spec, tf.io.VarLenFeature):\n inputs[key] = tf.keras.layers.Input(\n shape=[None], name=key, dtype=spec.dtype, sparse=True)\n elif isinstance(spec, tf.io.FixedLenFeature):\n inputs[key] = tf.keras.layers.Input(\n shape=spec.shape or [1], name=key, dtype=spec.dtype)\n else:\n raise ValueError('Spec type is not supported: ', key, spec)\n \n output = tf.keras.layers.Concatenate()(tf.nest.flatten(inputs))\n output = tf.keras.layers.Dense(100, activation='relu')(output)\n output = tf.keras.layers.Dense(70, activation='relu')(output)\n output = tf.keras.layers.Dense(50, activation='relu')(output)\n output = tf.keras.layers.Dense(20, activation='relu')(output)\n output = tf.keras.layers.Dense(1)(output)\n return tf.keras.Model(inputs=inputs, outputs=output)\n\n\n# TFX Trainer will call this function.\ndef run_fn(fn_args: tfx.components.FnArgs):\n \"\"\"Train the model based on given args.\n\n Args:\n fn_args: Holds args used to train the model as name/value pairs.\n \"\"\"\n tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)\n\n train_dataset = _input_fn(fn_args.train_files, fn_args.data_accessor, \n tf_transform_output, _BATCH_SIZE)\n eval_dataset = _input_fn(fn_args.eval_files, fn_args.data_accessor, \n tf_transform_output, _BATCH_SIZE)\n\n model = _build_keras_model(tf_transform_output)\n\n model.compile(\n loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),\n optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),\n metrics=[tf.keras.metrics.BinaryAccuracy()])\n\n tensorboard_callback = tf.keras.callbacks.TensorBoard(\n log_dir=fn_args.model_run_dir, update_freq='batch')\n\n model.fit(\n train_dataset,\n steps_per_epoch=fn_args.train_steps,\n validation_data=eval_dataset,\n 
validation_steps=fn_args.eval_steps,\n callbacks=[tensorboard_callback])\n\n # Export the model.\n export_serving_model(tf_transform_output, model, fn_args.serving_model_dir)", "Now, you pass in this model code to the Trainer component and run it to train the model.", "# use a TFX component to train a TensorFlow model\ntrainer = tfx.components.Trainer(\n module_file= # TODO: Your code goes here,\n examples=transform.outputs['transformed_examples'],\n transform_graph=transform.outputs['transform_graph'],\n schema=schema_gen.outputs['schema'],\n train_args=tfx.proto.TrainArgs(num_steps=10000),\n eval_args=tfx.proto.EvalArgs(num_steps=5000))\ncontext.run(trainer, enable_cache=True)", "Analyze Training with TensorBoard\nTake a peek at the trainer artifact. It points to a directory containing the model subdirectories.", "model_artifact_dir = trainer.outputs['model'].get()[0].uri\npp.pprint(os.listdir(model_artifact_dir))\nmodel_dir = os.path.join(model_artifact_dir, 'Format-Serving')\npp.pprint(os.listdir(model_dir))", "Optionally, you can connect TensorBoard to the Trainer to analyze your model's training curves.", "model_run_artifact_dir = trainer.outputs['model_run'].get()[0].uri\n\n%load_ext tensorboard\n%tensorboard --logdir {model_run_artifact_dir}", "Evaluator\nThe Evaluator component computes model performance metrics over the evaluation set. It uses the TensorFlow Model Analysis library. The Evaluator can also optionally validate that a newly trained model is better than the previous model. This is useful in a production pipeline setting where you may automatically train and validate a model every day. In this notebook, you only train one model, so the Evaluator automatically will label the\nmodel as \"good\".\nEvaluator will take as input the data from ExampleGen, the trained model from Trainer, and slicing configuration. The slicing configuration allows you to slice your metrics on feature values (e.g. 
how does your model perform on taxi trips that start at 8am versus 8pm?). See an example of this configuration below:", "# Imported files such as taxi_constants are normally cached, so changes are\n# not honored after the first import. Normally this is good for efficiency, but\n# during development when you may be iterating code it can be a problem. To\n# avoid this problem during development, reload the file.\nimport taxi_constants\nimport sys\nif 'google.colab' in sys.modules: # Testing to see if you're doing development\n import importlib\n importlib.reload(taxi_constants)\n\neval_config = tfma.EvalConfig(\n model_specs=[\n # This assumes a serving model with signature 'serving_default'. If\n # using estimator based EvalSavedModel, add signature_name: 'eval' and\n # remove the label_key.\n tfma.ModelSpec(\n signature_name='serving_default',\n label_key=taxi_constants.LABEL_KEY,\n preprocessing_function_names=['transform_features'],\n )\n ],\n metrics_specs=[\n tfma.MetricsSpec(\n # The metrics added here are in addition to those saved with the\n # model (assuming either a keras model or EvalSavedModel is used).\n # Any metrics added into the saved model (for example using\n # model.compile(..., metrics=[...]), etc) will be computed\n # automatically.\n # To add validation thresholds for metrics saved with the model,\n # add them keyed by metric name to the thresholds map.\n metrics=[\n tfma.MetricConfig(class_name='ExampleCount'),\n tfma.MetricConfig(class_name='BinaryAccuracy',\n threshold=tfma.MetricThreshold(\n value_threshold=tfma.GenericValueThreshold(\n lower_bound={'value': 0.5}),\n # Change threshold will be ignored if there is no\n # baseline model resolved from MLMD (first run).\n change_threshold=tfma.GenericChangeThreshold(\n direction=tfma.MetricDirection.HIGHER_IS_BETTER,\n absolute={'value': -1e-10})))\n ]\n )\n ],\n slicing_specs=[\n # An empty slice spec means the overall slice, i.e. 
the whole dataset.\n tfma.SlicingSpec(),\n # Data can be sliced along a feature column. In this case, data is\n # sliced along feature column trip_start_hour.\n tfma.SlicingSpec(\n feature_keys=['trip_start_hour'])\n ])", "Next, you give this configuration to Evaluator and run it.", "# Use TFMA to compute a evaluation statistics over features of a model and\n# validate them against a baseline.\n\n# The model resolver is only required if performing model validation in addition\n# to evaluation. In this case you validate against the latest blessed model. If\n# no model has been blessed before (as in this case) the evaluator will make your\n# candidate the first blessed model.\nmodel_resolver = tfx.dsl.Resolver(\n strategy_class=tfx.dsl.experimental.LatestBlessedModelStrategy,\n model=tfx.dsl.Channel(type=tfx.types.standard_artifacts.Model),\n model_blessing=tfx.dsl.Channel(\n type=tfx.types.standard_artifacts.ModelBlessing)).with_id(\n 'latest_blessed_model_resolver')\ncontext.run(model_resolver, enable_cache=True)\n\nevaluator = tfx.components.Evaluator(\n examples=example_gen.outputs['examples'],\n model=trainer.outputs['model'],\n baseline_model=model_resolver.outputs['model'],\n eval_config=eval_config)\ncontext.run(evaluator, enable_cache=True)", "Now let's examine the output artifacts of Evaluator.", "evaluator.outputs", "Using the evaluation output you can show the default visualization of global metrics on the entire evaluation set.", "context.show(evaluator.outputs['evaluation'])", "To see the visualization for sliced evaluation metrics, you can directly call the TensorFlow Model Analysis library.", "import tensorflow_model_analysis as tfma\n\n# Get the TFMA output result path and load the result.\nPATH_TO_RESULT = evaluator.outputs['evaluation'].get()[0].uri\ntfma_result = tfma.load_eval_result(PATH_TO_RESULT)\n\n# Show data sliced along feature column trip_start_hour.\ntfma.view.render_slicing_metrics(\n tfma_result, slicing_column='trip_start_hour')", 
"This visualization shows the same metrics, but computed at every feature value of trip_start_hour instead of on the entire evaluation set.\nTensorFlow Model Analysis supports many other visualizations, such as Fairness Indicators and plotting a time series of model performance. To learn more, see the tutorial.\nSince you added thresholds to your config, validation output is also available. The precence of a blessing artifact indicates that your model passed validation. Since this is the first validation being performed the candidate is automatically blessed.", "blessing_uri = evaluator.outputs['blessing'].get()[0].uri\n!ls -l {blessing_uri}", "Now can also verify the success by loading the validation result record:", "PATH_TO_RESULT = evaluator.outputs['evaluation'].get()[0].uri\nprint(tfma.load_validation_result(PATH_TO_RESULT))", "Pusher\nThe Pusher component is usually at the end of a TFX pipeline. It checks whether a model has passed validation, and if so, exports the model to _serving_model_dir.", "pusher = tfx.components.Pusher(\n model=trainer.outputs['model'],\n model_blessing=evaluator.outputs['blessing'],\n push_destination=tfx.proto.PushDestination(\n filesystem=tfx.proto.PushDestination.Filesystem(\n base_directory=_serving_model_dir)))\ncontext.run(pusher, enable_cache=True)", "Let's examine the output artifacts of Pusher.", "pusher.outputs", "In particular, the Pusher will export your model in the SavedModel format, which looks like this:", "push_uri = pusher.outputs['pushed_model'].get()[0].uri\nmodel = tf.saved_model.load(push_uri)\n\nfor item in model.signatures.items():\n pp.pprint(item)", "You're finished your tour of built-in TFX components!" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
tensorflow/lattice
docs/tutorials/aggregate_function_models.ipynb
apache-2.0
[ "Copyright 2020 The TensorFlow Authors.", "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "TF Lattice Aggregate Function Models\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/lattice/tutorials/aggregate_function_models\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/lattice/blob/master/docs/tutorials/aggregate_function_models.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/lattice/blob/master/docs/tutorials/aggregate_function_models.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/lattice/docs/tutorials/aggregate_function_models.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>\n\nOverview\nTFL Premade Aggregate Function Models are quick and easy ways to build TFL tf.keras.model instances for learning complex aggregation functions. This guide outlines the steps needed to construct a TFL Premade Aggregate Function Model and train/test it. 
\nSetup\nInstalling TF Lattice package:", "#@test {\"skip\": true}\n!pip install tensorflow-lattice pydot", "Importing required packages:", "import tensorflow as tf\n\nimport collections\nimport logging\nimport numpy as np\nimport pandas as pd\nimport sys\nimport tensorflow_lattice as tfl\nlogging.disable(sys.maxsize)", "Downloading the Puzzles dataset:", "train_dataframe = pd.read_csv(\n 'https://raw.githubusercontent.com/wbakst/puzzles_data/master/train.csv')\ntrain_dataframe.head()\n\ntest_dataframe = pd.read_csv(\n 'https://raw.githubusercontent.com/wbakst/puzzles_data/master/test.csv')\ntest_dataframe.head()", "Extract and convert features and labels", "# Features:\n# - star_rating rating out of 5 stars (1-5)\n# - word_count number of words in the review\n# - is_amazon 1 = reviewed on amazon; 0 = reviewed on artifact website\n# - includes_photo if the review includes a photo of the puzzle\n# - num_helpful number of people that found this review helpful\n# - num_reviews total number of reviews for this puzzle (we construct)\n#\n# This ordering of feature names will be the exact same order that we construct\n# our model to expect.\nfeature_names = [\n 'star_rating', 'word_count', 'is_amazon', 'includes_photo', 'num_helpful',\n 'num_reviews'\n]\n\ndef extract_features(dataframe, label_name):\n # First we extract flattened features.\n flattened_features = {\n feature_name: dataframe[feature_name].values.astype(float)\n for feature_name in feature_names[:-1]\n }\n\n # Construct mapping from puzzle name to feature.\n star_rating = collections.defaultdict(list)\n word_count = collections.defaultdict(list)\n is_amazon = collections.defaultdict(list)\n includes_photo = collections.defaultdict(list)\n num_helpful = collections.defaultdict(list)\n labels = {}\n\n # Extract each review.\n for i in range(len(dataframe)):\n row = dataframe.iloc[i]\n puzzle_name = row['puzzle_name']\n star_rating[puzzle_name].append(float(row['star_rating']))\n 
word_count[puzzle_name].append(float(row['word_count']))\n is_amazon[puzzle_name].append(float(row['is_amazon']))\n includes_photo[puzzle_name].append(float(row['includes_photo']))\n num_helpful[puzzle_name].append(float(row['num_helpful']))\n labels[puzzle_name] = float(row[label_name])\n\n # Organize data into list of list of features.\n names = list(star_rating.keys())\n star_rating = [star_rating[name] for name in names]\n word_count = [word_count[name] for name in names]\n is_amazon = [is_amazon[name] for name in names]\n includes_photo = [includes_photo[name] for name in names]\n num_helpful = [num_helpful[name] for name in names]\n num_reviews = [[len(ratings)] * len(ratings) for ratings in star_rating]\n labels = [labels[name] for name in names]\n\n # Flatten num_reviews\n flattened_features['num_reviews'] = [len(reviews) for reviews in num_reviews]\n\n # Convert data into ragged tensors.\n star_rating = tf.ragged.constant(star_rating)\n word_count = tf.ragged.constant(word_count)\n is_amazon = tf.ragged.constant(is_amazon)\n includes_photo = tf.ragged.constant(includes_photo)\n num_helpful = tf.ragged.constant(num_helpful)\n num_reviews = tf.ragged.constant(num_reviews)\n labels = tf.constant(labels)\n\n # Now we can return our extracted data.\n return (star_rating, word_count, is_amazon, includes_photo, num_helpful,\n num_reviews), labels, flattened_features\n\ntrain_xs, train_ys, flattened_features = extract_features(train_dataframe, 'Sales12-18MonthsAgo')\ntest_xs, test_ys, _ = extract_features(test_dataframe, 'SalesLastSixMonths')\n\n# Let's define our label minimum and maximum.\nmin_label, max_label = float(np.min(train_ys)), float(np.max(train_ys))\nmin_label, max_label = float(np.min(train_ys)), float(np.max(train_ys))", "Setting the default values used for training in this guide:", "LEARNING_RATE = 0.1\nBATCH_SIZE = 128\nNUM_EPOCHS = 500\nMIDDLE_DIM = 3\nMIDDLE_LATTICE_SIZE = 2\nMIDDLE_KEYPOINTS = 16\nOUTPUT_KEYPOINTS = 8", "Feature 
Configs\nFeature calibration and per-feature configurations are set using tfl.configs.FeatureConfig. Feature configurations include monotonicity constraints, per-feature regularization (see tfl.configs.RegularizerConfig), and lattice sizes for lattice models.\nNote that we must fully specify the feature config for any feature that we want our model to recognize. Otherwise the model will have no way of knowing that such a feature exists. For aggregation models, these features will automaticaly be considered and properly handled as ragged.\nCompute Quantiles\nAlthough the default setting for pwl_calibration_input_keypoints in tfl.configs.FeatureConfig is 'quantiles', for premade models we have to manually define the input keypoints. To do so, we first define our own helper function for computing quantiles.", "def compute_quantiles(features,\n num_keypoints=10,\n clip_min=None,\n clip_max=None,\n missing_value=None):\n # Clip min and max if desired.\n if clip_min is not None:\n features = np.maximum(features, clip_min)\n features = np.append(features, clip_min)\n if clip_max is not None:\n features = np.minimum(features, clip_max)\n features = np.append(features, clip_max)\n # Make features unique.\n unique_features = np.unique(features)\n # Remove missing values if specified.\n if missing_value is not None:\n unique_features = np.delete(unique_features,\n np.where(unique_features == missing_value))\n # Compute and return quantiles over unique non-missing feature values.\n return np.quantile(\n unique_features,\n np.linspace(0., 1., num=num_keypoints),\n interpolation='nearest').astype(float)", "Defining Our Feature Configs\nNow that we can compute our quantiles, we define a feature config for each feature that we want our model to take as input.", "# Feature configs are used to specify how each feature is calibrated and used.\nfeature_configs = [\n tfl.configs.FeatureConfig(\n name='star_rating',\n lattice_size=2,\n monotonicity='increasing',\n 
pwl_calibration_num_keypoints=5,\n pwl_calibration_input_keypoints=compute_quantiles(\n flattened_features['star_rating'], num_keypoints=5),\n ),\n tfl.configs.FeatureConfig(\n name='word_count',\n lattice_size=2,\n monotonicity='increasing',\n pwl_calibration_num_keypoints=5,\n pwl_calibration_input_keypoints=compute_quantiles(\n flattened_features['word_count'], num_keypoints=5),\n ),\n tfl.configs.FeatureConfig(\n name='is_amazon',\n lattice_size=2,\n num_buckets=2,\n ),\n tfl.configs.FeatureConfig(\n name='includes_photo',\n lattice_size=2,\n num_buckets=2,\n ),\n tfl.configs.FeatureConfig(\n name='num_helpful',\n lattice_size=2,\n monotonicity='increasing',\n pwl_calibration_num_keypoints=5,\n pwl_calibration_input_keypoints=compute_quantiles(\n flattened_features['num_helpful'], num_keypoints=5),\n # Larger num_helpful indicating more trust in star_rating.\n reflects_trust_in=[\n tfl.configs.TrustConfig(\n feature_name=\"star_rating\", trust_type=\"trapezoid\"),\n ],\n ),\n tfl.configs.FeatureConfig(\n name='num_reviews',\n lattice_size=2,\n monotonicity='increasing',\n pwl_calibration_num_keypoints=5,\n pwl_calibration_input_keypoints=compute_quantiles(\n flattened_features['num_reviews'], num_keypoints=5),\n )\n]", "Aggregate Function Model\nTo construct a TFL premade model, first construct a model configuration from tfl.configs. An aggregate function model is constructed using the tfl.configs.AggregateFunctionConfig. It applies piecewise-linear and categorical calibration, followed by a lattice model on each dimension of the ragged input. It then applies an aggregation layer over the output for each dimension. 
This is then followed by an optional output piecewise-linear calibration.", "# Model config defines the model structure for the aggregate function model.\naggregate_function_model_config = tfl.configs.AggregateFunctionConfig(\n feature_configs=feature_configs,\n middle_dimension=MIDDLE_DIM,\n middle_lattice_size=MIDDLE_LATTICE_SIZE,\n middle_calibration=True,\n middle_calibration_num_keypoints=MIDDLE_KEYPOINTS,\n middle_monotonicity='increasing',\n output_min=min_label,\n output_max=max_label,\n output_calibration=True,\n output_calibration_num_keypoints=OUTPUT_KEYPOINTS,\n output_initialization=np.linspace(\n min_label, max_label, num=OUTPUT_KEYPOINTS))\n# An AggregateFunction premade model constructed from the given model config.\naggregate_function_model = tfl.premade.AggregateFunction(\n aggregate_function_model_config)\n# Let's plot our model.\ntf.keras.utils.plot_model(\n aggregate_function_model, show_layer_names=False, rankdir='LR')", "The output of each Aggregation layer is the averaged output of a calibrated lattice over the ragged inputs. Here is the model used inside the first Aggregation layer:", "aggregation_layers = [\n layer for layer in aggregate_function_model.layers\n if isinstance(layer, tfl.layers.Aggregation)\n]\ntf.keras.utils.plot_model(\n aggregation_layers[0].model, show_layer_names=False, rankdir='LR')", "Now, as with any other tf.keras.Model, we compile and fit the model to our data.", "aggregate_function_model.compile(\n loss='mae',\n optimizer=tf.keras.optimizers.Adam(LEARNING_RATE))\naggregate_function_model.fit(\n train_xs, train_ys, epochs=NUM_EPOCHS, batch_size=BATCH_SIZE, verbose=False)", "After training our model, we can evaluate it on our test set.", "print('Test Set Evaluation...')\nprint(aggregate_function_model.evaluate(test_xs, test_ys))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
voytekresearch/laxbro
t_test_validation.ipynb
mit
[ "# settings and modules\n%config InlineBackend.figure_format = 'retina'\n%pylab inline\nfrom nsaba.nsaba import nsaba\nfrom nsaba.nsaba import analysis\n\nns_path = \"/Users/Torben/Documents/ABI analysis/current_data_new/\"\naba_path = '/Users/Torben/Documents/ABI analysis/normalized_microarray_donor9861/'\n#nsaba.Nsaba.aba_load(aba_path)\nnsaba.Nsaba.ns_load(ns_path)\nnsaba.Nsaba.aba_load(aba_path)\nN = nsaba.Nsaba()\nN.load_ge_pickle(pkl_file='/Users/Torben/Documents/ABI analysis/normalized_microarray_donor9861/Nsaba_ABA_ge.pkl')", "Loading in terms with given method and smoothing parameters", "N.get_ns_act('depression', thresh=-1, method='knn',smoothing='sum')\nN.get_ns_act('dopamine', thresh=-1, method='knn',smoothing='sum')\nN.get_ns_act('reward', thresh=-1, method='knn',smoothing='sum')\nN.get_ns_act('serotonin', thresh=-1, method='knn',smoothing='sum')\nN.get_ns_act('anxiety', thresh=-1, method='knn',smoothing='sum')\nN.get_ns_act('schizophrenia', thresh=-1, method='knn',smoothing='sum')", "Loading in gene lists", "depression_genes = analysis.load_gene_list('/Users/Torben/Documents/ABI analysis/gene_collections/','DepressionGenes.csv')\ndopamine_genes = analysis.load_gene_list('/Users/Torben/Documents/ABI analysis/gene_collections/','DopamineGenes2.csv')\nreward_genes = analysis.load_gene_list('/Users/Torben/Documents/ABI analysis/gene_collections/','rewardGenes2.csv')\nserotonin_genes = analysis.load_gene_list('/Users/Torben/Documents/ABI analysis/gene_collections/','SerotoninGenes.csv')\nanxiety_genes = analysis.load_gene_list('/Users/Torben/Documents/ABI analysis/gene_collections/','AnxietyGenes.csv')\nschizophrenia_genes = analysis.load_gene_list('/Users/Torben/Documents/ABI analysis/gene_collections/','SchizophreniaGenes.csv')", "performing a t test on correlations of genes associated with their term.\ni.e. 
are these genes associated with this term more than by chance?\nI do this with 4 correlation methods: pearson's r, spearman's r, slope of linear regression, and a t test", "import scipy.stats as stats\nA = analysis.NsabaAnalysis(N)\nall_analyses = np.zeros((6,4))\nmethods = ['pearson','spearman','regression','t_test']\n\nfor m in xrange(len(methods)):\n all_analyses[0,m]= stats.ttest_1samp(A.validate_with_t_test('depression',depression_genes,method=methods[m],quant=85)[0],0)[1]\n all_analyses[1,m]= stats.ttest_1samp(A.validate_with_t_test('dopamine',dopamine_genes,method=methods[m],quant=85)[0],0)[1]\n all_analyses[2,m]= stats.ttest_1samp(A.validate_with_t_test('reward',reward_genes,method=methods[m],quant=85)[0],0)[1]\n all_analyses[3,m]= stats.ttest_1samp(A.validate_with_t_test('serotonin',serotonin_genes,method=methods[m],quant=85)[0],0)[1]\n all_analyses[4,m]= stats.ttest_1samp(A.validate_with_t_test('anxiety',anxiety_genes,method=methods[m],quant=85)[0],0)[1]\n all_analyses[5,m]= stats.ttest_1samp(A.validate_with_t_test('schizophrenia',schizophrenia_genes,method=methods[m],quant=85)[0],0)[1]\nprint all_analyses", "Testing different cutoff values and methods for splitting term/non-term groups for t tests\nMachine learning methods are kmeans and mixture of gaussians.", "\nt_test_analyses = np.zeros((6,6))\nquants = [50,75,85,95]\n\nfor q in xrange(len(quants)):\n t_test_analyses[0,q]= stats.ttest_1samp(A.validate_with_t_test('depression',depression_genes,quant=quants[q])[0],0)[1]\n t_test_analyses[1,q]= stats.ttest_1samp(A.validate_with_t_test('dopamine',dopamine_genes,quant=quants[q])[0],0)[1]\n t_test_analyses[2,q]= stats.ttest_1samp(A.validate_with_t_test('reward',reward_genes,quant=quants[q])[0],0)[1]\n t_test_analyses[3,q]= stats.ttest_1samp(A.validate_with_t_test('serotonin',serotonin_genes,quant=quants[q])[0],0)[1]\n t_test_analyses[4,q]= stats.ttest_1samp(A.validate_with_t_test('anxiety',anxiety_genes,quant=quants[q])[0],0)[1]\n t_test_analyses[5,q]= 
stats.ttest_1samp(A.validate_with_t_test('schizophrenia',schizophrenia_genes,quant=quants[q])[0],0)[1]\n\nmethods = ['kmeans','mog']\nfor m in xrange(len(methods)):\n t_test_analyses[0,m+4]= stats.ttest_1samp(A.validate_with_t_test('depression',depression_genes,split_method=methods[m])[0],0)[1]\n t_test_analyses[1,m+4]= stats.ttest_1samp(A.validate_with_t_test('dopamine',dopamine_genes,split_method=methods[m])[0],0)[1]\n t_test_analyses[2,m+4]= stats.ttest_1samp(A.validate_with_t_test('reward',reward_genes,split_method=methods[m])[0],0)[1]\n t_test_analyses[3,m+4]= stats.ttest_1samp(A.validate_with_t_test('serotonin',serotonin_genes,split_method=methods[m])[0],0)[1]\n t_test_analyses[4,m+4]= stats.ttest_1samp(A.validate_with_t_test('anxiety',anxiety_genes,split_method=methods[m])[0],0)[1]\n t_test_analyses[5,m+4]= stats.ttest_1samp(A.validate_with_t_test('schizophrenia',schizophrenia_genes,split_method=methods[m])[0],0)[1]\n \nprint t_test_analyses\n\nimport csv\n\nwith open('/Users/Torben/Documents/ABI analysis/validation/summed_validation.csv', 'wb') as csvfile:\n spamwriter = csv.writer(csvfile)\n for a in all_analyses:\n spamwriter.writerow(a)\n\nwith open('/Users/Torben/Documents/ABI analysis/validation/summed_validation.csv', 'wb') as csvfile:\n spamwriter = csv.writer(csvfile)\n for a in t_test_analyses:\n spamwriter.writerow(a)\n\na= A.validate_with_t_test('anxiety',anxiety_genes,split_method=methods[m])[0]\n\n[1 for t in xrange(10)]" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
palrogg/foundations-homework
Data_and_databases/.ipynb_checkpoints/Homework_2_Paul_Ronga-checkpoint.ipynb
mit
[ "Homework 2: Working with SQL (Data and Databases 2016)\nThis homework assignment takes the form of an IPython Notebook. There are a number of exercises below, with notebook cells that need to be completed in order to meet particular criteria. Your job is to fill in the cells as appropriate.\nYou'll need to download this notebook file to your computer before you can complete the assignment. To do so, follow these steps:\n\nMake sure you're viewing this notebook in Github.\nCtrl+click (or right click) on the \"Raw\" button in the Github interface, and select \"Save Link As...\" or your browser's equivalent. Save the file in a convenient location on your own computer.\nRename the notebook file to include your own name somewhere in the filename (e.g., Homework_2_Allison_Parrish.ipynb).\nOpen the notebook on your computer using your locally installed version of IPython Notebook.\nWhen you've completed the notebook to your satisfaction, e-mail the completed file to the address of the teaching assistant (as discussed in class).\n\nSetting the scene\nThese problem sets address SQL, with a focus on joins and aggregates.\nI've prepared a SQL version of the MovieLens data for you to use in this homework. Download this .psql file here. 
You'll be importing this data into your own local copy of PostgreSQL.\nTo import the data, follow these steps:\n\nLaunch psql.\nAt the prompt, type CREATE DATABASE homework2;\nConnect to the database you just created by typing \\c homework2\nImport the .psql file you downloaded earlier by typing \\i followed by the path to the .psql file.\n\nAfter you run the \\i command, you should see the following output:\nCREATE TABLE\nCREATE TABLE\nCREATE TABLE\nCOPY 100000\nCOPY 1682\nCOPY 943\nThe table schemas for the data look like this:\nTable \"public.udata\"\n Column | Type | Modifiers \n-----------+---------+-----------\n user_id | integer | \n item_id | integer | \n rating | integer | \n timestamp | integer |\nTable \"public.uuser\"\n Column | Type | Modifiers \n------------+-----------------------+-----------\n user_id | integer | \n age | integer | \n gender | character varying(1) | \n occupation | character varying(80) | \n zip_code | character varying(10) |\nTable \"public.uitem\"\n Column | Type | Modifiers \n--------------------+------------------------+-----------\n movie_id | integer | not null\n movie_title | character varying(81) | not null\n release_date | date | \n video_release_date | character varying(32) | \n imdb_url | character varying(134) | \n unknown | integer | not null\n action | integer | not null\n adventure | integer | not null\n animation | integer | not null\n childrens | integer | not null\n comedy | integer | not null\n crime | integer | not null\n documentary | integer | not null\n drama | integer | not null\n fantasy | integer | not null\n film_noir | integer | not null\n horror | integer | not null\n musical | integer | not null\n mystery | integer | not null\n romance | integer | not null\n scifi | integer | not null\n thriller | integer | not null\n war | integer | not null\n western | integer | not null\nRun the cell below to create a connection object. 
This should work whether you have pg8000 installed or psycopg2.", "import pg8000\nconn = pg8000.connect(database=\"homework2\")", "If you get an error stating that database \"homework2\" does not exist, make sure that you followed the instructions above exactly. If necessary, drop the database you created (with, e.g., DROP DATABASE your_database_name) and start again.\nIn all of the cells below, I've provided the necessary Python scaffolding to perform the query and display the results. All you need to do is write the SQL statements.\nAs noted in the tutorial, if your SQL statement has a syntax error, you'll need to rollback your connection before you can fix the error and try the query again. As a convenience, I've included the following cell, which performs the rollback process. Run it whenever you hit trouble.", "conn.rollback()", "Problem set 1: WHERE and ORDER BY\nIn the cell below, fill in the string assigned to the variable statement with a SQL query that finds all movies that belong to both the science fiction (scifi) and horror genres. Return these movies in reverse order by their release date. (Hint: movies are located in the uitem table. A movie's membership in a genre is indicated by a value of 1 in the uitem table column corresponding to that genre.) Run the cell to execute the query.\nExpected output:\nDeep Rising (1998)\nAlien: Resurrection (1997)\nHellraiser: Bloodline (1996)\nRobert A. 
Heinlein's The Puppet Masters (1994)\nBody Snatchers (1993)\nArmy of Darkness (1993)\nBody Snatchers (1993)\nAlien 3 (1992)\nHeavy Metal (1981)\nAlien (1979)\nNight of the Living Dead (1968)\nBlob, The (1958)", "cursor = conn.cursor()\nstatement = \"SELECT movie_title FROM uitem WHERE scifi = 1 AND horror = 1 ORDER BY release_date DESC\"\ncursor.execute(statement)\nfor row in cursor:\n print(row[0])", "Problem set 2: Aggregation, GROUP BY and HAVING\nIn the cell below, fill in the string assigned to the statement variable with a SQL query that returns the number of movies that are either musicals or children's movies (columns musical and childrens respectively). Hint: use the count(*) aggregate.\nExpected output: 157", "cursor = conn.cursor()\nstatement = \"SELECT COUNT(*) FROM uitem WHERE musical = 1 OR childrens = 1\"\ncursor.execute(statement)\nfor row in cursor:\n print(row[0])", "Nicely done. Now, in the cell below, fill in the indicated string with a SQL statement that returns all occupations, along with their count, from the uuser table that have more than fifty users listed for that occupation. (I.e., the occupation librarian is listed for 51 users, so it should be included in these results. There are only 12 lawyers, so lawyer should not be included in the result.)\nExpected output:\nadministrator 79\nprogrammer 66\nlibrarian 51\nstudent 196\nother 105\nengineer 67\neducator 95\nHint: use GROUP BY and HAVING. (If you're stuck, try writing the query without the HAVING first.)", "cursor = conn.cursor()\nstatement = \"SELECT DISTINCT(occupation), COUNT(*) FROM uuser GROUP BY occupation HAVING COUNT(*) > 50\"\ncursor.execute(statement)\nfor row in cursor:\n print(row[0], row[1])", "Problem set 3: Joining tables\nIn the cell below, fill in the indicated string with a query that finds the titles of movies in the Documentary genre released before 1992 that received a rating of 5 from any user. 
Expected output:\nMadonna: Truth or Dare (1991)\nKoyaanisqatsi (1983)\nParis Is Burning (1990)\nThin Blue Line, The (1988)\nHints:\n\nJOIN the udata and uitem tables.\nUse DISTINCT() to get a list of unique movie titles (no title should be listed more than once).\nThe SQL expression to include in order to find movies released before 1992 is uitem.release_date &lt; '1992-01-01'.", "cursor = conn.cursor()\nstatement = \"SELECT DISTINCT(movie_title) FROM udata JOIN uitem ON uitem.movie_id = udata.item_id WHERE EXTRACT(YEAR FROM release_date) < 1992 AND rating = 5 GROUP BY movie_title\"\n# if \"any\" has to be taken in the sense of \"every\": \n# statement = \"SELECT movie_title FROM uitem JOIN udata ON uitem.movie_id = udata.item_id WHERE EXTRACT(YEAR FROM release_date) < 1992 GROUP BY movie_title HAVING MIN(rating) = 5\"\ncursor.execute(statement)\nfor row in cursor:\n print(row[0])", "Problem set 4: Joins and aggregations... together at last\nThis one's tough, so prepare yourself. Go get a cup of coffee. Stretch a little bit. Deep breath. There you go.\nIn the cell below, fill in the indicated string with a query that produces a list of the ten lowest rated movies in the Horror genre. For the purposes of this problem, take \"lowest rated\" to mean \"has the lowest average rating.\" The query should display the titles of the movies, not their ID number. 
(So you'll have to use a JOIN.)\nExpected output:\nAmityville 1992: It's About Time (1992) 1.00\nBeyond Bedlam (1993) 1.00\nAmityville: Dollhouse (1996) 1.00\nAmityville: A New Generation (1993) 1.00\nAmityville 3-D (1983) 1.17\nCastle Freak (1995) 1.25\nAmityville Curse, The (1990) 1.25\nChildren of the Corn: The Gathering (1996) 1.32\nMachine, The (1994) 1.50\nBody Parts (1991) 1.62", "conn.rollback()\ncursor = conn.cursor()\nstatement = \"SELECT movie_title), AVG(rating) FROM udata JOIN uitem ON uitem.movie_id = udata.item_id WHERE horror = 1 GROUP BY movie_title ORDER BY AVG(rating) LIMIT 10\"\ncursor.execute(statement)\nfor row in cursor:\n print(row[0], \"%0.2f\" % row[1])", "BONUS: Extend the query above so that it only includes horror movies that have ten or more ratings. Fill in the query as indicated below.\nExpected output:\nChildren of the Corn: The Gathering (1996) 1.32\nBody Parts (1991) 1.62\nAmityville II: The Possession (1982) 1.64\nJaws 3-D (1983) 1.94\nHellraiser: Bloodline (1996) 2.00\nTales from the Hood (1995) 2.04\nAudrey Rose (1977) 2.17\nAddiction, The (1995) 2.18\nHalloween: The Curse of Michael Myers (1995) 2.20\nPhantoms (1998) 2.23", "cursor = conn.cursor()\nstatement = \"SELECT movie_title, AVG(rating) FROM udata JOIN uitem ON uitem.movie_id = udata.item_id WHERE horror = 1 GROUP BY movie_title HAVING COUNT(rating) > 10 ORDER BY AVG(rating) LIMIT 10;\"\ncursor.execute(statement)\nfor row in cursor:\n print(row[0], \"%0.2f\" % row[1])", "Great work! You're done." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
agarsev/grafeno
docs/_examples/visualization.ipynb
agpl-3.0
[ "Interactive visualization of grafeno Graphs\nWe use the great D3 library: https://d3js.org/", "from grafeno import Graph\nfrom grafeno.transformers import get_pipeline\nfrom grafeno.jupyter import visualize\n\nimport yaml\nsemantic_pipeline = yaml.load(open('../../configs/semantic.yaml'))\nT = get_pipeline(['spacy_parse']+semantic_pipeline.get('transformers'))", "One sentence", "sentence = \"\"\"\nJohn writes a short program that works correctly and he comments his code like a good student.\n\"\"\"\n\nG1 = Graph(text=sentence, transformer=T)\n\nvisualize(G1)", "Bigger graph (from the simple.wikipedia page of AI)", "text = \"\"\"\nAn extreme goal of AI research is to create computer programs that can learn, solve problems, and think logically.\nIn practice, however, most applications have picked on problems which computers can do well.\nSearching data bases and doing calculations are things computers do better than people.\nOn the other hand, \"perceiving its environment\" in any real sense is way beyond present-day computing.\n\"\"\"\n\nG2 = Graph(text=text, transformer=T)\n\nvisualize(G2)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
keylime1/courses_12-752
assignments/2/12-752_Assignment_2_Starter.ipynb
mit
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport datetime as dt\n%matplotlib inline", "Section 1.1 - Importing the Data\nLet's begin in the same way we did for Assignment #2 of 2014, but this time let's start with importing the temperature data:", "temperatureDateConverter = lambda d : dt.datetime.strptime(d,'%Y-%m-%d %H:%M:%S')\ntemperature = np.genfromtxt('../../data/temperature.csv',delimiter=\",\",dtype=[('timestamp', type(dt.datetime.now)),('tempF', 'f8')],converters={0: temperatureDateConverter}, skiprows=1)", "Notice that, because we are asking for the data to be interpreted as having different types for each column, and the the numpy.ndarray can only handle homoegenous types (i.e., all the elements of the array must be of the same type) then the resulting array is a one dimensional ndarray of tuples. Each tuple corresponds to a row in the file and in it, then, are the three columns for the row.\nFormally, this is called a Structured Array and is something you should read up on if you want to fully understand what it means and how to handle these types of data structures:\nhttps://docs.scipy.org/doc/numpy/user/basics.rec.html", "print \"The variable 'temperature' is a \" + str(type(temperature)) + \" and it has the following shape: \" + str(temperature.shape)", "Fortunately, these structured arrays allow us to access the content inside the tuples directly by calling the field names. Let's figure out what those field names are:", "temperature.dtype.fields", "Now let's see what the timestamps look like, for this dataset:", "plt.plot(temperature['timestamp'])", "Seems as if there are no gaps, but let's make sure about that. 
First, let's compute the minimum and maximum difference between any two consecutive timestamps:", "print \"The minimum difference between any two consecutive timestamps is: \" + str(np.min(np.diff(temperature['timestamp'])))\nprint \"The maximum difference between any two consecutive timestamps is: \" + str(np.max(np.diff(temperature['timestamp']))) ", "Given that they both are 5 minutes, then it means that there really is no gap in the datset, and all temperature measurements were taken 5 minutes apart.\nSince we need temperature readings every 15 minutes we can downsample this dataset. There are many ways to do the downsampling, and it is important to understand the effects each of them may have on the final result we are seeking. However, this is beyond the scope of the class, so I will pick a very naïve approach and simply select every third sample:", "temperature = temperature[0:-1:3]", "Finally, let's make a note of when the first and last timestamp are:", "print \"First timestamp is on \\t{}. 
\\nLast timestamp is on \\t{}.\".format(temperature['timestamp'][0], temperature['timestamp'][-1])", "Loading the Power Data\nJust as we did before, we start with the genfromtxt function:", "dateConverter = lambda d : dt.datetime.strptime(d,'%Y/%m/%d %H:%M:%S')\npower = np.genfromtxt('../../data/campusDemand.csv',delimiter=\",\",names=True,dtype=['S255',dt.datetime,'f8'],converters={1: dateConverter})", "Let's figure out how many meters there are, and where they are in the ndarray, as well as how many datapoints they have.", "name, indices, counts = np.unique(power['Point_name'], return_index=True,return_counts=True)", "Now let's print that information in a more readable fashion:", "for i in range(len(name)):\n print str(name[i])+\"\\n\\t from \"+str(power[indices[i]]['Time'])+\" to \"+str(power[indices[i]+counts[i]-1]['Time'])+\"\\n\\t or \"+str(power[indices[i]+counts[i]-1]['Time']-power[indices[i]]['Time'])\n ", "Since only one meter needs to be used, pick the one you like and discard the rest:", "power=power[power['Point_name']==name[3]]", "Let's make sure the data is sorted by time and then let's plot it", "power = np.sort(power,order='Time')\n\nfig1= plt.figure(figsize=(15,5))\nplt.plot(power['Time'],power['Value'])\nplt.title(name[0])\nplt.xlabel('Time')\nplt.ylabel('Power [Watts]')", "Are there gaps in this dataset?", "power = np.sort(power,order='Time')\nprint \"The minimum difference between any two consecutive timestamps is: \" + str(np.min(np.diff(power['Time'])))\nprint \"The maximum difference between any two consecutive timestamps is: \" + str(np.max(np.diff(power['Time']))) ", "And when is the first and last timestamp for this dataset? (We would like them to overlap as much as possible):", "print \"First timestamp is on \\t{}. 
\\nLast timestamp is on \\t{}.\".format(power['Time'][0], power['Time'][-1])", "So let's summarize the differences in terms of the timestamps:\n\n\nThere is at least one significant gap (1 day and a few hours), and there's also a strange situation that causes two consecutive samples to have the same timestamp (i.e., the minimum difference is zero).\n\n\nThe temperature dataset starts a little later, and ends almost a full day later than the power dataset.\n\n\nYes, this is incovnenient, I know. It is painful to know that not only are the two datasets sampled at different rates, but they are also of different lengths of time and one of them has gaps. \nThis is what real data looks like, in case you were wondering.\nAt this point, one of the simplest ways to move forward without having to re-invent the wheel would be to rely on the help of more powerful libraries such as Pandas. \nHowever, just to make things more fun and instructional, I am going to go through the trouble of implementing a interpolation function myself and will use it to obtain power values at exactly the same timestamps as the temperature data is providing.\nIn other words, let's assume that the timestamps for the temperature data are $t^T_i$ $\\forall i \\in [1, 2, \\ldots n_T]$, and that the timestamps for the power data are $t^P_i$ $\\forall i \\in [1, 2, \\ldots n_P]$, where $n_T$ and $n_P$ are the number of records in the temperature and power datasets, respectively. What I am interested in doing is finding the values of power $P$ at exactly all of the $n_T$ temperature timestamps, i.e. 
find $P(t^T_i)$ $\\forall i$.\nWe will do all of these things in the next section.\nHarmonizing the time series\nFirst let's remember what times the two time series (power and temperature) start and end:", "print \"Power data from {0} to {1}.\\nTemperature data from {2} to {3}\".format(power['Time'][0], power['Time'][-1], temperature['timestamp'][0], temperature['timestamp'][-1])", "Clearly, we don't need the portion of the temperature data that is collected beyond the dates that we have power data. Let's remove this (note that the magic number 24 corresponds to 360 minutes or 6 hours):", "temperature = temperature[0:-24] ", "Now let's create the interpolation function:", "def power_interp(tP, P, tT):\n # This function assumes that the input is an numpy.ndarray of datetime objects\n\n # Most useful interpolation tools don't work well with datetime objects\n # so we convert all datetime objects into the number of seconds elapsed\n # since 1/1/1970 at midnight (also called the UNIX Epoch, or POSIX time):\n \n toposix = lambda d: (d - dt.datetime(1970,1,1,0,0,0)).total_seconds()\n tP = map(toposix, tP)\n tT = map(toposix, tT)\n \n # Now we interpolate\n from scipy.interpolate import interp1d\n f = interp1d(tP, P,'linear')\n \n return f(tT)\n ", "And let's use that funciton to get a copy of the interpolated power values, extracted at exactly the same timestamps as the temperature dataset:", "newPowerValues = power_interp(power['Time'], power['Value'], temperature['timestamp'])", "Finally, to keep things simple, let's restate the variables that matter:", "toposix = lambda d: (d - dt.datetime(1970,1,1,0,0,0)).total_seconds()\n\ntimestamp_in_seconds = map(toposix,temperature['timestamp'])\ntimestamps = temperature['timestamp']\ntemp_values = temperature['tempF']\npower_values = newPowerValues", "And let's plot it to see what it looks like.", "plt.figure(figsize=(15,15))\nplt.plot(timestamps,power_values,'ro')\nplt.figure(figsize=(15,15))\nplt.plot(timestamps, temp_values, 
'--b')", "Task #1\nNow let's put all of this data into a single structured array.\nTask #2\nSince we have the timestamps in 'datetime' format we can easily do the extraction of the indeces:", "weekday = map(lambda t: t.weekday(), timestamps)\nweekends = np.where( ) ## Note that depending on how you do this, the result could be a tuple of ndarrays.\nweekdays = np.where( )", "Did we do this correctly?", "len(weekday) == len(weekends[0]) + len(weekdays[0]) ## This is assuming you have a tuple of ndarrays", "Seems like we did.\nTask #3\nSimilar as in the previous task...", "hour = map(lambda t: t.hour, timestamps)\noccupied = np.where( )\nunoccupied = np.where( )\n", "Task #4\nLet's calculate the temperature components, by creating a function that does just that:", "def Tc(temperature, T_bound):\n # The return value will be a matrix with as many rows as the temperature\n # array, and as many columns as len(T_bound) [assuming that 0 is the first boundary]\n \n Tc_matrix = np.zeros((len(temperature), len(T_bound)))\n \n return Tc_matrix" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
python-visualization/folium
examples/Plugins.ipynb
mit
[ "Examples of plugins usage in folium\nIn this notebook we show a few illustrations of folium's plugin extensions. These are available after importing folium.plugins.\nScrollZoomToggler\nAdds a button to enable/disable zoom scrolling.", "import folium\nfrom folium import plugins\n\nm = folium.Map([45, 3], zoom_start=4)\n\nplugins.ScrollZoomToggler().add_to(m)\n\nm", "MarkerCluster\nAdds a MarkerCluster layer on the map.", "import numpy as np\n\n\nN = 100\ndata = np.array(\n [\n np.random.uniform(low=35, high=60, size=N), # Random latitudes in Europe.\n np.random.uniform(low=-12, high=30, size=N), # Random longitudes in Europe.\n ]\n).T\npopups = [str(i) for i in range(N)] # Popups texts are simple numbers.\n\nm = folium.Map([45, 3], zoom_start=4)\n\nplugins.MarkerCluster(data, popups=popups).add_to(m)\n\nm", "Terminator", "m = folium.Map([45, 3], zoom_start=1)\n\nplugins.Terminator().add_to(m)\n\nm", "BoatMarker", "m = folium.Map([30, 0], zoom_start=3)\n\nplugins.BoatMarker(\n location=(34, -43), heading=45, wind_heading=150, wind_speed=45, color=\"#8f8\"\n).add_to(m)\n\nplugins.BoatMarker(\n location=(46, -30), heading=-20, wind_heading=46, wind_speed=25, color=\"#88f\"\n).add_to(m)\n\nm", "BeautifyIcon", "m = folium.Map([45.5, -122], zoom_start=3)\n\nicon_plane = plugins.BeautifyIcon(\n icon=\"plane\", border_color=\"#b3334f\", text_color=\"#b3334f\", icon_shape=\"triangle\"\n)\n\nicon_number = plugins.BeautifyIcon(\n border_color=\"#00ABDC\",\n text_color=\"#00ABDC\",\n number=10,\n inner_icon_style=\"margin-top:0;\",\n)\n\nfolium.Marker(location=[46, -122], popup=\"Portland, OR\", icon=icon_plane).add_to(m)\n\nfolium.Marker(location=[50, -122], popup=\"Portland, OR\", icon=icon_number).add_to(m)\n\nm", "Fullscreen", "m = folium.Map(location=[41.9, -97.3], zoom_start=4)\n\nplugins.Fullscreen(\n position=\"topright\",\n title=\"Expand me\",\n title_cancel=\"Exit me\",\n force_separate_button=True,\n).add_to(m)\n\nm", "Timestamped GeoJSON", "m = 
folium.Map(location=[35.68159659061569, 139.76451516151428], zoom_start=16)\n\n# Lon, Lat order.\nlines = [\n {\n \"coordinates\": [\n [139.76451516151428, 35.68159659061569],\n [139.75964426994324, 35.682590062684206],\n ],\n \"dates\": [\"2017-06-02T00:00:00\", \"2017-06-02T00:10:00\"],\n \"color\": \"red\",\n },\n {\n \"coordinates\": [\n [139.75964426994324, 35.682590062684206],\n [139.7575843334198, 35.679505030038506],\n ],\n \"dates\": [\"2017-06-02T00:10:00\", \"2017-06-02T00:20:00\"],\n \"color\": \"blue\",\n },\n {\n \"coordinates\": [\n [139.7575843334198, 35.679505030038506],\n [139.76337790489197, 35.678040905014065],\n ],\n \"dates\": [\"2017-06-02T00:20:00\", \"2017-06-02T00:30:00\"],\n \"color\": \"green\",\n \"weight\": 15,\n },\n {\n \"coordinates\": [\n [139.76337790489197, 35.678040905014065],\n [139.76451516151428, 35.68159659061569],\n ],\n \"dates\": [\"2017-06-02T00:30:00\", \"2017-06-02T00:40:00\"],\n \"color\": \"#FFFFFF\",\n },\n]\n\nfeatures = [\n {\n \"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"LineString\",\n \"coordinates\": line[\"coordinates\"],\n },\n \"properties\": {\n \"times\": line[\"dates\"],\n \"style\": {\n \"color\": line[\"color\"],\n \"weight\": line[\"weight\"] if \"weight\" in line else 5,\n },\n },\n }\n for line in lines\n]\n\nplugins.TimestampedGeoJson(\n {\n \"type\": \"FeatureCollection\",\n \"features\": features,\n },\n period=\"PT1M\",\n add_last_point=True,\n).add_to(m)\n\nm\n\ntable = \"\"\"\\\n<table style=\\'width:100%\\'>\n <tr>\n <th>Firstname</th>\n <th>Lastname</th>\n <th>Age</th>\n </tr>\n <tr>\n <td>Jill</td>\n <td>Smith</td>\n <td>50</td>\n </tr>\n <tr>\n <td>Eve</td>\n <td>Jackson</td>\n <td>94</td>\n </tr>\n</table>\n\"\"\"\n\npoints = [\n {\n \"time\": \"2017-06-02\",\n \"popup\": \"<h1>address1</h1>\",\n \"coordinates\": [-2.548828, 51.467697],\n },\n {\n \"time\": \"2017-07-02\",\n \"popup\": \"<h2 style='color:blue;'>address2<h2>\",\n \"coordinates\": [-0.087891, 51.536086],\n },\n 
{\n \"time\": \"2017-08-02\",\n \"popup\": \"<h2 style='color:orange;'>address3<h2>\",\n \"coordinates\": [-6.240234, 53.383328],\n },\n {\n \"time\": \"2017-09-02\",\n \"popup\": \"<h2 style='color:green;'>address4<h2>\",\n \"coordinates\": [-1.40625, 60.261617],\n },\n {\"time\": \"2017-10-02\", \"popup\": table, \"coordinates\": [-1.516113, 53.800651]},\n]\n\nfeatures = [\n {\n \"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": point[\"coordinates\"],\n },\n \"properties\": {\n \"time\": point[\"time\"],\n \"popup\": point[\"popup\"],\n \"id\": \"house\",\n \"icon\": \"marker\",\n \"iconstyle\": {\n \"iconUrl\": \"https://leafletjs.com/examples/geojson/baseball-marker.png\",\n \"iconSize\": [20, 20],\n },\n },\n }\n for point in points\n]\n\nfeatures.append(\n {\n \"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"LineString\",\n \"coordinates\": [\n [-2.548828, 51.467697],\n [-0.087891, 51.536086],\n [-6.240234, 53.383328],\n [-1.40625, 60.261617],\n [-1.516113, 53.800651],\n ],\n },\n \"properties\": {\n \"popup\": \"Current address\",\n \"times\": [\n \"2017-06-02\",\n \"2017-07-02\",\n \"2017-08-02\",\n \"2017-09-02\",\n \"2017-10-02\",\n ],\n \"icon\": \"circle\",\n \"iconstyle\": {\n \"fillColor\": \"green\",\n \"fillOpacity\": 0.6,\n \"stroke\": \"false\",\n \"radius\": 13,\n },\n \"style\": {\"weight\": 0},\n \"id\": \"man\",\n },\n }\n)\n\nm = folium.Map(\n location=[56.096555, -3.64746],\n tiles=\"cartodbpositron\",\n zoom_start=5,\n)\n\nplugins.TimestampedGeoJson(\n {\"type\": \"FeatureCollection\", \"features\": features},\n period=\"P1M\",\n add_last_point=True,\n auto_play=False,\n loop=False,\n max_speed=1,\n loop_button=True,\n date_options=\"YYYY/MM/DD\",\n time_slider_drag_update=True,\n duration=\"P2M\",\n).add_to(m)\n\nm", "FeatureGroupSubGroup\nSub categories\nDisable all markers in the category, or just one of the subgroup.", "m = folium.Map(location=[0, 0], zoom_start=6)\n\nfg = 
folium.FeatureGroup(name=\"groups\")\nm.add_child(fg)\n\ng1 = plugins.FeatureGroupSubGroup(fg, \"group1\")\nm.add_child(g1)\n\ng2 = plugins.FeatureGroupSubGroup(fg, \"group2\")\nm.add_child(g2)\n\nfolium.Marker([-1, -1]).add_to(g1)\nfolium.Marker([1, 1]).add_to(g1)\n\nfolium.Marker([-1, 1]).add_to(g2)\nfolium.Marker([1, -1]).add_to(g2)\n\nfolium.LayerControl(collapsed=False).add_to(m)\n\nm", "Marker clusters across groups\nCreate two subgroups, but cluster markers together.", "m = folium.Map(location=[0, 0], zoom_start=6)\n\nmcg = folium.plugins.MarkerCluster(control=False)\nm.add_child(mcg)\n\ng1 = folium.plugins.FeatureGroupSubGroup(mcg, \"group1\")\nm.add_child(g1)\n\ng2 = folium.plugins.FeatureGroupSubGroup(mcg, \"group2\")\nm.add_child(g2)\n\nfolium.Marker([-1, -1]).add_to(g1)\nfolium.Marker([1, 1]).add_to(g1)\n\nfolium.Marker([-1, 1]).add_to(g2)\nfolium.Marker([1, -1]).add_to(g2)\n\nfolium.LayerControl(collapsed=False).add_to(m)\n\nm", "Minimap\nAdds a locator minimap to a folium document.", "m = folium.Map(location=(30, 20), zoom_start=4)\n\nminimap = plugins.MiniMap()\nm.add_child(minimap)\n\nm", "DualMap\nThe DualMap plugin can be used to display two maps side by side, where panning and zooming is synchronized.\nThe DualMap class can be used just like the normal Map class. 
The two sub-maps can be accessed with its m1 and m2 attributes.", "m = plugins.DualMap(location=(52.1, 5.1), tiles=None, zoom_start=8)\n\nfolium.TileLayer(\"cartodbpositron\").add_to(m.m2)\nfolium.TileLayer(\"openstreetmap\").add_to(m)\n\nfg_both = folium.FeatureGroup(name=\"markers_both\").add_to(m)\nfg_1 = folium.FeatureGroup(name=\"markers_1\").add_to(m.m1)\nfg_2 = folium.FeatureGroup(name=\"markers_2\").add_to(m.m2)\n\nicon_red = folium.Icon(color=\"red\")\nfolium.Marker((52, 5), tooltip=\"both\", icon=icon_red).add_to(fg_both)\nfolium.Marker((52.4, 5), tooltip=\"left\").add_to(fg_1)\nfolium.Marker((52, 5.4), tooltip=\"right\").add_to(fg_2)\n\nfolium.LayerControl(collapsed=False).add_to(m)\n\n\nm", "Locate control\nAdds a control button that when clicked, the user device geolocation is displayed.\nFor list of all possible keyword options see:\nhttps://github.com/domoritz/leaflet-locatecontrol#possible-options\nTo work properly in production, the connection needs to be encrypted (HTTPS), otherwise browser will not\nallow users to share their location.", "m = folium.Map([41.97, 2.81])\n\nplugins.LocateControl().add_to(m)\n\n# If you want get the user device position after load the map, set auto_start=True\nplugins.LocateControl(auto_start=True).add_to(m)\n\nm", "SemiCircle\nThis can be used to display a semicircle or sector on a map. Whilst called SemiCircle it is not limited to 180 degree angles and can be used to display a sector of any angle. 
\nThe semicircle is defined with a location (the central point, if it was a full circle), a radius and will either have a direction and an arc or a start angle and a stop angle.", "m = folium.Map([45, 3], zoom_start=5)\n\nplugins.SemiCircle(\n (45, 3),\n radius=400000,\n start_angle=50,\n stop_angle=200,\n color=\"green\",\n fill_color=\"green\",\n opacity=0,\n popup=\"start angle - 50 degrees, stop angle - 200 degrees\",\n).add_to(m)\n\nplugins.SemiCircle(\n (46.5, 9.5),\n radius=200000,\n direction=360,\n arc=90,\n color=\"red\",\n fill_color=\"red\",\n opacity=0,\n popup=\"Direction - 0 degrees, arc 90 degrees\",\n).add_to(m)\n\nm", "Geocoder\nAdds a search box to the map to search for geographic features like cities, countries, etc. You can search with names or addresses.\nUses the Nomatim service from OpenStreetMap. Please respect their usage policy: https://operations.osmfoundation.org/policies/nominatim/", "m = folium.Map()\n\nplugins.Geocoder().add_to(m)\n\nm" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
usantamaria/ipynb_para_docencia
12_ejemplos_graficos_parametricos/graficos_parametricos.ipynb
mit
[ "<img src=\"images/utfsm.png\" alt=\"\" width=\"200px\" align=\"right\"/>\nUSM Numérica\nTema del Notebook\nObjetivos\n\nPrimero objetivo del Notebook\nSegundo objetivo del Notebook\n\n0.1 Instrucciones\nLas instrucciones de instalación y uso de un ipython notebook se encuentran en el siguiente link.\nDespués de descargar y abrir el presente notebook, recuerden:\n* Desarrollar los problemas de manera secuencial.\n* Guardar constantemente con Ctr-S para evitar sorpresas.\n* Reemplazar en las celdas de código donde diga FIX_ME por el código correspondiente.\n* Ejecutar cada celda de código utilizando Ctr-Enter\n0.2 Licenciamiento y Configuración\nEjecutar la siguiente celda mediante Ctr-Enter.", "\"\"\"\nIPython Notebook v4.0 para python 3.0\nLibrerías adicionales: numpy, scipy, matplotlib. (EDITAR EN FUNCION DEL NOTEBOOK!!!)\nContenido bajo licencia CC-BY 4.0. Código bajo licencia MIT. \n(c) Sebastian Flores, Christopher Cooper, Alberto Rubio, Pablo Bunout.\n\"\"\"\n# Configuración para recargar módulos y librerías dinámicamente\n%reload_ext autoreload\n%autoreload 2\n\n# Configuración para graficos en línea\n%matplotlib inline\n\n# Configuración de estilo\nfrom IPython.core.display import HTML\nHTML(open(\"./style/style.css\", \"r\").read())", "1. Archivos\nLos archivos de los laboratorios deben tener la siguiente estructura:\n1. Archivo .ipynb de extensión ipython notebook, para python 3. Nombre varía de laboratorio a laboratorio.\n1. Carpeta code/ que contiene los códigos del laboratorio. Varía de laboratorio a laboratorio, pero debe tener __init__.py y lab.py.\n1. Carpeta data/ (opcional) que contiene los archivos de datos a utilizar en el laboratorio.\n1. Carpeta images/ que contiene las imágenes del laboratorio. Varía de laboratorio a laboratorio, pero debería al menos contener utfsm.png.\n1. 
Carpeta style/ que contiene el archivo lab.css con el estilo predefinido y común a los laboratorios.\n2 Uso de ipython notebook\nLos siguientes son ejemplos de acciones comunes en ipython notebook", "import numpy as np\nfrom matplotlib import pyplot as plt\n\n# Presionar tabulación con el cursor despues de np.arr\nnp.arr\n\n# Presionar Ctr-Enter para obtener la documentacion de la funcion np.array usando \"?\"\nnp.array?\n\n# Presionar Ctr-Enter\n%whos\n\nx = 10\n%whos", "Tipos de letras\nSe tienen los siguientes tipos de letras, gentileza del markdown y estilo lab.css:\n1. emphasis\n2. strong\n3. strong and emphasis\n4. code\n5. code and emphasis\n6. code and strong\n7. code, strong and emphasis.\n8. Código en python\nPython\n for i in range(n):\n print(i)\n9. Código en bash\nBash\necho \"hola\"\nDesafío 1 (10%)\nDespués de pasar un contenido, es importante realizar una evaluación mediante algún problema o desafío. Indicar el porcentaje asociado a la evaluación.\nPor ejemplo, aquí se requiere implementar la regla de cuadratura (integración) trapezoidal.", "def mi_funcion(x):\n f = 1 + x + x**3 + x**5 + np.sin(x)\n return f\n\nN = 5\nx = np.linspace(-1,1,N)\ny = mi_funcion(x)\n# FIX ME\nI = 0 # FIX ME\n# FIX ME\nprint(\"Area bajo la curva: %.3f\" %I)\n\n# Ilustración gráfica\nx_aux = np.linspace(x.min(),x.max(),N**2)\nfig = plt.figure(figsize=(12,8))\nfig.gca().fill_between(x, 0, y, alpha=0.25)\nplt.plot(x_aux, mi_funcion(x_aux), 'k')\nplt.plot(x, y, 'r.-')\nplt.show()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
rcrehuet/Python_for_Scientists_2017
notebooks/solutions/4_1.Molecular weight.ipynb
gpl-3.0
[ "Molecular weight\nWe want to calculate the molecular weight of a chemical formula. So given $\\mathrm{C_6H_6}$ we want the result to be $12\\times 6+1\\times 6 = 18$. We can use integer as an approximation to the atomic weight to ease our mental calculations. Improving that at the end is trivial.\nThe most straightforward way to keep the atomic weights of elements is in a dicctionary:", "weightDict = {\n'C':12,\n'H':1,\n'O':16,\n'Cl':35\n#add more if needed.\n}", "Parsing the molecular formula is not a trivial task that we will do later. We start by assuming that the formula has been parsed.\nWe will start by keeping the formula as a dictionary. In a condensed formula, each element appears only once, and its subindex indicates the total amount of atoms of that element in the molecule. Ethanol is $\\mathrm{C_2H_6O}$.", "ethanol = {'C':2, 'H':6, 'O':1}\nwater = {'H':2, 'O':1}\nHCl = {'H':1, 'Cl':1}", "From that, calculate the total weight:", "#Finish...", "Now imagine we also accept formulas in an extended way, for example ethanol as $\\mathrm{CH_3CH_2OH}$. In that case it makes sense that our parsing procedure returns a list of tuples such as:", "ethanol2 = [('C',1), ('H',3), ('C',1), ('H',2), ('O',1), ('H',1)]\nacetic2 = [('C',1), ('H',3), ('C',1), ('O',1), ('O',1), ('H',1)]", "From that, we could also create a dictionary such as the previous one, but we can also calculate the weight directly:", "#Finish", "Parsing\nParsing the formula is not a trivial task. You have to remember the follwing:\n\nSome elements have 1 letter names, others have 2. In that case the second letter is always lower-case.\nSome numbers can be higher than 9, i.e. 
use 2 or more figures.\nWhen the number is 1, it us usually not written.\n\nWhen coding a complex situation such as this one, it makes sense to plan a strategy for all these scenarios, but start coding and testing the simple cases (1 letter per element, etc)\nPython has a built-in module to work with regular expressions that could ease the parsing. But in this case the problem is simple enough that you can solve it without the use of regular expressions.\nHere is a possible solution, but different (and possibly better!) approaches are surely possible:", "#Try to do it before looking at the answer!\n\ndef weight(formula):\n \"\"\"\n Calcula el pes atòmic d'una formula química\n \"\"\"\n def parsing(formula):\n \"\"\"\n Parse the formula and return a list of pairs such as ('C', 3)\n \"\"\" \n formList = []\n number = None\n symbol=''\n for s in formula:\n if s.isdigit():\n try:\n number = number + s\n except:\n number = s\n elif s.islower(): #we're reading a 2-letter symbol\n symbol = symbol + s \n else: #We're reading a new symbol\n if not number and symbol: #If the symbol does not have subindex\n formList.append((symbol,1))\n elif number:\n formList.append((symbol,int(number)))\n symbol = s\n number = None\n #Add the last element\n if number: \n formList.append((symbol,int(number)))\n else:\n formList.append((symbol,1))\n return formList\n \n formulaList = parsing(formula)\n # With the list, calculate the weight\n weightDict={'C':12, 'O':16, 'H':1, 'Cl':35, 'S':32,'Na':23}\n weight = 0\n for elem, quant in formulaList:\n weight += weightDict[elem]*quant\n return weight\n\n(weight('C2H6O'), \nweight('CH3CH2OH'),\nweight('CH3COOH'),\nweight('C56H3CCl3H2OClNa3Na'))\n\nmolecule = input(\"Write a molecule: \")\n\nAtomic_dict = Atomic_dict_fun()\nlista = list(\"C2H6\")\nvS,vN = read_molecule(lista)\nprint(\"The result is\", molecular_weight_number(vS,vN))\n" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
DakotaNelson/discrete-stego
Blurred Image Comparison.ipynb
mit
[ "%matplotlib inline\n\nfrom compare_bmp import compare_images\nfrom PIL import Image, ImageFilter\nfrom matplotlib.pyplot import imshow", "We'll start with this image:", "imgpath = 'images/original/image.bmp'\nblurredpath = 'images/image_blurred.bmp'\n\nimg = Image.open(imgpath)\nblurred = img.copy().filter(ImageFilter.BLUR)\n\nblurred.save(blurredpath)", "And here it is now that we've blurred it:\n\nNow, let's compare the two to see what kind of error rates we can expect:", "[red_flipped, green_flipped, blue_flipped] = compare_images(imgpath, blurredpath)", "It looks like we can expect (in this scenario) a worst-case of about 50% of the image's least significant bits to be flipped." ]
[ "code", "markdown", "code", "markdown", "code", "markdown" ]
sysid/nbs
LP/Introduction-to-linear-programming/Introduction to Linear Programming with Python - Part 3.ipynb
mit
[ "Introduction to Linear Programming with Python - Part 3\nReal world examples - Resourcing Problem\nWe'll now look at 2 more real world examples. \nThe first is a resourcing problem and the second is a blending problem.\nResourcing Problem\nWe're consulting for a boutique car manufacturer, producing luxury cars.\nThey run on one month (30 days) cycles, we have one cycle to show we can provide value.\nThere is one robot, 2 engineers and one detailer in the factory. The detailer has some holiday off, so only has 21 days available.\nThe 2 cars need different time with each resource:\nRobot time: Car A - 3 days; Car B - 4 days.\nEngineer time: Car A - 5 days; Car B - 6 days.\nDetailer time: Car A - 1.5 days; Car B - 3 days.\nCar A provides €30,000 profit, whilst Car B offers €45,000 profit.\nAt the moment, they produce 4 of each cars per month, for €300,000 profit. Not bad at all, but we think we can do better for them.\nThis can be modelled as follows:\nMaximise\nProfit = 30,000A + 45,000B\nSubject to:\nA ≥ 0\nB ≥ 0\n3A + 4B ≤ 30\n5A + 6B ≤ 60\n1.5A + 3B ≤ 21", "import pulp\n\n# Instantiate our problem class\nmodel = pulp.LpProblem(\"Profit maximising problem\", pulp.LpMaximize)", "Unlike our previous problem, the decision variables in this case won't be continuous (We can't sell half a car!), so the category is integer.", "A = pulp.LpVariable('A', lowBound=0, cat='Integer')\nB = pulp.LpVariable('B', lowBound=0, cat='Integer')\n\n# Objective function\nmodel += 30000 * A + 45000 * B, \"Profit\"\n\n# Constraints\nmodel += 3 * A + 4 * B <= 30\nmodel += 5 * A + 6 * B <= 60\nmodel += 1.5 * A + 3 * B <= 21\n\n# Solve our problem\nmodel.solve()\npulp.LpStatus[model.status]\n\n# Print our decision variable values\nprint \"Production of Car A = {}\".format(A.varValue)\nprint \"Production of Car B = {}\".format(B.varValue)\n\n# Print our objective function value\nprint pulp.value(model.objective)", "So that's €330,000 monthly profit, compared to their original monthly profit 
of €300,000\nBy producing 2 cars of Car A and 4 cars of Car B, we bolster the profits at the factory by €30,000 per month.\nWe take our consultancy fee and leave the company with €360,000 extra profit for the factory every year.\nIn the next part, we'll be making some sausages!" ]
[ "markdown", "code", "markdown", "code", "markdown" ]
cuttlefishh/emp
code/04-subsets-prevalence/subset_samples_by_empo_and_study.ipynb
bsd-3-clause
[ "Author: stefan.m.janssen@gmail.com<br>\nDate: 27 Dec 2016<br>\nlanguage: Python 3.5<br>\nconda enviroment: micronota<br>\nlicense: unlicensed<br>\nsubset_samples_by_empo_and_study.ipynb\nSub-sample EMP\nCurrently the EMP comprises ~34k samples. Sometimes we want to operate on smaller sample numbers. Thus, we need to create several sub-samples for the EMP.\nThis notebook operates on the merged mapping file, filters samples accoring to some criteria (see below) and picks a specified numbers of samples, such that growing sub-samples do always contain all smaller sub-samples.", "%matplotlib inline\n\n# make all necessary imports\nimport pandas as pd\nimport numpy as np\nimport sys\nimport random\nimport seaborn as sn\nimport matplotlib.pyplot as plt\nimport os\nfrom commons import empo3_to_color", "the next cell contains all parameters that might need to be changed", "#filename of the input mapping file\nfile_mapping = '../../data/mapping-files/emp_qiime_mapping_release1.tsv'\n\n#filename of the resulting mapping file\nfile_output = 'modMapping.txt'\n\n#a file containing counts of observations for various OTU picking methods.\nfile_observations = '../../data/otu-picking/observations.tsv'\n\n#define seed to make results comparable between runs\nrandom.seed(7)\n\n#a sample must have at least 'minCounts' sequence reads to be included in qc-filtered\n#and minCountsReference/minCountsDeblur to be included in subsets\nminCounts = 1000 \nminCountsReference = 10000\nminCountsDeblur = 5000\n\n#sub-set sizes\nsetSizes = [1000, 2000, 5000, 10000]", "read in mapping file and filter accoring to three criteria:\n1. a sample must contain a certain number of raw sequence reads\n2. the sample must not be flagged as being a \"Control\"\n3. 
study is considered as being OK (this is a result of a manual curation)", "metadata = pd.read_csv(file_mapping, sep=\"\\t\", index_col=0, low_memory=False, dtype=str)\n#it is more consistent to read all fields as strings and manually convert to numeric values for selected columns. Thus, roundtripping (read -> write) results in a nearly identical file.\nmetadata['sequences_split_libraries'] = (pd.to_numeric(metadata['sequences_split_libraries'], errors='coerce'))\nmetadata['study_ok'] = metadata['study_ok'].map(lambda x: np.True_ if x == 'True' else np.False_)\n\n#load table containing read counts and merge to mapping file\nobservations = pd.read_csv(file_observations, sep=\"\\t\", index_col=0)\nmetadata = metadata.merge(observations, left_index=True, right_index=True, how=\"inner\")\n\nfiltered_metadata = metadata[\n #(metadata['sequences_split_libraries'] >= minCounts) & #remove samples with low read numbers\n (metadata['observations_closed_ref_greengenes'].fillna(0).astype(int) >= minCounts) &\n (metadata['observations_closed_ref_silva'].fillna(0).astype(int) >= minCounts) &\n (metadata['observations_open_ref_greengenes'].fillna(0).astype(int) >= minCounts) &\n (metadata['observations_deblur_90bp'].fillna(0).astype(int) >= minCounts) &\n (metadata['empo_1'] != 'Control') & #remove controls \n (metadata['study_ok'] == np.True_) & #keep only studies that are manually curated and found OK\n (metadata['empo_0'].notnull()) & #ensure we only deal with annotated data\n (metadata['empo_1'].notnull()) &\n (metadata['empo_2'].notnull()) &\n (metadata['empo_3'].notnull()) \n]\n\nsubset_metadata = metadata[\n #(metadata['sequences_split_libraries'] >= minCounts) & #remove samples with low read numbers\n (metadata['observations_closed_ref_greengenes'].fillna(0).astype(int) >= minCountsReference) &\n (metadata['observations_closed_ref_silva'].fillna(0).astype(int) >= minCountsReference) &\n (metadata['observations_open_ref_greengenes'].fillna(0).astype(int) >= 
minCountsReference) &\n (metadata['observations_deblur_90bp'].fillna(0).astype(int) >= minCountsDeblur) &\n (metadata['empo_1'] != 'Control') & #remove controls \n (metadata['study_ok'] == np.True_) & #keep only studies that are manually curated and found OK\n (metadata['empo_0'].notnull()) & #ensure we only deal with annotated data\n (metadata['empo_1'].notnull()) &\n (metadata['empo_2'].notnull()) &\n (metadata['empo_3'].notnull()) \n]\n\nemp_metadata = metadata[\n (metadata['sequences_split_libraries'].notnull()) & (metadata['sequences_split_libraries'] != 0)\n]", "actual logic to pick samples\nAssume that we want to create more than one subsample set. However, they should form a strict hierarchy, i.e. a sample in a smaller set must also occure in any larger sets.\nFurthermore, we want to make sure that each group is covered, since some groups are very large, others are small and might be otherwise subject of being missed. We divide the number of targeted samples by the number of existing groups (+ some corrections to end up with the right number of samples) and randomly sample within each group a sufficient number of samples.\nIf a group has more samples than needed samples should drawn from all existing studies in that group.\nA larger subset is always initialized with the IDs of the next smaller subset, thus we make sure to form the hierarchy.", "#convert np.infty to the actual number of total available samples in EMP satisfying the filtering criteria\nsetSizes = list(map(lambda x: x if x is not np.infty else subset_metadata.shape[0], setSizes))\n\n#make sure set sizes increase\nsetSizes = sorted(setSizes) \n\nsubsets = {} #resulting object, will hold sample ID lists for the different subsets\nfor i in range(0, len(setSizes)):\n setSize = setSizes[i]\n subsets[setSize] = [] #list of used sample IDs is empty at the beginning\n if i == 0:\n unusedMetadata = subset_metadata #if we compile the smallest subset, all available sample IDs can be used\n else:\n 
subsets[setSize].extend(subsets[setSizes[i-1]]) #a larger subset must always contain all sample IDs from a smaller subset\n unusedMetadata = subset_metadata[~subset_metadata.index.isin(subsets[setSizes[i-1]])] #make sure sample IDs are not added twice\n \n empo_groupped = unusedMetadata.groupby(['empo_0','empo_1','empo_2','empo_3']) #group available sample IDs to ensure that each group is covered in the resulting subset\n empo_seenGroups = 0\n for empo_n,empo_g in sorted(empo_groupped, key=lambda x: len(x[1])): #iterate through the groups, ascendingly by number of available samples\n empo_size = (setSize - len(subsets[setSize])) / (len(list(empo_groupped)) - empo_seenGroups) #define optimal number of samples for each group\n if empo_g.shape[0] < empo_size:\n toBeAdded = list(empo_g.index) #if group contains fewer samples than 'size', all sample IDs are added\n else:\n toBeAdded = []\n study_seenGroups = 0\n study_grouped = empo_g.groupby('study_id')\n for study_n, study_g in sorted(study_grouped, key=lambda x: len(x[1])):\n study_size = (empo_size - len(toBeAdded)) / (len(list(study_grouped)) - study_seenGroups)\n if study_g.shape[0] < study_size:\n toBeAdded.extend(list(study_g.index))\n else:\n toBeAdded.extend(random.sample(list(study_g.index), int(study_size)))\n study_seenGroups += 1\n subsets[setSize].extend(toBeAdded)\n empo_seenGroups += 1", "Write output by first merging new columns to the original metadata and than only write those to a output file.", "newColumnNames = []\n\n#add a column to mark samples that are in EMP (i.e. 
samples that have some counts)\nnewColumnNames.append('all_emp')\nmetadata[newColumnNames[-1]] = metadata.index.isin(emp_metadata.index)\n\n#add a column to mark samples that satisfy our filtering criteria\nnewColumnNames.append('qc_filtered')\nmetadata[newColumnNames[-1]] = metadata.index.isin(filtered_metadata.index)\n\n#add new columns to the orginal mapping file\nfor size in sorted(subsets.keys()):\n newColumnNames.append('subset_'+str(size))\n metadata[newColumnNames[-1]] = metadata.index.isin(subsets[size])\n\n#re-convert field to integer values and \"\" instead of np.float_\nmetadata['sequences_split_libraries'] = metadata['sequences_split_libraries'].map(lambda x: \"\" if np.isnan(x) else str(int(x)))\n\nmetadata[newColumnNames].to_csv(open(file_output, \"w\"), sep=\"\\t\")\n# metadata.to_csv(open(file_output, \"w\"), sep=\"\\t\") #if we want to save the complete mapping file\n\nprint(\"Result is stored in '%s'\" % (os.path.abspath(file_output)))", "diagnostics\nCollect data for diagnostics", "singles = []\nstudy_singles = []\nsets = {'everything': metadata,\n 'all_emp': emp_metadata,\n 'qc_filtered': filtered_metadata}\nordering = ['everything', 'all_emp', 'qc_filtered']\nfor setSize in sorted(subsets.keys()):\n name = 'subset_' + str(setSize)\n sets[name] = metadata[metadata[name]]\n ordering.append(name)\n\nfor setName in sets.keys():\n x = []\n for n,g in sets[setName].groupby(['empo_0','empo_1','empo_2','empo_3']):\n x.append({'samples': g.shape[0], \n 'studies': len(list(g.groupby('study_id'))),\n 'group': n[3],\n 'set': setName})\n singles.append(pd.DataFrame(x))\n \n study_x = []\n for n,g in metadata.groupby(['study_id']):\n study_x.append({'samples': sets[setName][sets[setName]['study_id'] == n].shape[0],\n 'study': n,\n 'set': setName})\n study_singles.append(pd.DataFrame(study_x))\n\ndata = pd.concat(singles)\nstudy_data = pd.concat(study_singles)", "visualize data\nTwo quick sanity checks:\nWe expect the sub-set distribution of 'number samples' 
(first plot) to follow the one of the 'filtered' data-set, but be cut off at a certain level such that only the lower part remains.\nThe distribution of 'number studies' (second plot) should be identical to the one of the 'filtered' data-set.", "empo_ordering = list(data[data.set == 'everything'].sort_values('samples', ascending=False).group)\n\nfig, ax = plt.subplots(2, 1, figsize=(20, 8))\nsn.barplot(x=\"set\", \n y=\"samples\", \n hue=\"group\", \n data=data, \n order=ordering,\n hue_order=empo_ordering,\n ax=ax[0],\n palette=sn.color_palette([empo3_to_color(g) for g in empo_ordering]),\n )\nl = ax[0].legend(bbox_to_anchor=(1.05, 1), loc=2)\nl = ax[0].set_ylabel(\"number samples\")\nl = ax[0].set_xlabel(\"\")\nl = ax[0].set_xticklabels([ s + \"\\n(n = \" + str(sets[s].shape[0]) + \")\" for s in ordering ])\n\nsn.barplot(x=\"set\", \n y=\"studies\", \n hue=\"group\", \n data=data, \n order=ordering,\n hue_order=empo_ordering,\n ax=ax[1],\n palette=sn.color_palette([empo3_to_color(g) for g in empo_ordering]),\n )\nl = ax[1].legend(bbox_to_anchor=(1.05, 1), loc=2)\nl.remove()\nl = ax[1].set_ylabel(\"number studies\")\nl = ax[1].set_xlabel(\"\")\nl = ax[1].set_xticklabels([ s + \"\\n(n = \" + str(len(list(sets[s].groupby('study_id')))) + \")\" for s in ordering ])", "We want to make sure that samples drawn within a EMPO group are equally (not proportionally) drawn from the studies that fall into the EMPO group. We should observe the same trimming of distributions as on the above graph 'number samples'. 
However, the picture gets blurred due to the fact that the overall number of samples to be drawn is dictated by the EMPO category.", "fig, ax = plt.subplots(1, 1, figsize=(20, 8))\n\nsn.barplot(x=\"set\",\n y=\"samples\",\n hue=\"study\",\n data=study_data,\n order=ordering,\n ax = ax,\n hue_order=list(study_data[study_data.set == 'everything'].sort_values('samples', ascending=False).study),\n )\nl = ax.legend(bbox_to_anchor=(1.05, 1), loc=2)\nl.remove()\nl = ax.set_ylabel(\"number samples\")\nl = ax.set_xlabel(\"\")\nl = ax.set_xticklabels([ s + \"\\n(n = \" + str(sets[s].shape[0]) + \")\" for s in ordering ])" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
Trevortds/Etymachine
Prototyping semi-supervised.ipynb
gpl-2.0
[ "Setup", "import tsvopener\nimport pandas as pd\nimport numpy as np\nfrom nltk import word_tokenize\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom scipy.sparse import csr_matrix, vstack\nfrom sklearn.semi_supervised import LabelPropagation, LabelSpreading\n\n\n\n\nregex_categorized = tsvopener.open_tsv(\"categorized.tsv\")\nhuman_categorized = tsvopener.open_tsv(\"human_categorized.tsv\")\n\n# Accuracy Check\n#\n# match = 0\n# no_match = 0\n# for key in human_categorized:\n# if human_categorized[key] == regex_categorized[key]:\n# match += 1\n# else:\n# no_match += 1\n# \n# print(\"accuracy of regex data in {} human-categorized words\".format(\n# len(human_categorized)))\n# print(match/(match+no_match))\n# \n# accuracy of regex data in 350 human-categorized words\n# 0.7857142857142857", "Prepare Vectors", "# set up targets for the human-categorized data\ntargets = pd.DataFrame.from_dict(human_categorized, 'index')\ntargets[0] = pd.Categorical(targets[0])\ntargets['code'] = targets[0].cat.codes\n# form: | word (label) | language | code (1-5)\n\ntmp_dict = {}\nfor key in human_categorized:\n tmp_dict[key] = tsvopener.etymdict[key]\nsupervised_sents = pd.DataFrame.from_dict(tmp_dict, 'index')\n\nall_sents = pd.DataFrame.from_dict(tsvopener.etymdict, 'index')\nvectorizer = CountVectorizer(stop_words='english', max_features=10000)\nall_sents.index.get_loc(\"anyways (adv.)\")\n\n\n# vectorize the unsupervised vectors.\n\nvectors = vectorizer.fit_transform(all_sents.values[:,0])\n\nprint(vectors.shape)\n# supervised_vectors = vectorizer.fit_transform(supervised_data.values[:,0])\n\n# add labels \n\n# initialize to -1\nall_sents['code'] = -1\n\n\nsupervised_vectors = csr_matrix((len(human_categorized),\n vectors.shape[1]), \n dtype=vectors.dtype)\n\nj = 0\nfor key in supervised_sents.index:\n all_sents.loc[key]['code'] = targets.loc[key]['code']\n i = all_sents.index.get_loc(key)\n supervised_vectors[j] = vectors[i]\n j += 1\n\n\n \n# 
supervised_vectors = csr_matrix((len(human_categorized),\n# unsupervised_vectors.shape[1]), \n# dtype=unsupervised_vectors.dtype)\n\n# j = 0\n# for key in supervised_data.index:\n# i = unsupervised_data.index.get_loc(key)\n# supervised_vectors[j] = unsupervised_vectors[i]\n# j += 1\n\n\n \nall_sents.loc['dicky (n.)']", "Use Scikit's semisupervised learning\nThere are two semisupervised methods that scikit has. Label Propagation and Label Spreading. The difference is in how they regularize.", "num_points = 1000\nnum_test = 50\n\nx = vstack([vectors[:num_points], supervised_vectors]).toarray()\nt = all_sents['code'][:num_points].append(targets['code'])\n\nx_test = x[-num_test:]\nt_test = t[-num_test:]\nx = x[:-num_test]\nt = t[:-num_test]\n\nlabel_prop_model = LabelSpreading(kernel='knn')\nfrom time import time\n\nprint(\"fitting model\")\ntimer_start = time()\nlabel_prop_model.fit(x, t)\nprint(\"runtime: %0.3fs\" % (time()-timer_start))\n\n\nprint(\"done!\")\n\n# unsupervised_data['code'].iloc[:1000]\n\nimport pickle \n\n# with open(\"classifiers/labelspreading_knn_all_but_100.pkl\", 'bw') as writefile:\n# pickle.dump(label_prop_model, writefile)\n\n\nimport smtplib\n \nserver = smtplib.SMTP('smtp.gmail.com', 587)\nserver.starttls()\nserver.login(\"trevortds3@gmail.com\", \"Picardy3\")\n \nmsg = \"Job's done!\"\nserver.sendmail(\"trevortds3@gmail.com\", \"trevortds@gmail.com\", msg)\nserver.quit()\n\ntargets", "Measuring effectiveness.", "from sklearn.metrics import precision_score, accuracy_score, f1_score, recall_score\n\n\n\nt_pred = label_prop_model.predict(x_test)\n\nprint(\"Metrics based on 50 hold-out points\")\n\nprint(\"Macro\")\nprint(\"accuracy: %f\" % accuracy_score(t_test, t_pred))\nprint(\"precision: %f\" % precision_score(t_test, t_pred, average='macro'))\nprint(\"recall: %f\" % recall_score(t_test, t_pred, average='macro'))\nprint(\"f1: %f\" % f1_score(t_test, t_pred, average='macro'))\nprint(\"\\n\\nMicro\")\nprint(\"accuracy: %f\" % 
accuracy_score(t_test, t_pred))\nprint(\"precision: %f\" % precision_score(t_test, t_pred, average='micro'))\nprint(\"recall: %f\" % recall_score(t_test, t_pred, average='micro'))\nprint(\"f1: %f\" % f1_score(t_test, t_pred, average='micro'))\n\nfrom sklearn import metrics\nimport matplotlib.pyplot as pl\n\nlabels = [\"English\", \"French\", \"Greek\", \"Latin\",\"Norse\", \"Other\"]\nlabels_digits = [0, 1, 2, 3, 4, 5]\ncm = metrics.confusion_matrix(t_test, t_pred, labels_digits)\n\nfig = pl.figure()\nax = fig.add_subplot(111)\ncax = ax.matshow(cm)\npl.title(\"Label Spreading with KNN kernel (k=7)\")\nfig.colorbar(cax)\nax.set_xticklabels([''] + labels)\nax.set_yticklabels([''] + labels)\npl.xlabel('Predicted')\npl.ylabel('True')\n\npl.show()", "PCA: Let's see what it looks like\nPerforming PCA", "supervised_vectors\n\n\nimport matplotlib.pyplot as pl\n\nu, s, v = np.linalg.svd(supervised_vectors.toarray())\npca = np.dot(u[:,0:2], np.diag(s[0:2]))\n\n\n\nenglish = np.empty((0,2))\nfrench = np.empty((0,2))\ngreek = np.empty((0,2))\nlatin = np.empty((0,2))\nnorse = np.empty((0,2))\nother = np.empty((0,2))\n\nfor i in range(pca.shape[0]):\n if targets[0].iloc[i] == \"English\":\n english = np.vstack((english, pca[i]))\n elif targets[0].iloc[i] == \"French\":\n french = np.vstack((french, pca[i]))\n elif targets[0].iloc[i] == \"Greek\":\n greek = np.vstack((greek, pca[i]))\n elif targets[0].iloc[i] == \"Latin\":\n latin = np.vstack((latin, pca[i]))\n elif targets[0].iloc[i] == \"Norse\":\n norse = np.vstack((norse, pca[i]))\n elif targets[0].iloc[i] == \"Other\":\n other = np.vstack((other, pca[i]))\n \npl.plot( english[:,0], english[:,1], \"ro\", \n french[:,0], french[:,1], \"bs\",\n greek[:,0], greek[:,1], \"g+\",\n latin[:,0], latin[:,1], \"c^\",\n norse[:,0], norse[:,1], \"mD\",\n other[:,0], other[:,1], \"kx\")\npl.axis([-5,0,-2, 5])\npl.show()\n\nprint (s)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
spectralDNS/shenfun
docs/source/fasttransforms.ipynb
bsd-2-clause
[ "<!-- File automatically generated using DocOnce (https://github.com/doconce/doconce/):\ndoconce format ipynb fasttransforms.do.txt -->\n\nDemo - Some fast transforms\nMikael Mortensen (email: mikaem@math.uio.no), Department of Mathematics, University of Oslo.\nDate: May 27, 2021\nSummary. This demo will show how to compute fast forward transforms for the three\ndifferent Dirichlet bases that are implemented for Chebyshev\npolynomials in Shenfun.\nForward and backward transforms\nA function $u(x)$ can be approximated in a finite global spectral\nexpansion $u_N(x)$ as\n<!-- Equation labels as ordinary links -->\n<div id=\"eq:expansion\"></div>\n\n$$\n\\label{eq:expansion} \\tag{1}\nu_N(x) = \\sum_{k=0}^{N-1} \\hat{u}_k \\phi_k(x), \\quad \\forall \\, x \\, \\in [-1, 1],\n$$\nwhere $\\phi_k(x)$ are the basis functions and $\\boldsymbol{\\hat{u}} = {\\hat{u}k}{k=0}^{N-1}$\nare the expansion coefficients. The function $u_N(x)$ is continuous\non the interval domain $[-1, 1]$. The span of the basis functions\n$V_N = \\text{span} {\\phi_k}{k=0}^{N-1}$ represents a functionspace.\nAssociated with this functionspace is a set of quadrature points\n${x_k}{k=0}^{N-1}$ that, along with quadrature weights ${\\omega_k}{k=0}^{N-1}$, can be used\nfor efficient integration. We can also evaluate the function $u_N(x)$ at\nthese quadrature points to get the sequence\n$\\boldsymbol{u} = {u_N(x_k)}{k=0}^{N-1}$. If $\\boldsymbol{\\hat{u}}={\\hat{u}k}{k=0}^{N-1}$ are known,\nthen $\\boldsymbol{u}$ can be evaluated directly from\nEq. (1)\n<!-- Equation labels as ordinary links -->\n<div id=\"eq:expansionQ\"></div>\n\n$$\n\\label{eq:expansionQ} \\tag{2}\nu_N(x_j) = \\sum_{k=0}^{N-1} \\hat{u}_k \\phi_k(x_j), \\quad \\forall \\, j=0,1, \\ldots, N-1.\n$$\nThis would correspond to a backward transform according to\nthe Shenfun terminology. A direct evaluation of the backward\n(2) transform takes $\\mathcal{O}(N^2)$\noperations since it requires a double sum (over both $j$\nand $k$). 
A fast transform is\na transform that can be computed in $\\mathcal{O}(N \\log N)$ operations.\nThis is what the Fast Fourier Transform (FFT) does. It computes a double\nsum, like (2), in $\\mathcal{O}(N \\log N)$ operations.\nThe other way around, computing ${\\hat{u}k}{k=0}^{N-1}$ from the\nknown ${u_N(x_k)}_{k=0}^{N-1}$ corresponds to a forward transform.\nThe forward transform is computed using a projection of $u$\ninto $V_N$, which is formulated as: find $u_N \\in V_N$ such that\n<!-- Equation labels as ordinary links -->\n<div id=\"eq:projection\"></div>\n\n$$\n\\label{eq:projection} \\tag{3}\n(u_N-u, v){\\omega^{\\sigma}} = 0, \\quad \\forall \\, v \\in V{N},\n$$\nwhere $(a, b){\\omega^{\\sigma}} = \\int{I} a b \\omega^{\\sigma} dx$ is the\ninner product in $L^2_{\\omega^{\\sigma}}(I)$, and $\\omega^{\\sigma}(x)=(1-x^2)^{\\sigma}$ is a weight function.\nFor Chebyshev polynomials the weight function is usually $\\omega^{-1/2}=(1-x^2)^{-1/2}$.\nInserting for $u_N$ and $v=\\phi_k$, we get\n<!-- Equation labels as ordinary links -->\n<div id=\"_auto1\"></div>\n\n$$\n\\begin{equation}\n\\sum_{j=0}^{N-1}(\\phi_j, \\phi_k){\\omega^{\\sigma}} \\hat{u}{j} = (u, \\phi_k)_{\\omega^{\\sigma}}, \n\\label{_auto1} \\tag{4}\n\\end{equation}\n$$\n<!-- Equation labels as ordinary links -->\n<div id=\"_auto2\"></div>\n\n$$\n\\begin{equation}\nB \\boldsymbol{\\hat{u}} = \\boldsymbol{\\tilde{u}}, \n\\label{_auto2} \\tag{5}\n\\end{equation}\n$$\n<!-- Equation labels as ordinary links -->\n<div id=\"_auto3\"></div>\n\n$$\n\\begin{equation}\n\\boldsymbol{\\hat{u}} = B^{-1} \\boldsymbol{\\tilde{u}},\n\\label{_auto3} \\tag{6}\n\\end{equation}\n$$\nwhere\n$\\boldsymbol{\\tilde{u}} = {(u, \\phi_k){\\omega^{\\sigma}}}{k=0}^{N-1}$ and the mass matrix\n$B = (b_{kj}){k,j=0}^{N-1}$, with $b{kj}=(\\phi_j, \\phi_k)_{\\omega^{\\sigma}}$.\nNote that the forward transform requires both an inner product\n$\\boldsymbol{\\tilde{u}}$ and a matrix inversion. 
By a fast forward transform\nwe mean a transform that can be computed in $\\mathcal{O}(N \\log N)$\noperations. If $B$ is a diagonal or banded matrix, the matrix inversion costs $\\mathcal{O}(N)$,\nand the limiting factor is then the inner product. Like for the backward transform,\nthe inner product, computed with quadrature, is a double sum\n$$\n(u, \\phi_k){\\omega^{\\sigma}} = \\sum{j=0}^{N-1} u(x_j) \\phi_k(x_j) \\omega_j, \\quad \\forall \\, k = 0, 1, \\ldots, N-1,\n$$\nwhere ${\\omega_j}_{j=0}^{N-1}$ are the quadrature weights.\nA naive implementation of the inner product\ntakes $\\mathcal{O}(N^2)$ operations. However,\nfor Chebyshev polynomials we can compute the double loop with\nfast $\\mathcal{O}(N \\log N)$ discrete sine or cosine transforms,\nthat are versions of the FFT. To see this, assume that the basis functions are $\\phi_k(x) =T_k(x)$, where\n$T_k(x)$ is the $k$'th Chebyshev polynomial of the first kind,\nand the weight function is $\\omega^{-1/2}$.\nWe then choose Gauss-Chebyshev points $x_j = \\cos(\\theta_j)$,\nwhere $\\theta_j=\\pi (2j+1)/(2N)$, and the associated quadrature weights\nthat are constant $\\omega_j = \\pi/N$. The Chebyshev polynomials evaluated\non the quadrature points can now\nalternatively be written as $T_k(x_j) = \\cos(k \\theta_j)$,\nand the inner product becomes\n$$\n(u, T_k){\\omega^{-1/2}} = \\sum{j=0}^{N-1} u(x_j) \\cos(k \\theta_j) \\pi/N, \\quad \\forall \\, k = 0, 1, \\ldots, N-1.\n$$\nFrom the FFTW documentation\nwe recognise this sum as half a DCT-II (the FFTW DCT-II has a factor\n2 in front of the sum) of $\\boldsymbol{u}\\pi/N$. 
Hence, we can compute the inner product as\n$$\n(u, T_k)_{\\omega^{-1/2}} = \\frac{\\pi}{2N} \\text{dct}^{II}(\\boldsymbol{u})_k, \\quad k = 0, 1, \\ldots, N-1.\n$$\nDirichlet bases\nThe basis function $T_k$ satisfies $T_k(\\pm 1) = (\\pm 1)^k$ at the\nboundaries of the domain, and the space $S_N=\\text{span}{T_k}_{k=0}^{N-1}$,\nof dimension $N$,\nis thus not associated with any specific set of boundary conditions.\nA functionspace for homogeneous Dirichlet boundary conditions is\ngiven as $V_N={v\\in S_N | v(\\pm 1)=0 }$. Because of the two restrictions\nthe space has dimension $N-2$.\nThere are several different choices of basis functions\nfor $V_N$.\nThe most interesting we name $\\phi_k^n$, for integer $n$, and\ndefine them as\n<!-- Equation labels as ordinary links -->\n<div id=\"_auto4\"></div>\n\n$$\n\\begin{equation}\n\\phi^n_k = \\omega T^{(n)}{k+n} = (1-x^2) T^{(n)}{k+n},\n\\label{_auto4} \\tag{7}\n\\end{equation}\n$$\nwhere $T^{(n)}{k+n}$ is the $n$'th derivative of $T{k+n}$. We have\nfor any integer $n$ that $V_N=\\text{span}{\\phi^n_k}_{k=0}^{N-3}$, and an\nexpansion in any of these basis functions is\n<!-- Equation labels as ordinary links -->\n<div id=\"eq:uNgeneric\"></div>\n\n$$\n\\begin{equation}\n\\label{eq:uNgeneric} \\tag{8}\n u_N = \\sum_{k=0}^{N-3} \\hat{u}^n_k \\phi^n_k.\n\\end{equation}\n$$\nWe can find the sequence ${\\hat{u}^n_{k}}_{k=0}^{N-3}$ for any $n$\nusing a projection into the space $V_N$. The projection is computed\nby using Eq. (8) and $v=\\phi^n_k$ in\nEq. 
(3)\n<!-- Equation labels as ordinary links -->\n<div id=\"eq:projortho\"></div>\n\n$$\n\\begin{equation}\n\\label{eq:projortho} \\tag{9}\n \\sum_{j=0}^{N-3} ( T^{(n)}{j+n}, T^{(n)}{k+n}){\\omega^{\\sigma+2}} \\hat{u}^{n}_j = (u, T^{(n)}{k+n})_{\\omega^{\\sigma+1}}.\n\\end{equation}\n$$\nNow how can this projection be computed as efficiently as possible?\nThe Chebyshev polynomials and their derivatives are known to satisfy\nthe following orthogonality relation\n<!-- Equation labels as ordinary links -->\n<div id=\"eq:orthon\"></div>\n\n$$\n\\begin{equation}\n\\label{eq:orthon} \\tag{10}\n (T^{(n)}j, T^{(n)}_k){\\omega^{n-1/2}} = \\alpha^{n}k \\delta{kj}, \\quad \\text{for}\\, n \\ge 0,\n\\end{equation}\n$$\nwhere $\\delta_{kj}$ is the Kronecker delta function and\n<!-- Equation labels as ordinary links -->\n<div id=\"_auto5\"></div>\n\n$$\n\\begin{equation}\n \\alpha^n_k = \\frac{c_{k+n}\\pi k (k+n-1)!}{2(k-n)!},\n\\label{_auto5} \\tag{11}\n\\end{equation}\n$$\nwhere $c_0=2$ and $c_k=1$ for $k>0$. This can be used in\ncomputing (9), because we just\nneed to choose the $\\sigma$ that leads to a diagonal mass matrix.\nFor $n=(0, 1, 2)$ this will be $\\sigma=-5/2, -3/2$ and $-1/2$,\nrespectively. So, choosing $\\sigma=-5/2, -3/2$ and $-1/2$\nfor $n=0, 1$ and 2, respectively, will lead to a diagonal\nmass matrix $( T^{(n)}{j+n}, T^{(n)}{k+n})_{\\omega^{\\sigma+2}}$.\nUsing these $\\sigma$'s we can invert the diagonal mass matrices\nin Eq. 
(9) to get\n<!-- Equation labels as ordinary links -->\n<div id=\"_auto6\"></div>\n\n$$\n\\begin{equation}\n \\hat{u}^n_k = \\frac{1}{\\alpha^n_{k+n}}(u, T^{(n)}{k+n}){\\omega^{\\sigma+1}}, \\quad k=0, 1, \\ldots, N-3, \\text{ for } n \\in (0, 1, 2).\n\\label{_auto6} \\tag{12}\n\\end{equation}\n$$\nUsing now quadrature, $1-x^2_i=\\sin^2 \\theta_i$ and the\nfast transforms $(u, T_k){\\omega^{-1/2}} = \\pi/2/N \\text{dct}^{II}(\\boldsymbol{u})_k$\nand $(u, U_k){\\omega^{-1/2}} = \\pi/2/N \\text{dst}^{II}(\\boldsymbol{u}/\\sin \\boldsymbol{\\theta})_k$,\nwhere $\\boldsymbol{u}/\\sin \\boldsymbol{\\theta}$ implies element-wise division,\nwe get\n<!-- Equation labels as ordinary links -->\n<div id=\"eq:fast1\"></div>\n\n$$\n\\begin{equation}\n \\hat{u}^0_k = \\frac{1}{c_k N} \\text{dct}^{II}(\\boldsymbol{u}/\\sin^2 \\boldsymbol{\\theta})_k, \\quad k = 0, 1, \\ldots, N-3, \\label{eq:fast1} \\tag{13} \n\\end{equation}\n$$\n<!-- Equation labels as ordinary links -->\n<div id=\"eq:fast2\"></div>\n\n$$\n\\begin{equation}\n \\hat{u}^1_k = \\frac{1}{(k+1)N}\\text{dst}^{II}(\\boldsymbol{u}/\\sin \\boldsymbol{\\theta})_k, \\quad k = 0, 1, \\ldots, N-3, \\label{eq:fast2} \\tag{14}\n\\end{equation}\n$$\n<!-- Equation labels as ordinary links -->\n<div id=\"eq:fast3\"></div>\n\n$$\n\\begin{equation}\n \\hat{u}^2_k = \\frac{1}{2(k+2)}\\left(\\hat{u}^1_k - \\hat{u}^1_{k+2} \\right), \\quad k=0, 1, \\ldots, N-3. \\label{eq:fast3} \\tag{15}\n\\end{equation}\n$$\nThe last one requires some work, using the identity\n$\\phi^2_k=(1-x^2)T''{k+2}=0.5(k+2)(k+3)(U_k - (k+1)/(k+3)U{k+2})$.\nVerification\nTo validate all the fast methods we compute the projection first regularly\nusing the Shenfun function project,\nwhich is using $\\sigma=-1/2$, and then the fast methods above. 
The two\nprojections should be the same, but they will not give identical results.\nIn general, the fast transforms above should be both faster and more\naccurate, because they only take a discrete transform and merely a diagonal\nmass matrix inversion.\nWe start the implementation by importing necessary modules from Shenfun\nand mpi4py-fft", "from shenfun import *\nfrom mpi4py_fft import fftw", "The three bases ${\\phi^n_k}_{k=0}^{N-3}$ are implemented\nwith slightly different scaling in shenfun.\nThe first, with $n=0$, is obtained with no special scaling using", "N = 20\nD0 = FunctionSpace(N, 'C', bc=(0, 0), basis='Heinrichs')", "The second basis is implemented in Shenfun as $\\phi_k = \\frac{2}{k+1}\\phi^1_k$,\nwhich can be simplified as\n<!-- Equation labels as ordinary links -->\n<div id=\"eq:ft:shen\"></div>\n\n$$\n\\label{eq:ft:shen} \\tag{16}\n\\phi_k(x) = T_k-T_{k+2}, \\quad k=0,1, \\ldots, N-3,\n$$\nand implemented as", "D1 = FunctionSpace(N, 'C', bc=(0, 0)) # this is the default basis", "Because of the scaling the expansion coefficients for $\\phi_k$ are\n$\\hat{u}^{\\phi}_k=\\frac{k+1}{2}\\hat{u}^1_k$. 
Using (14) we get\n$$\n\\hat{u}^{\\phi}_k = \\frac{1}{2N}\\text{dst}^{II}(\\boldsymbol{u}/\\sin \\boldsymbol{\\theta})_k, \\quad k = 0, 1, \\ldots, N-3.\n$$\nThe third basis is also scaled and implemented in Shenfun as $\\psi_k = \\frac{2}{(k+3)(k+2)}\\phi^2_k$,\nwhich can be simplified using Chebyshev polynomials of the second\nkind $U_k$\n<!-- Equation labels as ordinary links -->\n<div id=\"eq:ft:dirichletU\"></div>\n\n$$\n\\label{eq:ft:dirichletU} \\tag{17}\n\\psi_k(x) = U_k-\\frac{k+1}{k+3}U_{k+2}, \\quad k=0,1, \\ldots, N-3.\n$$\nWe get the basis using", "D2 = FunctionSpace(N, 'U', bc=(0, 0), quad='GC') # quad='GU' is default for U", "and the expansion coefficients are found as\n$\\hat{u}^{\\psi}_k = \\frac{(k+3)(k+2)}{2} \\hat{u}^2_k$.\nFor verification of all the fast transforms we first create a vector\nconsisting of random expansion coefficients, and then transform\nit backwards to physical space", "f = Function(D0, buffer=np.random.random(N))\nf[-2:] = 0\nfb = f.backward().copy()", "Next, we perform the regular projections into the three spaces\nD0, D1 and D2, using the default inner product\nin $L^2_{\\omega^{-1/2}}$ for D0 and D1, whereas $L^2_{\\omega^{1/2}}$\nis used for D2. 
Now u0, u1 and u2 will be the\nthree solution vectors\n$\\boldsymbol{\\hat{u}}^{\\varphi}$, $\\boldsymbol{\\hat{u}}^{\\phi}$\nand $\\boldsymbol{\\hat{u}}^{\\psi}$, respectively.", "u0 = project(fb, D0)\nu1 = project(fb, D1)\nu2 = project(fb, D2)", "Now compute the fast transforms and assert that they are equal to u0, u1 and u2", "theta = np.pi*(2*np.arange(N)+1)/(2*N)\n# Test for n=0\ndct = fftw.dctn(fb.copy(), type=2)\nck = np.ones(N); ck[0] = 2\nd0 = dct(fb/np.sin(theta)**2)/(ck*N)\nassert np.linalg.norm(d0-u0) < 1e-8, np.linalg.norm(d0-f0)\n# Test for n=1\ndst = fftw.dstn(fb.copy(), type=2)\nd1 = dst(fb/np.sin(theta))/(2*N)\nassert np.linalg.norm(d1-u1) < 1e-8\n# Test for n=2\nut = d1\nk = np.arange(N)\nd2 = Function(D2)\nd2[:-2] = (k[:-2]+3)/2/(k[:-2]+1)*ut[:-2]\nd2[:-2] = d2[:-2] - 0.5*ut[2:]\nassert np.linalg.norm(d2-u2) < 1e-8", "That's it! If you make it to here with no errors, then the three tests pass, and the fast transforms are equal to the slow ones, at least within given precision.\nLet's try some timings", "%timeit project(fb, D1)\n\n%timeit dst(fb/np.sin(theta))/(2*N)", "We can precompute the sine term, because it does not change", "dd = np.sin(theta)*2*N\n%timeit dst(fb/dd)", "The other two transforms are approximately the same speed.", "%timeit dct(fb/np.sin(theta)**2)/(ck*N)", "<!-- ======= Bibliography ======= -->" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
NifTK/NiftyNet
demos/PROMISE12/PROMISE12_Demo_Notebook.ipynb
apache-2.0
[ "PROMISE12 prostate segmentation demo\nPreparation:\n1) Make sure you have set up the PROMISE12 data set. If not, download it from https://promise12.grand-challenge.org/ (registration required) and run data/PROMISE12/setup.py\n2) Make sure you are in NiftyNet root, setting niftynet_path correctly to the path with the niftynet folder in it", "import os,sys \nniftynet_path=r'path/to/NiftyNet'\nos.chdir(niftynet_path)", "3) Make sure you have all the dependencies installed (replacing gpu with cpu for cpu-only mode):", "import pip\n#pip.main(['install','-r','requirements-gpu.txt'])\npip.main(['install','-r','requirements-cpu.txt'])\npip.main(['install', 'SimpleITK>=1.0.0'])", "Training a network from the command line\nThe simplest way to use NiftyNet is via the commandline net_segment.py script. Normally, this is done on the command line with a command like this from the NiftyNet root directory:\npython net_segment.py train --conf demos/PROMISE12/promise12_demo_train_config.ini --max_iter 10\nNotice that we use configuration file that is specific to this experiment. This file contains default settings. Also note that we can override these settings on the command line.\nTo execute NiftyNet from within the notebook, you can run the following python code:", "import os\nimport sys\nimport niftynet\nsys.argv=['','train','-a','net_segment','--conf',os.path.join('demos','PROMISE12','promise12_demo_train_config.ini'),'--max_iter','10']\nniftynet.main()", "Now you have trained (a few iterations of) a deep learning network for medical image segmentation. 
If you have some time on your hands, you can finish training the network (by leaving off the max_iter argument) and try it out, by running the following command\npython net_segment.py inference --conf demos/PROMISE12/promise12_demo_inference_config.ini\nor the following python code in the Notebook", "import os\nimport sys\nimport niftynet\nsys.argv=['', 'inference','-a','net_segment','--conf',os.path.join('demos','PROMISE12','promise12_demo_inference_config.ini')]\nniftynet.main()", "Otherwise, you can load up some pre-trained weights for the network:\npython net_segment.py inference --conf demo/PROMISE12/promise12_demo_config.ini --model_dir demo/PROMISE12/pretrained\nor the following python code in the Notebook", "import os\nimport sys\nimport niftynet\nsys.argv=['', 'inference','-a','net_segment','--conf',os.path.join('demos','PROMISE12','promise12_demo_inference_config.ini'), '--model_dir', os.path.join('demos','PROMISE12','pretrained')]\nniftynet.main()", "You can find your segmented images in output/promise12_demo\nNiftyNet has taken care of a lot of details behind the scenes:\n1. Organizing data into a dataset of images and segmentation labels\n2. Building a deep leaning network (in this case, it is based on VNet by Milletari et al.)\n3. Added deep learning infrastruture, such as a loss function for segmentation, the ADAM optimizer.\n4. Added augmentation, where the images are zoomed and rotated a little bit for every training step so that you do not over-fit the data\n5. Run the training algorithm\nAll of this was controlled by the configuration file.\nThe configuration file\nLet's take a closer look at the configuration file. Further details about the configuration settings are available in config/readme.md\nThese lines define how NiftyNet organizes your data. In this case, in the ./data/PROMISE12 folder there is one T2-weighted MR image named 'Case??_T2.nii.gz' and one reference segmentation named 'Case??_segmentation.nii.gz' per patient. 
The images for each patient are automatically grouped because they share the same prefix 'Case??'. For training, we exclude patients Case20-Case26, and for inference, we only include patients Case20-Case26, so that our training and inference data are mutually exclusive.\nThese lines are setting up some system parameters: which GPUs to use (in this case whatever is available), where to save the trained network parameters, and how many threads to use for queuing them up.\nThe following lines specify network properties.\nSummary\nIn this demo \n1. you learned to run training and testing for a deep-learning-based segmentation pipeline from the command-line and from python code directly; \n2. you also learned about the NiftyNet configuration files, and how they control the learning and inference process; and \n3. you learned multiple ways to tell NiftyNet which data to use." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
MedievalSure/ToStudy
notebook/02_01_1DConvection.ipynb
mit
[ "Content under Creative Commons Attribution license CC-BY 4.0, code under MIT license (c)2014 L.A. Barba, G.F. Forsyth, C.D. Cooper. Based on CFD Python, (c)2013 L.A. Barba, also under CC-BY.\nSpace & Time\nIntroduction to numerical solution of PDEs\nWelcome to Space and Time: Introduction to finite-difference solutions of PDEs, the second module of \"Practical Numerical Methods with Python\". \nIn the first module, we looked into numerical integration methods for the solution of ordinary differential equations (ODEs), using the phugoid model of glider flight as a motivation. In this module, we will study the numerical solution of partial differential equations (PDEs), where the unknown is a multi-variate function. The problem could depend on time, $t$, and one spatial dimension $x$ (or more), which means we need to build a discretization grid with each independent variable.\nWe will start our discussion of numerical PDEs with 1-D linear and non-linear convection equations, the 1-D diffusion equation, and 1-D Burgers' equation. We hope you will enjoy them!\n1D linear convection\nThe one-dimensional linear convection equation is the simplest, most basic model that can be used to learn something about numerical solution of PDEs. It's surprising that this little equation can teach us so much! Here it is:\n\\begin{equation}\\frac{\\partial u}{\\partial t} + c \\frac{\\partial u}{\\partial x} = 0\\end{equation}\nThe equation represents a wave propagating with speed $c$ in the $x$ direction, without change of shape. For that reason, it's sometimes called the one-way wave equation (sometimes also the advection equation).\nWith an initial condition $u(x,0)=u_0(x)$, the equation has an exact solution given by:\n\\begin{equation}u(x,t)=u_0(x-ct). \n\\end{equation}\nGo on: check it. Take the time and space derivative and stick them into the equation to see that it holds!\nLook at the exact solution for a moment ... 
we know two things about it: \n\nits shape does not change, being always the same as the initial wave, $u_0$, only shifted in the $x$-direction; and \nit's constant along so-called characteristic curves, $x-ct=$constant. This means that for any point in space and time, you can move back along the characteristic curve to $t=0$ to know the value of the solution.\n\n\nCharacteristic curves for positive wave speed.\nWhy do we call the equations linear? PDEs can be either linear or non-linear. In a linear equation, the unknown function $u$ and its derivatives appear only in linear terms, in other words, there are no products, powers, or transcendental functions applied on them. \nWhat is the most important feature of linear equations? Do you remember? In case you forgot: solutions can be superposed to generate new solutions that still satisfy the original equation. This is super useful!\nFinite-differences\nIn the previous lessons, we discretized time derivatives; now we have derivatives in both space and time, so we need to discretize with respect to both these variables. \nImagine a space-time plot, where the coordinates in the vertical direction represent advancing in time—for example, from $t^n$ to $t^{n+1}$—and the coordinates in the horizontal direction move in space: consecutive points are $x_{i-1}$, $x_i$, and $x_{i+1}$. This creates a grid where a point has both a temporal and spatial index. Here is a graphical representation of the space-time grid:\n\\begin{matrix}\nt^{n+1} & \\rightarrow & \\bullet && \\bullet && \\bullet \\\nt^n & \\rightarrow & \\bullet && \\bullet && \\bullet \\\n& & x_{i-1} && x_i && x_{i+1}\n\\end{matrix}\nFor the numerical solution of $u(x,t)$, we'll use subscripts to denote the spatial position, like $u_i$, and superscripts to denote the temporal instant, like $u^n$. 
We would then label the solution at the top-middle point in the grid above as follows:\n$u^{n+1}_{i}$.\nEach grid point below has an index $i$, corresponding to the spatial position and increasing to the right, and an index $n$, corresponding to the time instant and increasing upwards. A small grid segment would have the following values of the numerical solution at each point:\n\\begin{matrix}\n& &\\bullet & & \\bullet & & \\bullet \\\n& &u^{n+1}{i-1} & & u^{n+1}_i & & u^{n+1}{i+1} \\\n& &\\bullet & & \\bullet & & \\bullet \\\n& &u^n_{i-1} & & u^n_i & & u^n_{i+1} \\\n& &\\bullet & & \\bullet & & \\bullet \\\n& &u^{n-1}{i-1} & & u^{n-1}_i & & u^{n-1}{i+1} \\\n\\end{matrix}\nAnother way to explain our discretization grid is to say that it is built with constant steps in time and space, $\\Delta t$ and $\\Delta x$, as follows:\n\\begin{eqnarray}\nx_i &=& i\\, \\Delta x \\quad \\text{and} \\quad t^n= n\\, \\Delta t \\nonumber \\\nu_i^n &=& u(i\\, \\Delta x, n\\, \\Delta t)\n\\end{eqnarray}\nDiscretizing our model equation\nLet's see how to discretize the 1-D linear convection equation in both space and time. By definition, the partial derivative with respect to time changes only with time and not with space; its discretized form changes only the $n$ indices. Similarly, the partial derivative with respect to $x$ changes with space not time, and only the $i$ indices are affected. \nWe'll discretize the spatial coordinate $x$ into points indexed from $i=0$ to $N$, and then step in discrete time intervals of size $\\Delta t$.\nFrom the definition of a derivative (and simply removing the limit), we know that for $\\Delta x$ sufficiently small:\n\\begin{equation}\\frac{\\partial u}{\\partial x}\\approx \\frac{u(x+\\Delta x)-u(x)}{\\Delta x}\\end{equation}\nThis formula could be applied at any point $x_i$. But note that it's not the only way that we can estimate the derivative. 
The geometrical interpretation of the first derivative $\\partial u/ \\partial x$ at any point is that it represents the slope of the tangent to the curve $u(x)$. In the sketch below, we show a slope line at $x_i$ and mark it as \"exact.\" If the formula written above is applied at $x_i$, it approximates the derivative using the next spatial grid point: it is then called a forward difference formula. \nBut as shown in the sketch below, we could also estimate the spatial derivative using the point behind $x_i$, in which case it is called a backward difference. We could even use the two points on each side of $x_i$, and obtain what's called a central difference (but in that case the denominator would be $2\\Delta x$).\n\nThree finite-difference approximations at $x_i$.\nWe have three possible ways to represent a discrete form of $\\partial u/ \\partial x$:\n\nForward difference: uses $x_i$ and $x_i + \\Delta x$,\nBackward difference: uses $x_i$ and $x_i- \\Delta x$,\nCentral difference: uses two points on either side of $x_i$.\n\nThe sketch above also suggests that some finite-difference formulas might be better than others: it looks like the central difference approximation is closer to the slope of the \"exact\" derivative. Curious if this is just an effect of our exaggerated picture? We'll show you later how to make this observation rigorous!\nThe three formulas are:\n\\begin{eqnarray}\n\\frac{\\partial u}{\\partial x} & \\approx & \\frac{u(x_{i+1})-u(x_i)}{\\Delta x} \\quad\\text{Forward}\\\n\\frac{\\partial u}{\\partial x} & \\approx & \\frac{u(x_i)-u(x_{i-1})}{\\Delta x} \\quad\\text{Backward}\\\n\\frac{\\partial u}{\\partial x} & \\approx & \\frac{u(x_{i+1})-u(x_{i-1})}{2\\Delta x} \\quad\\text{Central}\n\\end{eqnarray}\nEuler's method is equivalent to using a forward-difference scheme for the time derivative. Let's stick with that, and choose the backward-difference scheme for the space derivative. 
Our discrete equation is then:\n\\begin{equation}\\frac{u_i^{n+1}-u_i^n}{\\Delta t} + c \\frac{u_i^n - u_{i-1}^n}{\\Delta x} = 0, \\end{equation}\nwhere $n$ and $n+1$ are two consecutive steps in time, while $i-1$ and $i$ are two neighboring points of the discretized $x$ coordinate. With given initial conditions, the only unknown in this discretization is $u_i^{n+1}$. We solve for this unknown to get an equation that lets us step in time, as follows:\n\\begin{equation}u_i^{n+1} = u_i^n - c \\frac{\\Delta t}{\\Delta x}(u_i^n-u_{i-1}^n)\\end{equation}\nWe like to make drawings of a grid segment, showing the grid points that influence our numerical solution. This is called a stencil. Below is the stencil for solving our model equation with the finite-difference formula we wrote above.\n\nStencil for the \"forward-time/backward-space\" scheme.\nAnd compute!\nAlright. Let's get a little Python on the road. First: we need to load our array and plotting libraries, as usual. And if you noticed in the Bonus! notebook for Module 1, we taught you a neat trick to set some global plotting parameters with the rcParams module. We like to do that.", "import numpy \nfrom matplotlib import pyplot \n%matplotlib inline\nfrom matplotlib import rcParams\nrcParams['font.family'] = 'serif'\nrcParams['font.size'] = 16", "As a first exercise, we'll solve the 1D linear convection equation with a square wave initial condition, defined as follows:\n\\begin{equation}\nu(x,0)=\\begin{cases}2 & \\text{where } 0.5\\leq x \\leq 1,\\\n1 & \\text{everywhere else in } (0, 2)\n\\end{cases}\n\\end{equation}\nWe also need a boundary condition on $x$: let $u=1$ at $x=0$. Our spatial domain for the numerical solution will only cover the range $x\\in (0, 2)$.\n\nSquare wave initial condition.\nNow let's define a few variables; we want to make an evenly spaced grid of points within our spatial domain. 
In the code below, we define a variable called nx that will be the number of spatial grid points, and a variable dx that will be the distance between any pair of adjacent grid points. We also can define a step in time, dt, a number of steps, nt, and a value for the wave speed: we like to keep things simple and make $c=1$.", "nx = 41 # try changing this number from 41 to 81 and Run All ... what happens?\ndx = 2/(nx-1)\nnt = 25 \ndt = .02 \nc = 1 #assume wavespeed of c = 1\nx = numpy.linspace(0,2,nx)", "We also need to set up our initial conditions. Here, we use the NumPy function ones() defining an array which is nx elements long with every value equal to $1$. How useful! We then change a slice of that array to the value $u=2$, to get the square wave, and we print out the initial array just to admire it. But which values should we change? The problem states that we need to change the indices of u such that the square wave begins at $x = 0.5$ and ends at $x = 1$.\nWe can use the numpy.where function to return a list of indices where the vector $x$ meets (or doesn't meet) some condition.", "u = numpy.ones(nx) #numpy function ones()\nlbound = numpy.where(x >= 0.5)\nubound = numpy.where(x <= 1)\n\nprint(lbound)\nprint(ubound)", "That leaves us with two vectors. lbound, which has the indices for $x \\geq .5$ and 'ubound', which has the indices for $x \\leq 1$. To combine these two, we can use an intersection, with numpy.intersect1d.", "bounds = numpy.intersect1d(lbound, ubound)\nu[bounds]=2 #setting u = 2 between 0.5 and 1 as per our I.C.s\nprint(u)", "Remember that Python can also combine commands, we could have instead written\nPython\nu[numpy.intersect1d(numpy.where(x &gt;= 0.5), numpy.where(x &lt;= 1))] = 2\nbut that can be a little hard to read.\nNow let's take a look at those initial conditions we've built with a handy plot.", "pyplot.plot(x, u, color='#003366', ls='--', lw=3)\npyplot.ylim(0,2.5);", "It does look pretty close to what we expected. 
But it looks like the sides of the square wave are not perfectly vertical. Is that right? Think for a bit.\nNow it's time to write some code for the discrete form of the convection equation using our chosen finite-difference scheme. \nFor every element of our array u, we need to perform the operation: \n$$u_i^{n+1} = u_i^n - c \\frac{\\Delta t}{\\Delta x}(u_i^n-u_{i-1}^n)$$\nWe'll store the result in a new (temporary) array un, which will be the solution $u$ for the next time-step. We will repeat this operation for as many time-steps as we specify and then we can see how far the wave has traveled. \nWe first initialize the placeholder array un to hold the values we calculate for the $n+1$ timestep, using once again the NumPy function ones().\nThen, we may think we have two iterative operations: one in space and one in time (we'll learn differently later), so we may start by nesting a spatial loop inside the time loop, as shown below. You see that the code for the finite-difference scheme is a direct expression of the discrete equation:", "for n in range(1,nt): \n un = u.copy() \n for i in range(1,nx): \n \n u[i] = un[i]-c*dt/dx*(un[i]-un[i-1])", "Note—We will learn later that the code as written above is quite inefficient, and there are better ways to write this, Python-style. But let's carry on.\nNow let's inspect our solution array after advancing in time with a line plot.", "pyplot.plot(x, u, color='#003366', ls='--', lw=3)\npyplot.ylim(0,2.5);", "That's funny. Our square wave has definitely moved to the right, but it's no longer in the shape of a top-hat. What's going on?\nDig deeper\nThe solution differs from the expected square wave because the discretized equation is an approximation of the continuous differential equation that we want to solve. There are errors: we knew that. But the modified shape of the initial wave is something curious. Maybe it can be improved by making the grid spacing finer. Why don't you try it? 
Does it help?\nSpatial truncation error\nRecall the finite-difference approximation we are using for the spatial derivative:\n\\begin{equation}\\frac{\\partial u}{\\partial x}\\approx \\frac{u(x+\\Delta x)-u(x)}{\\Delta x}\\end{equation}\nWe obtain it by using the definition of the derivative at a point, and simply removing the limit, in the assumption that $\\Delta x$ is very small. But we already learned with Euler's method that this introduces an error, called the truncation error.\nUsing a Taylor series expansion for the spatial terms now, we see that the backward-difference scheme produces a first-order method, in space.\n\\begin{equation}\n\\frac{\\partial u}{\\partial x}(x_i) = \\frac{u(x_i)-u(x_{i-1})}{\\Delta x} + \\frac{\\Delta x}{2} \\frac{\\partial^2 u}{\\partial x^2}(x_i) - \\frac{\\Delta x^2}{6} \\frac{\\partial^3 u}{\\partial x^3}(x_i)+ \\cdots\n\\end{equation}\nThe dominant term that is neglected in the finite-difference approximation is of $\\mathcal{O}(\\Delta x)$. We also see that the approximation converges to the exact derivative as $\\Delta x \\rightarrow 0$. That's good news!\nIn summary, the chosen \"forward-time/backward space\" difference scheme is first-order in both space and time: the truncation errors are $\\mathcal{O}(\\Delta t, \\Delta x)$. We'll come back to this!\nNon-linear convection\nLet's move on to the non-linear convection equation, using the same methods as before. The 1-D convection equation is:\n\\begin{equation}\\frac{\\partial u}{\\partial t} + u \\frac{\\partial u}{\\partial x} = 0\\end{equation}\nThe only difference with the linear case is that we've replaced the constant wave speed $c$ by the variable speed $u$. The equation is non-linear because now we have a product of the solution and one of its derivatives: the product $u\\,\\partial u/\\partial x$. This changes everything!\nWe're going to use the same discretization as for linear convection: forward difference in time and backward difference in space. 
Here is the discretized equation:\n\\begin{equation}\\frac{u_i^{n+1}-u_i^n}{\\Delta t} + u_i^n \\frac{u_i^n-u_{i-1}^n}{\\Delta x} = 0\\end{equation}\nSolving for the only unknown term, $u_i^{n+1}$, gives an equation that can be used to advance in time:\n\\begin{equation}u_i^{n+1} = u_i^n - u_i^n \\frac{\\Delta t}{\\Delta x} (u_i^n - u_{i-1}^n)\\end{equation}\nThere is very little that needs to change from the code written so far. In fact, we'll even use the same square-wave initial condition. But let's re-initialize the variable u with the initial values, and re-enter the numerical parameters here, for convenience (we no longer need $c$, though).", "##problem parameters\nnx = 41\ndx = 2/(nx-1)\nnt = 10 \ndt = .02 \n\n##initial conditions\nu = numpy.ones(nx) \nu[numpy.intersect1d(lbound, ubound)]=2 \n\n", "How does it look?", "pyplot.plot(x, u, color='#003366', ls='--', lw=3)\npyplot.ylim(0,2.5);", "Changing just one line of code in the solution of linear convection, we are able to now get the non-linear solution: the line that corresponds to the discrete equation now has un[i] in the place where before we just had c. So you could write something like:\nPython\nfor n in range(1,nt): \n un = u.copy() \n for i in range(1,nx): \n u[i] = un[i]-un[i]*dt/dx*(un[i]-un[i-1])\nWe're going to be more clever than that and use NumPy to update all values of the spatial grid in one fell swoop. We don't really need to write a line of code that gets executed for each value of $u$ on the spatial grid. Python can update them all at once! Study the code below, and compare it with the one above. Here is a helpful sketch, to illustrate the array operation—also called a \"vectorized\" operation—for $u_i-u_{i-1}$.\n\n<br>\nSketch to explain vectorized stencil operation. 
Adapted from \"Indices point between elements\" by Nelson Elhage.", "for n in range(1, nt): \n un = u.copy() \n u[1:] = un[1:]-un[1:]*dt/dx*(un[1:]-un[0:-1]) \n u[0] = 1.0\n\npyplot.plot(x, u, color='#003366', ls='--', lw=3)\npyplot.ylim(0,2.5);", "Hmm. That's quite interesting: like in the linear case, we see that we have lost the sharp sides of our initial square wave, but there's more. Now, the wave has also lost symmetry! It seems to be lagging on the rear side, while the front of the wave is steepening. Is this another form of numerical error, do you ask? No! It's physics!\nDig deeper\nThink about the effect of having replaced the constant wave speed $c$ by the variable speed given by the solution $u$. It means that different parts of the wave move at different speeds. Make a sketch of an initial wave and think about where the speed is higher and where it is lower ...\nReferences\n\nElhage, Nelson (2015), \"Indices point between elements\"\n\n\nThe cell below loads the style of the notebook.", "from IPython.core.display import HTML\ncss_file = 'numericalmoocstyle.css'\nHTML(open(css_file, \"r\").read())" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
geektoni/shogun
doc/ipython-notebooks/ica/bss_audio.ipynb
bsd-3-clause
[ "Blind Source Separation with the Shogun Machine Learning Toolbox\nBy Kevin Hughes\nThis notebook illustrates <a href=\"http://en.wikipedia.org/wiki/Blind_signal_separation\">Blind Source Seperation</a>(BSS) on audio signals using <a href=\"http://en.wikipedia.org/wiki/Independent_component_analysis\">Independent Component Analysis</a> (ICA) in Shogun. We generate a mixed signal and try to seperate it out using Shogun's implementation of ICA & BSS called <a href=\"http://www.shogun-toolbox.org/doc/en/3.0.0/classshogun_1_1Jade.html\">JADE</a>.\nMy favorite example of this problem is known as the cocktail party problem where a number of people are talking simultaneously and we want to separate each persons speech so we can listen to it separately. Now the caveat with this type of approach is that we need as many mixtures as we have source signals or in terms of the cocktail party problem we need as many microphones as people talking in the room.\nLet's get started, this example is going to be in python and the first thing we are going to need to do is load some audio files. To make things a bit easier further on in this example I'm going to wrap the basic scipy wav file reader and add some additional functionality. First I added a case to handle converting stereo wav files back into mono wav files and secondly this loader takes a desired sample rate and resamples the input to match. 
This is important because when we mix the two audio signals they need to have the same sample rate.", "import numpy as np\nimport os\nSHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')\nfrom scipy.io import wavfile\nfrom scipy.signal import resample\nimport shogun as sg\n\ndef load_wav(filename,samplerate=44100):\n \n # load file\n rate, data = wavfile.read(filename)\n\n # convert stereo to mono\n if len(data.shape) > 1:\n data = data[:,0]/2 + data[:,1]/2\n\n # re-interpolate samplerate \n ratio = float(samplerate) / float(rate)\n data = resample(data, int(len(data) * ratio))\n \n return samplerate, data.astype(np.int16)", "Next we're going to need a way to play the audio files we're working with (otherwise this wouldn't be very exciting at all would it?). In the next bit of code I've defined a wavPlayer class that takes the signal and the sample rate and then creates a nice HTML5 webplayer right inline with the notebook.", "from IPython.display import Audio\nfrom IPython.display import display\ndef wavPlayer(data, rate):\n display(Audio(data, rate=rate))", "Now that we can load and play wav files we actually need some wav files! I found the sounds from Starcraft to be a great source of wav files because they're short, interesting and remind me of my childhood. You can download Starcraft wav files here: http://wavs.unclebubby.com/computer/starcraft/ among other places on the web or from your Starcraft install directory (come on I know its still there).\nAnother good source of data (although lets be honest less cool) is ICA central and various other more academic data sets: http://perso.telecom-paristech.fr/~cardoso/icacentral/base_multi.html. Note that for lots of these data sets the data will be mixed already so you'll be able to skip the next few steps.\nOkay lets load up an audio file. I chose the Terran Battlecruiser saying \"Good Day Commander\". 
In addition to the creating a wavPlayer I also plotted the data using Matplotlib (and tried my best to have the graph length match the HTML player length). Have a listen!", "# change to the shogun-data directory\nimport os\nos.chdir(os.path.join(SHOGUN_DATA_DIR, 'ica'))\n\n%matplotlib inline\nimport matplotlib.pyplot as plt\n\n# load\nfs1,s1 = load_wav('tbawht02.wav') # Terran Battlecruiser - \"Good day, commander.\"\n\n# plot\nplt.figure(figsize=(6.75,2))\nplt.plot(s1)\nplt.title('Signal 1')\nplt.show()\n\n# player\nwavPlayer(s1, fs1)", "Now let's load a second audio clip:", "# load\nfs2,s2 = load_wav('TMaRdy00.wav') # Terran Marine - \"You want a piece of me, boy?\"\n\n# plot\nplt.figure(figsize=(6.75,2))\nplt.plot(s2)\nplt.title('Signal 2')\nplt.show()\n\n# player\nwavPlayer(s2, fs2)", "and a third audio clip:", "# load\nfs3,s3 = load_wav('PZeRdy00.wav') # Protoss Zealot - \"My life for Aiur!\"\n\n# plot\nplt.figure(figsize=(6.75,2))\nplt.plot(s3)\nplt.title('Signal 3')\nplt.show()\n\n# player\nwavPlayer(s3, fs3)", "Now we've got our audio files loaded up into our example program. The next thing we need to do is mix them together!\nFirst another nuance - what if the audio clips aren't the same lenth? 
The solution I came up with for this was to simply resize them all to the length of the longest signal, the extra length will just be filled with zeros so it won't affect the sound.\nThe signals are mixed by creating a mixing matrix $A$ and taking the dot product of $A$ with the signals $S$.\nAfterwards I plot the mixed signals and create the wavPlayers, have a listen!", "# Adjust for different clip lengths\nfs = fs1\nlength = max([len(s1), len(s2), len(s3)])\ns1 = np.resize(s1, (length,1))\ns2 = np.resize(s2, (length,1))\ns3 = np.resize(s3, (length,1))\n\nS = (np.c_[s1, s2, s3]).T\n\n# Mixing Matrix\n#A = np.random.uniform(size=(3,3))\n#A = A / A.sum(axis=0)\nA = np.array([[1, 0.5, 0.5],\n [0.5, 1, 0.5], \n [0.5, 0.5, 1]]) \nprint('Mixing Matrix:')\nprint(A.round(2))\n\n# Mix Signals\nX = np.dot(A,S)\n\n# Mixed Signal i\nfor i in range(X.shape[0]):\n plt.figure(figsize=(6.75,2))\n plt.plot((X[i]).astype(np.int16))\n plt.title('Mixed Signal %d' % (i+1))\n plt.show()\n wavPlayer((X[i]).astype(np.int16), fs)", "Now before we can work on separating these signals we need to get the data ready for Shogun, thankfully this is pretty easy!", "# Convert to features for shogun\nmixed_signals = sg.create_features((X).astype(np.float64))", "Now lets unmix those signals!\nIn this example I'm going to use an Independent Component Analysis (ICA) algorithm called JADE. JADE is one of the ICA algorithms available in Shogun and it works by performing Aproximate Joint Diagonalization (AJD) on a 4th order cumulant tensor. I'm not going to go into a lot of detail on how JADE works behind the scenes but here is the reference for the original paper:\nCardoso, J. F., & Souloumiac, A. (1993). Blind beamforming for non-Gaussian signals. In IEE Proceedings F (Radar and Signal Processing) (Vol. 140, No. 6, pp. 362-370). IET Digital Library.\nShogun also has several other ICA algorithms including the Second Order Blind Identification (SOBI) algorithm, FFSep, JediSep, UWedgeSep and FastICA. 
All of the algorithms inherit from the ICAConverter base class and share some common methods for setting an intial guess for the mixing matrix, retrieving the final mixing matrix and getting/setting the number of iterations to run and the desired convergence tolerance. Some of the algorithms have additional getters for intermediate calculations, for example Jade has a method for returning the 4th order cumulant tensor while the \"Sep\" algorithms have a getter for the time lagged covariance matrices. Check out the source code on GitHub (https://github.com/shogun-toolbox/shogun) or the Shogun docs (http://www.shogun-toolbox.org/doc/en/latest/annotated.html) for more details!", "# Separating with JADE\njade = sg.create_transformer('Jade')\njade.fit(mixed_signals)\nsignals = jade.transform(mixed_signals)\n\nS_ = signals.get('feature_matrix')\n\nA_ = jade.get('mixing_matrix')\nA_ = A_ / A_.sum(axis=0)\nprint('Estimated Mixing Matrix:')\nprint(A_)", "Thats all there is to it! Check out how nicely those signals have been separated and have a listen!", "# Show separation results\n\n# Separated Signal i\ngain = 4000\nfor i in range(S_.shape[0]):\n plt.figure(figsize=(6.75,2))\n plt.plot((gain*S_[i]).astype(np.int16))\n plt.title('Separated Signal %d' % (i+1))\n plt.show()\n wavPlayer((gain*S_[i]).astype(np.int16), fs)", "BSS isn't only useful for working with Audio, it is also useful for image processing and pre-processing other forms of high dimensional data. Have a google for ICA and machine learning if you want to learn more!" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
schoolie/bokeh
examples/howto/server_embed/notebook_embed.ipynb
bsd-3-clause
[ "Embedding a Bokeh server in a Notebook\nThis notebook shows how a Bokeh server application can be embedded inside a Jupyter notebook.", "import pandas as pd\nimport yaml\n\nfrom bokeh.layouts import column\nfrom bokeh.models import ColumnDataSource, Slider\nfrom bokeh.plotting import figure\nfrom bokeh.themes import Theme\nfrom bokeh.io import show, output_notebook\n\noutput_notebook()", "There are various application handlers that can be used to build up Bokeh documents. For example, there is a ScriptHandler that uses the code from a .py file to produce Bokeh documents. This is the handler that is used when we run bokeh serve app.py. Here we are going to use the lesser-known FunctionHandler, that gets configured with a plain Python function to build up a document. \nHere is the function modify_doc(doc) that defines our app:", "def modify_doc(doc):\n data_url = \"http://www.neracoos.org/erddap/tabledap/B01_sbe37_all.csvp?time,temperature&depth=1&temperature_qc=0&time>=2016-02-15&time<=2017-03-22\"\n df = pd.read_csv(data_url, parse_dates=True, index_col=0)\n df = df.rename(columns={'temperature (celsius)': 'temperature'})\n df.index.name = 'time'\n\n source = ColumnDataSource(data=df)\n\n plot = figure(x_axis_type='datetime', y_range=(0, 25),\n y_axis_label='Temperature (Celsius)',\n title=\"Sea Surface Temperature at 43.18, -70.43\")\n plot.line('time', 'temperature', source=source)\n\n def callback(attr, old, new):\n if new == 0:\n data = df\n else:\n data = df.rolling('{0}D'.format(new)).mean()\n source.data = ColumnDataSource(data=data).data\n\n slider = Slider(start=0, end=30, value=0, step=1, title=\"Smoothing by N Days\")\n slider.on_change('value', callback)\n\n doc.add_root(column(slider, plot))\n\n doc.theme = Theme(json=yaml.load(\"\"\"\n attrs:\n Figure:\n background_fill_color: \"#DDDDDD\"\n outline_line_color: white\n toolbar_location: above\n height: 500\n width: 800\n Grid:\n grid_line_dash: [6, 4]\n grid_line_color: white\n \"\"\"))", "We take 
the function above and configure a FunctionHandler with it. Then we create an Application that uses handler. (It is possible, but uncommon, for Bokeh applications to have more than one handler.) The end result is that the Bokeh server will call modify_doc to build new documents for every new sessions that is opened.", "from bokeh.application.handlers import FunctionHandler\nfrom bokeh.application import Application\n\nhandler = FunctionHandler(modify_doc)\napp = Application(handler)", "Now we can display our application using show:", "show(app)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
tensorflow/docs-l10n
site/ko/hub/tutorials/semantic_similarity_with_tf_hub_universal_encoder.ipynb
apache-2.0
[ "Copyright 2018 The TensorFlow Hub Authors.\nLicensed under the Apache License, Version 2.0 (the \"License\");", "# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================", "Universal Sentence Encoder\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://www.tensorflow.org/hub/tutorials/semantic_similarity_with_tf_hub_universal_encoder\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\">TensorFlow.org에서 보기</a></td>\n <td> <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/hub/tutorials/semantic_similarity_with_tf_hub_universal_encoder.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\">Google Colab에서 실행</a> </td>\n <td> <a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/ko/hub/tutorials/semantic_similarity_with_tf_hub_universal_encoder.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\">GitHub에서 보기</a> </td>\n <td><a href=\"https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/hub/tutorials/semantic_similarity_with_tf_hub_universal_encoder.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\">노트북 다운로드하기</a></td>\n <td> <a 
href=\"https://tfhub.dev/s?q=google%2Funiversal-sentence-encoder%2F4%20OR%20google%2Funiversal-sentence-encoder-large%2F5\"><img src=\"https://www.tensorflow.org/images/hub_logo_32px.png\">TF Hub 모델 보기</a> </td>\n</table>\n\n이 노트북은 Universal Sentence Encoder에 액세스하고 이를 문장 유사성 및 문장 분류 작업에 사용하는 방법을 보여줍니다.\nUniversal Sentence Encoder를 사용하면 기존에 개별 단어에 대한 임베딩을 조회하는 것처럼 쉽게 문장 수준 임베딩을 얻을 수 있습니다. 그러면 문장 임베딩을 간단히 사용하여 문장 수준의 의미론적 유사성을 계산할 수 있을 뿐만 아니라 감독되지 않은 더 적은 훈련 데이터를 사용하여 다운스트림 분류 작업의 성능을 높일 수 있습니다.\n설정\n이 섹션에서는 TF 허브의 Universal Sentence Encoder에 액세스하기 위한 환경을 설정하고 인코더를 단어, 문장 및 단락에 적용하는 예를 제공합니다.", "%%capture\n!pip3 install seaborn", "Tensorflow 설치에 대한 자세한 내용은 https://www.tensorflow.org/install/에서 찾을 수 있습니다.", "#@title Load the Universal Sentence Encoder's TF Hub module\nfrom absl import logging\n\nimport tensorflow as tf\n\nimport tensorflow_hub as hub\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nimport re\nimport seaborn as sns\n\nmodule_url = \"https://tfhub.dev/google/universal-sentence-encoder/4\" #@param [\"https://tfhub.dev/google/universal-sentence-encoder/4\", \"https://tfhub.dev/google/universal-sentence-encoder-large/5\"]\nmodel = hub.load(module_url)\nprint (\"module %s loaded\" % module_url)\ndef embed(input):\n return model(input)\n\n#@title Compute a representation for each message, showing various lengths supported.\nword = \"Elephant\"\nsentence = \"I am a sentence for which I would like to get its embedding.\"\nparagraph = (\n \"Universal Sentence Encoder embeddings also support short paragraphs. \"\n \"There is no hard limit on how long the paragraph is. 
Roughly, the longer \"\n \"the more 'diluted' the embedding will be.\")\nmessages = [word, sentence, paragraph]\n\n# Reduce logging output.\nlogging.set_verbosity(logging.ERROR)\n\nmessage_embeddings = embed(messages)\n\nfor i, message_embedding in enumerate(np.array(message_embeddings).tolist()):\n print(\"Message: {}\".format(messages[i]))\n print(\"Embedding size: {}\".format(len(message_embedding)))\n message_embedding_snippet = \", \".join(\n (str(x) for x in message_embedding[:3]))\n print(\"Embedding: [{}, ...]\\n\".format(message_embedding_snippet))", "의미론적 텍스트 유사성 작업 예\nUniversal Sentence Encoder에 의해 생성된 임베딩은 대략적으로 정규화됩니다. 두 문장의 의미론적 유사성은 인코딩의 내적으로 간편하게 계산될 수 있습니다.", "def plot_similarity(labels, features, rotation):\n corr = np.inner(features, features)\n sns.set(font_scale=1.2)\n g = sns.heatmap(\n corr,\n xticklabels=labels,\n yticklabels=labels,\n vmin=0,\n vmax=1,\n cmap=\"YlOrRd\")\n g.set_xticklabels(labels, rotation=rotation)\n g.set_title(\"Semantic Textual Similarity\")\n\ndef run_and_plot(messages_):\n message_embeddings_ = embed(messages_)\n plot_similarity(messages_, message_embeddings_, 90)", "시각화된 유사성\n여기서는 히트 맵으로 유사성을 나타냅니다. 최종 그래프는 9x9 행렬이며, 각 항 [i, j]는 문장 i 및 j에 대한 인코딩의 내적을 바탕으로 색상이 지정됩니다.", "messages = [\n # Smartphones\n \"I like my phone\",\n \"My phone is not good.\",\n \"Your cellphone looks great.\",\n\n # Weather\n \"Will it snow tomorrow?\",\n \"Recently a lot of hurricanes have hit the US\",\n \"Global warming is real\",\n\n # Food and health\n \"An apple a day, keeps the doctors away\",\n \"Eating strawberries is healthy\",\n \"Is paleo better than keto?\",\n\n # Asking about age\n \"How old are you?\",\n \"what is your age?\",\n]\n\nrun_and_plot(messages)\n ", "평가: 의미론적 텍스트 유사성(STS) 벤치마크\nSTS 벤치마크는 문장 임베딩을 사용하여 계산된 유사성 점수가 사람의 판단과 일치하는 정도에 대한 내재적 평가를 제공합니다. 벤치마크를 위해 시스템이 다양한 문장 쌍 선택에 대한 유사성 점수를 반환해야 합니다. 
그런 다음 Pearson 상관 관계를 사용하여 사람의 판단에 대한 머신 유사성 점수의 품질을 평가합니다.\n데이터 다운로드하기", "import pandas\nimport scipy\nimport math\nimport csv\n\nsts_dataset = tf.keras.utils.get_file(\n fname=\"Stsbenchmark.tar.gz\",\n origin=\"http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz\",\n extract=True)\nsts_dev = pandas.read_table(\n os.path.join(os.path.dirname(sts_dataset), \"stsbenchmark\", \"sts-dev.csv\"),\n error_bad_lines=False,\n skip_blank_lines=True,\n usecols=[4, 5, 6],\n names=[\"sim\", \"sent_1\", \"sent_2\"])\nsts_test = pandas.read_table(\n os.path.join(\n os.path.dirname(sts_dataset), \"stsbenchmark\", \"sts-test.csv\"),\n error_bad_lines=False,\n quoting=csv.QUOTE_NONE,\n skip_blank_lines=True,\n usecols=[4, 5, 6],\n names=[\"sim\", \"sent_1\", \"sent_2\"])\n# cleanup some NaN values in sts_dev\nsts_dev = sts_dev[[isinstance(s, str) for s in sts_dev['sent_2']]]", "문장 임베딩 평가하기", "sts_data = sts_dev #@param [\"sts_dev\", \"sts_test\"] {type:\"raw\"}\n\ndef run_sts_benchmark(batch):\n sts_encode1 = tf.nn.l2_normalize(embed(tf.constant(batch['sent_1'].tolist())), axis=1)\n sts_encode2 = tf.nn.l2_normalize(embed(tf.constant(batch['sent_2'].tolist())), axis=1)\n cosine_similarities = tf.reduce_sum(tf.multiply(sts_encode1, sts_encode2), axis=1)\n clip_cosine_similarities = tf.clip_by_value(cosine_similarities, -1.0, 1.0)\n scores = 1.0 - tf.acos(clip_cosine_similarities) / math.pi\n \"\"\"Returns the similarity scores\"\"\"\n return scores\n\ndev_scores = sts_data['sim'].tolist()\nscores = []\nfor batch in np.array_split(sts_data, 10):\n scores.extend(run_sts_benchmark(batch))\n\npearson_correlation = scipy.stats.pearsonr(scores, dev_scores)\nprint('Pearson correlation coefficient = {0}\\np-value = {1}'.format(\n pearson_correlation[0], pearson_correlation[1]))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
musketeer191/job_analytics
.ipynb_checkpoints/user_apply_job-checkpoint.ipynb
gpl-3.0
[ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom time import time\n\nfrom my_util import *\nfrom job_rec_helpers import *", "HELPERS:", "# Global vars\nDATA_DIR = 'D:/larc_projects/job_analytics/data/clean/'\nRES_DIR = 'd:/larc_projects/job_analytics/results/'\nAGG_DIR = RES_DIR + 'agg/'\nFIG_DIR = RES_DIR + 'figs/'\n\napps = pd.read_csv(DATA_DIR + 'apps_with_time.csv')\napps.shape\n\n# Rm noise (numbers) in job_title column\napps['is_number'] = map(is_number, apps['job_title'])\napps = apps.query('is_number == False')\n\n# del apps['user.id']; del apps['user.index']; del apps['item.index']; del apps['freq']\napps.rename(columns={'job.id': 'job_id', 'job.title': 'job_title', 'apply.date': 'apply_date'}, \n inplace=True)\n\napps.to_csv(DATA_DIR + 'apps_with_time.csv', index=False)\napps.head(3)", "Basic statistics", "n_applicant = apps['uid'].nunique(); n_application = apps.shape[0]\nn_job = len(np.unique(apps['job_id'])); n_job_title = len(np.unique(apps['job_title']))\nn_company = posts['company_registration_number_uen_ep'].nunique()\n\nstats = pd.DataFrame({'n_application': n_application, 'n_applicant': n_applicant, \n 'n_job': n_job, 'n_job_title': n_job_title, 'n_company': n_company}, index=[0])\nstats\n\nstats.to_csv(DATA_DIR + 'stats/stats.csv', index=False)", "Applicant-apply-Job matrix\nA. Number of times an applicant applies a specific job title (position).", "agg_apps = apps.groupby(by=['uid', 'job_title']).agg({'job_id': 'nunique', 'apply_date': 'nunique'})\n\n# convert to DF\nagg_apps = agg_apps.add_prefix('n_').reset_index()\nagg_apps['n_apply'] = agg_apps['n_job_id']\nagg_apps.head(3)", "Let's look at the quartiles of the number of times an applicant applies for a specific job.", "quantile(df['n_apply'])", "As expected, for most of the cases (50%), an applicant applies just once for a specific job. \nHowever, we can also see at least 1 extreme case where an applicant applies 582 times for just a job title. 
Thus, let's look more closely at the distribution of $N_{apply}$.", "plt.hist(df['n_apply'], bins=np.unique(df['n_apply']), log=True)\n\nplt.xlabel(r'$N_{apply}$')\nplt.ylabel('# applicant-job pairs (log scale)')\nplt.savefig(DATA_DIR + 'apply_freq.pdf')\n\nplt.show()\nplt.close()", "From the histogram, we can see that there are cases when a user applies for a job titles at least 100 times. Let's look closer at those extreme cases.\nExtreme cases (a user applies the same job title at least 100 times)", "extremes = agg_apps.query('n_apply >= 100')\nextremes.sort_values(by='n_apply', ascending=False, inplace=True)\nextremes.head()\n\nprint('No. of extreme cases: {}'.format(extremes.shape[0]))", "To get a more complete picture on these extreme cases, let's put in apply dates and companies of those jobs.\n\nGet dates and compute duration of extreme applications:", "# ext_users = np.unique(extremes['uid'])\ndf = apps[apps['uid'].isin(extremes['uid'])]\ndf = df[df['job_title'].isin(extremes['job_title'])]\next_apps = df\n\next_apps.head(1)\n\nres = calDuration(ext_apps)\n\nres = pd.merge(res, extremes, left_index=True, right_on=['uid', 'job_title'])\nres.sort_values(by='uid', inplace=True)\nres = res[['uid', 'job_title', 'n_apply', 'first_apply_date', 'last_apply_date', 'n_active_day', 'total_duration_in_day']]\n\nres.sort_values('n_apply', ascending=False, inplace=True)\nres.head()\n\nres.tail()\n\ntmp = apps.query('uid==33833')[['uid', 'job_title', 'job_id']] .groupby('job_title').agg({'job_id': 'nunique'})\ntmp = tmp.add_prefix('n_').reset_index()\ntmp.rename(columns={'n_job_id': 'n_apply'}, inplace=True)\ntmp.sort_values('n_apply', ascending=False, inplace=True)\n\nquantile(res['n_active_day'])\n\nres.to_csv(RES_DIR + 'extremes.csv')", "Dates/duration of all applications:", "apps_with_duration = calDuration(apps)\n\napps_with_duration.head()\n\nall_res = pd.merge(apps_with_duration, agg_apps, left_index=True, right_on=['uid', 
'job_title'])\nall_res.sort_values(by='uid', inplace=True)\nall_res = all_res[['uid', 'job_title', 'n_apply', 'first_apply_date', 'last_apply_date', 'n_active_day', 'total_duration_in_day']]\n\nall_res.head()\n\nall_res.shape\n\nall_res.to_csv(AGG_DIR + 'timed_apps.csv', index=False)\n\nnormal = all_res.query('n_apply < 100')\nextremes = res\n\nplt.figure(figsize=(10,6))\nplt.subplot(1,2,1)\nplt.hist(extremes['n_active_day'], bins=np.unique(extremes['n_active_day']))\nplt.title('Extreme cases')\nplt.xlabel('# active days')\nplt.ylabel('# user-apply-job cases')\n\nplt.subplots_adjust(wspace=.5)\nplt.subplot(1,2,2)\nplt.hist(normal['n_active_day'], bins=np.unique(normal['n_active_day']), \n log=True)\nplt.title('Normal cases')\nplt.xlabel('# active days')\nplt.ylabel('# user-apply-job cases')\n\nplt.savefig(RES_DIR + 'n_active_day.pdf')\nplt.show()\nplt.close()", "B. Number of different job titles an applicant applies", "agg_job_title = apps[['uid', 'job_title']].groupby('uid').agg({'job_title': 'nunique'})\nagg_job_title = agg_job_title.add_prefix('n_').reset_index()\n\nagg_job_title.sort_values('n_job_title', ascending=False, inplace=True)\n# agg_job_title.head()\n\nagg_job_id = apps[['uid', 'job_id']].groupby('uid').agg({'job_id': 'nunique'})\nagg_job_id = agg_job_id.add_prefix('n_').reset_index()\nagg_job_id.sort_values('n_job_id', ascending=False, inplace=True)\n\nagg_df = pd.merge(agg_job_title, agg_job_id)\n\nagg_df.rename(columns={'n_job_id': 'n_job'}, inplace=True)\nagg_df.head()\n\nplt.close('all')\n\nfig = plt.figure(figsize=(10,6))\nplt.subplot(1,2,1)\nloglog(agg_df['n_job_title'], xl='# Job titles applied', yl='# applicants')\nplt.subplots_adjust(wspace=.5)\n\nplt.subplot(1,2,2)\nloglog(agg_df['n_job'], xl='# Jobs applied', yl='# applicants')\n\nplt.savefig(FIG_DIR + 'figs/applied_jobs.pdf')\nplt.show()\nplt.close()\n\nprint apps.shape[0]\n\n# Join all job titles of each user for reference\nt0 = time()\ntmp = apps[['uid', 
'job_title']].groupby('uid').agg({'job_title': paste})\nprint('Finished joining job titles after {}s'.format(time()-t0))\ntmp = tmp.add_suffix('s').reset_index()\n\napps_by_job_title = pd.merge(apps_by_job_title, tmp)\napps_by_job_title.sort_values('n_job_title', ascending=False, inplace=True)\n\napps_by_job_title.to_csv(AGG_DIR + 'apps_by_job_title.csv', index=False)", "C. Number of company an applicant applies\nMerge necessary files to get a full dataset", "posts = pd.read_csv(DATA_DIR + 'full_job_posts.csv')\nprint(posts.shape)\nposts = dot2dash(posts)\n\nposts.head()\n\n# Extract just job id and employer id\njob_and_employer = posts[['job_id', 'company_registration_number_uen_ep']].drop_duplicates()\njob_and_employer.head(1)\n\n# Load employer details (names, desc,...)\nemployer_detail = pd.read_csv(DATA_DIR + 'employers.csv')\nemployer_detail.drop_duplicates(inplace=True)\nprint(employer_detail.shape)\n\nemployer_detail = dot2dash(employer_detail)\nemployer_detail.head(1)\n\n# Merge to add employer details\njob_and_employer = job_and_employer.rename(columns={'company_registration_number_uen_ep': 'reg_no_uen_ep'})\ndf = pd.merge(apps, pd.merge(job_and_employer, employer_detail))\nprint(df.shape)\n\ndf.sort_values(by='organisation_name_ep', inplace=True, na_position='first')\ndf.head()\n\n# del df['is_number']\n# df.to_csv(DATA_DIR + 'full_apps.csv', index=False)\ndf.head()\n\nuser_apply_comp = df[['uid', 'reg_no_uen_ep', 'organisation_name_ep']]\nuser_apply_comp['n_apply'] = ''\n\napps_by_comp = user_apply_comp.groupby(['uid', 'reg_no_uen_ep', 'organisation_name_ep']).count()\napps_by_comp = apps_by_comp.reset_index()\napps_by_comp.sort_values('n_apply', ascending=False, inplace=True)\napps_by_comp.head()\n\napps_by_comp.to_csv(AGG_DIR + 'apps_by_comp.csv', index=False)\n\nloglog(apps_by_comp['n_apply'], xl='# applications', yl='# user-apply-company cases')\n\nplt.savefig(FIG_DIR + 
'user_comp.pdf')\nplt.show()\nplt.close()\n\nquantile(user_apply_comp['n_apply'])", "D. Number of (job title, company) an applicant applies", "tmp = df[['uid', 'job_title', 'reg_no_uen_ep', 'organisation_name_ep']]\ntmp['n_apply'] = ''\n\napps_by_job_comp = tmp.groupby(['uid', 'job_title', 'reg_no_uen_ep', 'organisation_name_ep']).count()\napps_by_job_comp = apps_by_job_comp.reset_index()\napps_by_job_comp.sort_values('n_apply', ascending=False, inplace=True)\nprint apps_by_job_comp.shape\napps_by_job_comp.head()\n\napps_by_job_comp.to_csv(AGG_DIR + 'apps_by_job_comp.csv', index=False)\n\nloglog(apps_by_job_comp['n_apply'], xl='# applications', yl='# user-apply-job-at-company cases')\nplt.savefig(FIG_DIR + 'user_job_comp.pdf')\nplt.show()\nplt.close()\n\njob_comp = df[['job_title', 'organisation_name_ep']].drop_duplicates()\njob_comp.shape" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
felipessalvatore/CNNexample
src/tutorials/notMNIST.ipynb
mit
[ "CNN Example: notMNIST dataset\nThe notMNIST dataset is a example similar to MNIST. The data consists of characters rendered in a variety of fonts on a 28x28 image. The labels are limited to 'A' through 'J' (10 classes).", "import os\nimport sys\nimport tensorflow as tf\nimport inspect\nimport matplotlib.pyplot as plt\nimport numpy as np\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0, parentdir)\n\nfrom util import get_data_4d, plot9images,randomize_in_place\nfrom CNN import CNNModel, train_model,check_test,one_prediction\nfrom DataHolder import DataHolder\nfrom Config import Config\n", "Importing all the data", "train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels = get_data_4d()\nprint('Training:', train_dataset.shape, train_labels.shape)\nprint('Validation:', valid_dataset.shape, valid_labels.shape)\nprint('Testing:', test_dataset.shape, test_labels.shape)", "Visualizing some examples", "train_classes = np.argmax(train_labels, axis=1)\ntrain_classes = [chr(i + ord('A')) for i in train_classes]\nimg_size = 28\nimg_shape = (img_size, img_size)\nimages = train_dataset[0:9]\ncls_true = train_classes[0:9]\nplot9images(images, cls_true, img_shape) ", "The hyperparameters of the model are", "my_config = Config()\nprint(\"batch_size = {}\".format(my_config.batch_size))\nprint(\"patch_size = {}\".format(my_config.patch_size))\nprint(\"image_size = {}\".format(my_config.image_size))\nprint(\"num_labels = {}\".format(my_config.num_labels))\nprint(\"num_channels = {}\".format(my_config.num_channels))\nprint(\"num_filters_1 = {}\".format(my_config.num_filters_1))\nprint(\"num_filters_2 = {}\".format(my_config.num_filters_2))\nprint(\"hidden_nodes_1 = {}\".format(my_config.hidden_nodes_1))\nprint(\"hidden_nodes_2 = {}\".format(my_config.hidden_nodes_2))\nprint(\"hidden_nodes_3 = {}\".format(my_config.hidden_nodes_3))\nprint(\"learning_rate = 
{}\".format(my_config.learning_rate))\nprint(\"steps_for_decay = {}\".format(my_config.steps_for_decay))\nprint(\"decay_rate = {}\".format(my_config.decay_rate))", "Now, training the model using 10001 steps", "my_dataholder = DataHolder(train_dataset,\n train_labels,\n valid_dataset,\n valid_labels,\n test_dataset,\n test_labels)\nmy_model = CNNModel(my_config, my_dataholder)\ntrain_model(my_model, my_dataholder, num_steps=10001, show_step=1000)", "Cheking the trained model with the test dataset", "print(\"Test accuracy: %.2f%%\" % (check_test(my_model) * 100))", "Seeing the model perform in 9 images from the valid dataset", "randomize_in_place(valid_dataset, valid_labels, 0)\nvalid_classes = np.argmax(valid_labels, axis=1)\nvalid_classes = [chr(i + ord('A')) for i in valid_classes]\ncls_true = valid_classes[0:9]\nimages = valid_dataset[0:9]\nimages = [image.reshape(1,\n image.shape[0],\n image.shape[1],\n image.shape[2]) for image in images]\npredictions = [chr(one_prediction(my_model, image) + ord('A')) for image in images]\nplot9images(images, cls_true, img_shape) \n" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
junhwanjang/DataSchool
Lecture/11. 추정 및 검정/6) MLE 모수 추정의 예.ipynb
mit
[ "MLE 모수 추정의 예\n베르누이 분포의 모수 추정\n\n각각의 시도 $x_i$에 대한 확률은 베르누이 분포\n\n$$ P(x | \\theta ) = \\text{Bern}(x | \\theta ) = \\theta^x (1 - \\theta)^{1-x}$$\n\n\n샘플이 $N$개 있는 경우, Likelihood\n$$ L = P(x_{1:N}|\\theta) = \\prod_{i=1}^N \\theta^{x_i} (1 - \\theta)^{1-x_i} $$\n\n\nLog-Likelihood\n$$ \n\\begin{eqnarray}\n\\log L \n&=& \\log P(x_{1:N}|\\theta) \\\n&=& \\sum_{i=1}^N \\big{ {x_i} \\log\\theta + (1-x_i)\\log(1 - \\theta) \\big} \\\n&=& \\sum_{i=1}^N {x_i} \\log\\theta + \\left( N-\\sum_{i=1}^N x_i \\right) \\log( 1 - \\theta ) \\\n\\end{eqnarray}\n$$\n\n\n$x = 1$(성공) 또는 $x= 0$ (실패) 이므로 \n\n전체 시도 횟수 $N$\n\n그 중 성공 횟수 $N_1 = \\sum_{i=1}^N {x_i}$\n\n\n따라서 Log-Likelihood는 \n$$ \n\\begin{eqnarray}\n\\log L \n&=& N_1 \\log\\theta + (N-N_1) \\log(1 - \\theta) \\\n\\end{eqnarray}\n$$\n\n\nLog-Likelihood Derivative\n\n\n$$\n\\begin{eqnarray}\n\\dfrac{\\partial \\log L}{\\partial \\theta} \n&=& \\dfrac{\\partial}{\\partial \\theta} \\big{ N_1 \\log\\theta + (N-N_1) \\log(1 - \\theta) \\big} = 0\\\n&=& \\dfrac{N_1}{\\theta} - \\dfrac{N-N_1}{1-\\theta} = 0 \\\n\\end{eqnarray}\n$$\n$$\n\\dfrac{N_1}{\\theta} = \\dfrac{N-N_1}{1-\\theta}\n$$\n$$\n\\dfrac{1-\\theta}{\\theta} = \\dfrac{N-N_1}{N_1} \n$$\n$$\n\\dfrac{1}{\\theta} - 1 = \\dfrac{N}{N_1} - 1\n$$\n$$\n\\theta= \\dfrac{N_1}{N}\n$$", "theta0 = 0.6\nx = sp.stats.bernoulli(theta0).rvs(1000)\nN0, N1 = np.bincount(x, minlength=2)\nN = N0 + N1\ntheta = N1/N\ntheta", "카테고리 분포의 모수 추정\n\n각각의 시도 $x_i$에 대한 확률은 카테고리 분포\n\n$$ P(x | \\theta ) = \\text{Cat}(x | \\theta) = \\prod_{k=1}^K \\theta_k^{x_k} $$\n$$ \\sum_{k=1}^K \\theta_k = 1 $$\n\n\n샘플이 $N$개 있는 경우, Likelihood\n$$ L = P(x_{1:N}|\\theta) = \\prod_{i=1}^N \\prod_{k=1}^K \\theta_k^{x_{i,k}} $$\n\n\nLog-Likelihood\n$$ \n\\begin{eqnarray}\n\\log L \n&=& \\log P(x_{1:N}|\\theta) \\\n&=& \\sum_{i=1}^N \\sum_{k=1}^K {x_{i,k}} \\log\\theta_k \\\n&=& \\sum_{k=1}^K \\log\\theta_k \\sum_{i=1}^N {x_{i,k}}\n\\end{eqnarray}\n$$\n\n\n$x_k$가 나온 횟수 $N_k = \\sum_{i=1}^N {x_{i,k}}$이라고 표시\n\n\n따라서 
Log-Likelihood는 \n$$ \n\\begin{eqnarray}\n\\log L \n&=& \\sum_{k=1}^K \\log\\theta_k N_k\n\\end{eqnarray}\n$$\n\n\n추가 조건\n$$ \\sum_{k=1}^K \\theta_k = 1 $$\n\n\nLog-Likelihood Derivative with Lagrange multiplier\n\n\n$$\n\\begin{eqnarray}\n\\dfrac{\\partial \\log L}{\\partial \\theta_k} \n&=& \\dfrac{\\partial}{\\partial \\theta_k} \\left{ \\sum_{k=1}^K \\log\\theta_k N_k + \\lambda \\left(1- \\sum_{k=1}^K \\theta_k\\right) \\right} = 0 \\\n\\dfrac{\\partial \\log L}{\\partial \\lambda} \n&=& \\dfrac{\\partial}{\\partial \\lambda} \\left{ \\sum_{k=1}^K \\log\\theta_k N_k + \\lambda \\left(1- \\sum_{k=1}^K \\theta_k \\right) \\right} = 0\\\n\\end{eqnarray}\n$$\n$$\n\\dfrac{N_1}{\\theta_1} = \\dfrac{N_2}{\\theta_2} = \\cdots = \\dfrac{N_K}{\\theta_K} = \\lambda\n$$\n$$\n\\sum_{k=1}^K N_k = N\n$$\n$$\n\\lambda \\sum_{k=1}^K \\theta_k = \\lambda = N\n$$\n$$\n\\theta_k = \\dfrac{N_k}{N}\n$$", "theta0 = np.array([0.1, 0.3, 0.6])\nx = np.random.choice(np.arange(3), 1000, p=theta0)\nN0, N1, N2 = np.bincount(x, minlength=3)\nN = N0 + N1 + N2\ntheta = np.array([N0, N1, N2]) / N\ntheta", "정규 분포의 모수 추정\n\n각각의 시도 $x_i$에 대한 확률은 가우시안 정규 분포\n\n$$ P(x | \\theta ) = N(x | \\mu, \\sigma^2) = \\dfrac{1}{\\sqrt{2\\pi\\sigma^2}} \\exp \\left(-\\dfrac{(x-\\mu)^2}{2\\sigma^2}\\right) $$\n\n\n샘플이 $N$개 있는 경우, Likelihood\n$$ L = P(x_{1:N}|\\theta) = \\prod_{i=1}^N \\dfrac{1}{\\sqrt{2\\pi\\sigma^2}} \\exp \\left(-\\dfrac{(x_i-\\mu)^2}{2\\sigma^2}\\right)$$\n\n\nLog-Likelihood\n$$ \n\\begin{eqnarray}\n\\log L \n&=& \\log P(x_{1:N}|\\theta) \\\n&=& \\sum_{i=1}^N \\left{ -\\dfrac{1}{2}\\log(2\\pi\\sigma^2) - \\dfrac{(x_i-\\mu)^2}{2\\sigma^2} \\right} \\\n&=& -\\dfrac{N}{2} \\log(2\\pi\\sigma^2) - \\dfrac{1}{2\\sigma^2}\\sum_{i=1}^N (x_i-\\mu)^2\n\\end{eqnarray}\n$$\n\n\nLog-Likelihood Derivative\n\n\n$$\n\\begin{eqnarray}\n\\dfrac{\\partial \\log L}{\\partial \\mu} \n&=& \\dfrac{\\partial}{\\partial \\mu} \\left{ \\dfrac{N}{2} \\log(2\\pi\\sigma^2) + \\dfrac{1}{2\\sigma^2}\\sum_{i=1}^N 
(x_i-\\mu)^2 \\right} = 0 \\\n\\dfrac{\\partial \\log L}{\\partial \\sigma^2} \n&=& \\dfrac{\\partial}{\\partial \\sigma^2} \\left{ \\dfrac{N}{2} \\log(2\\pi\\sigma^2) + \\dfrac{1}{2\\sigma^2}\\sum_{i=1}^N (x_i-\\mu)^2 \\right} = 0\\\n\\end{eqnarray}\n$$\n$$\n\\dfrac{2}{2\\sigma^2}\\sum_{i=1}^N (x_i-\\mu) = 0\n$$\n$$\nN \\mu = \\sum_{i=1}^N x_i\n$$\n$$\n\\mu = \\dfrac{1}{N}\\sum_{i=1}^N x_i = \\bar{x}\n$$\n$$\n\\dfrac{N}{2\\sigma^2 } - \\dfrac{1}{2(\\sigma^2)^2}\\sum_{i=1}^N (x_i-\\mu)^2 = 0\n$$\n$$\n\\sigma^2 = \\dfrac{1}{N}\\sum_{i=1}^N (x_i-\\mu)^2 = \\dfrac{1}{N}\\sum_{i=1}^N (x_i-\\bar{x})^2 = s^2\n$$", "mu0 = 1\nsigma0 = 2\nx = sp.stats.norm(mu0, sigma0).rvs(1000)\nxbar = x.mean()\ns2 = x.std(ddof=1)\nxbar, s2", "다변수 정규 분포의 모수 추정\nMLE for Multivariate Gaussian Normal Distribution\n\n각각의 시도 $x_i$에 대한 확률은 다변수 정규 분포\n\n$$ P(x | \\theta ) = N(x | \\mu, \\Sigma) = \\dfrac{1}{(2\\pi)^{D/2} |\\Sigma|^{1/2}} \\exp \\left( -\\dfrac{1}{2} (x-\\mu)^T \\Sigma^{-1} (x-\\mu) \\right) $$\n\n\n샘플이 $N$개 있는 경우, Likelihood\n$$ L = P(x_{1:N}|\\theta) = \\prod_{i=1}^N \\dfrac{1}{(2\\pi)^{D/2} |\\Sigma|^{1/2}} \\exp \\left( -\\dfrac{1}{2} (x_i-\\mu)^T \\Sigma^{-1} (x_i-\\mu) \\right)$$\n\n\nLog-Likelihood\n$$ \n\\begin{eqnarray}\n\\log L \n&=& \\log P(x_{1:N}|\\theta) \\\n&=& \\sum_{i=1}^N \\left{ -\\log((2\\pi)^{D/2} |\\Sigma|^{1/2}) - \\dfrac{1}{2} (x-\\mu)^T \\Sigma^{-1} (x-\\mu) \\right} \\\n&=& C -\\dfrac{N}{2} \\log|\\Sigma| - \\dfrac{1}{2} \\sum (x-\\mu)^T \\Sigma^{-1} (x-\\mu) \n\\end{eqnarray}\n$$\n\n\nprecision matrix $\\Lambda = \\Sigma^{-1}$\n\n\n$$ \n\\begin{eqnarray}\n\\log L \n&=& C + \\dfrac{N}{2} \\log|\\Lambda| - \\dfrac{1}{2} \\sum(x-\\mu)^T \\Lambda (x-\\mu) \n\\end{eqnarray}\n$$\n$$ \\dfrac{\\partial L}{\\partial \\mu} = - \\dfrac{\\partial}{\\partial \\mu} \\sum_{i=1}^N (x_i-\\mu)^T \\Lambda (x_i-\\mu) = \\sum_{i=1}^N 2\\Lambda (x_i - \\mu) = 0 $$\n$$ \\mu = \\dfrac{1}{N}\\sum_{i=1}^N x_i $$\n$$ \\dfrac{\\partial L}{\\partial \\Lambda} = 
\\dfrac{\\partial}{\\partial \\Lambda} \\dfrac{N}{2} \\log|\\Lambda| - \\dfrac{\\partial}{\\partial \\Lambda} \\dfrac{1}{2} \\sum_{i=1}^N \\text{tr}( (x_i-\\mu)(x_i-\\mu)^T\\Lambda) =0 $$\n$$ \\dfrac{N}{2} \\Lambda^{-T} = \\dfrac{1}{2}\\sum_{i=1}^N (x_i-\\mu)(x_i-\\mu)^T $$ \n$$ \\Sigma = \\dfrac{1}{N}\\sum_{i=1}^N (x_i-\\mu)(x_i-\\mu)^T $$", "mu0 = np.array([0, 1])\nsigma0 = np.array([[1, 0.2], [0.2, 4]])\nx = sp.stats.multivariate_normal(mu0, sigma0).rvs(1000)\nxbar = x.mean(axis=0)\nS2 = np.cov(x, rowvar=0)\nprint(xbar)\nprint(S2)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
setiQuest/ML4SETI
tutorials/Step_4_Classify_with_WatsonVR_old.ipynb
apache-2.0
[ "Watson Visual Recognition Training with Spectrogram Images from SETI Signal Data\n\nhttps://www.ibm.com/watson/developercloud/visual-recognition/api/v3/\nhttps://www.ibm.com/watson/developercloud/doc/visual-recognition/customizing.html\nhttps://github.com/watson-developer-cloud/python-sdk\nhttps://github.com/watson-developer-cloud/python-sdk/blob/master/watson_developer_cloud/visual_recognition_v3.py\n\n<hr>\n\nInstall the Watson Developer Cloud Python SDK\n\nInstall the Python SDK if has not been previously installed !pip install --upgrade watson-developer-cloud\nRestart the kernel, after installing the SDK", "#!pip install --user --upgrade watson-developer-cloud\n\n#Making a local folder to put my data.\n\n#NOTE: YOU MUST do something like this on a Spark Enterprise cluster at the hackathon so that\n#you can put your data into a separate local file space. Otherwise, you'll likely collide with \n#your fellow participants. \n\nmy_team_name_data_folder = 'my_team_name_data_folder'\n\nmydatafolder = os.environ['PWD'] + '/' + my_team_name_data_folder + '/zipfiles'\nif os.path.exists(mydatafolder) is False:\n os.makedirs(mydatafolder)\n\n!ls my_team_name_data_folder/zipfiles\n\nfrom __future__ import division\n\nimport cStringIO\nimport glob\nimport json\nimport os\nimport requests\nimport time\nimport timeit\nimport zipfile\nimport copy\n\nfrom random import randint\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport ibmseti\n\nfrom watson_developer_cloud import VisualRecognitionV3\n\napiVer = VisualRecognitionV3.latest_version #'2016-05-20'\nclassifier_prefix = 'setisignals'\n\n#You can sign up with WatsonVR through Bluemix to get a key\n#However, Hackathon participants will be provided with a WATSON VR key that has more free API calls per day.\napiKey = 'WATSON-VISUAL-RECOGNITION-API-KEY' ", "<br/>\nInit the Watson Visual Recognition Python Library\n\nyou may need to install the SDK first: !pip install --upgrade watson-developer-cloud\nyou will need 
the API key from the Watson Visual Recognition Service", "vr = VisualRecognitionV3(apiVer, api_key=apiKey)", "<br/>\nLook For Existing Custom Classifier\nUse an existing custom classifier (and update) if one exists, else a new custom classifier will be created", "## View all of your classifiers\n\nclassifiers = vr.list_classifiers()\nprint json.dumps(classifiers, indent=2)\n\n## Run this cell ONLY IF you want to REMOVE all classifiers\n# Otherwise, the subsequent cell will append images to the `classifier_prefix` classifier\nclassifiers = vr.list_classifiers()\nfor c in classifiers['classifiers']:\n vr.delete_classifier(c['classifier_id'])\n\nclassifiers = vr.list_classifiers()\nprint json.dumps(classifiers, indent=2)\n\n#Create new classifier, or get the ID for the latest SETISIGNALS classifier\n\nclassifier_id = None\nclassifier = None\n\nclassifiers = vr.list_classifiers()\n\nfor c in classifiers['classifiers']:\n if c['status'] == 'ready' and (classifier_prefix in c['classifier_id']):\n classifier_id = c['classifier_id']\n\n\nif classifier_id is not None:\n classifier = vr.get_classifier(classifier_id)\n print '\\r\\nFound classifer:\\r\\n\\r\\n{}'.format(json.dumps(classifier, indent=2))\nelse:\n print 'No custom classifier available\\r\\n'\n print(json.dumps(classifiers, indent=2))", "<br/>\nSend the Images Archives to the Watson Visual Recognition Service for Training\n\nhttps://www.ibm.com/watson/developercloud/doc/visual-recognition/customizing.html\nhttps://www.ibm.com/watson/developercloud/visual-recognition/api/v3/\nhttps://github.com/watson-developer-cloud/python-sdk", "squiggle = sorted(glob.glob('{}/classification_*_squiggle.zip'.format(mydatafolder)))\nnarrowband = sorted(glob.glob('{}/classification_*_narrowband.zip'.format(mydatafolder)))\nnarrowbanddrd = sorted(glob.glob('{}/classification_*_narrowbanddrd.zip'.format(mydatafolder)))\nnoise = sorted(glob.glob('{}/classification_*_noise.zip'.format(mydatafolder)))\n\nsq = len(squiggle)\nnb = 
len(narrowband)\nnd = len(narrowbanddrd)\nns = len(noise)\n\n## Possible todo here: Try using the 'noise' as a \"negative\" example when training Watson. See the Watson documentation.\n\nnum = max(sq, nb, nd, ns)\n#num = max(sq, nb, nd)\n\nif classifier_id is None:\n print 'Adding custom classifier ... this may take awhile'\nelse:\n print 'Updating custom classifier {} ... this may take awhile'.format(classifier_id)\n\nfor i in range(num):\n squiggle_p = open(squiggle[i], 'rb') if i < sq else None\n narrowband_p = open(narrowband[i], 'rb') if i < nb else None\n narrowbanddrd_p = open(narrowbanddrd[i], 'rb') if i < nd else None\n noise_p = open(noise[i], 'rb') if i < ns else None\n\n if classifier_id is None:\n# print 'Creating with\\r\\n{}\\r\\n{}\\r\\n{}\\r'.format(squiggle_p, narrowband_p, narrowbanddrd_p) #use this line if going to use 'noise' as negative example\n print 'Creating with\\r\\n{}\\r\\n{}\\r\\n{}\\r\\n{}\\r'.format(squiggle_p, narrowband_p, narrowbanddrd_p, noise_p)\n classifier = vr.create_classifier(\n classifier_prefix,\n squiggle_positive_examples = squiggle_p,\n narrowband_positive_examples = narrowband_p,\n narrowbanddrd_positive_examples = narrowbanddrd_p,\n noise_positive_examples = noise_p #remove this if going to use noise as 'negative' examples\n )\n \n classifier_id = classifier['classifier_id']\n else:\n print 'Updating with\\r\\n{}\\r\\n{}\\r\\n{}\\r\\n{}\\r'.format(squiggle_p, narrowband_p, narrowbanddrd_p, noise_p)\n# print 'Updating with\\r\\n{}\\r\\n{}\\r\\n{}\\r'.format(squiggle_p, narrowband_p, narrowbanddrd_p) #use this line if going to use 'noise' as negative example\n classifier = vr.update_classifier(\n classifier_id,\n squiggle_positive_examples = squiggle_p,\n narrowband_positive_examples = narrowband_p,\n narrowbanddrd_positive_examples = narrowbanddrd_p,\n noise_positive_examples = noise_p #remove this if going to use noise as 'negative' examples\n )\n\n if squiggle_p is not None:\n squiggle_p.close()\n if narrowband_p is 
not None:\n narrowband_p.close()\n if narrowbanddrd_p is not None:\n narrowbanddrd_p.close()\n if noise_p is not None:\n noise_p.close()\n\n if classifier is not None:\n print('Classifier: {}'.format(classifier_id))\n status = classifier['status']\n startTimer = timeit.default_timer()\n while status in ['training', 'retraining']:\n print('Status: {}'.format(status))\n time.sleep(10)\n classifier = vr.get_classifier(classifier_id)\n status = classifier['status']\n stopTimer = timeit.default_timer()\n print '{} took {} minutes'.format('Training' if i == 0 else 'Retraining', int(stopTimer - startTimer) / 60)\n\nprint(json.dumps(vr.get_classifier(classifier_id), indent=2))\n", "<br/>\nTake a Random Data File for Testing\n\nTake a random data file from the test set\nCreate a Spectrogram Image", "zz = zipfile.ZipFile(mydatafolder + '/' + 'testset_narrowband.zip')\n\ntest_list = zz.namelist()\nrandomSignal = zz.open(test_list[10],'r')\n\nfrom IPython.display import Image\nsquigImg = randomSignal.read()\nImage(squigImg)\n\n#note - have to 'open' this again because it was already .read() out in the line above\nrandomSignal = zz.open(test_list[10],'r')\n\nurl_result = vr.classify(images_file=randomSignal, classifier_ids=classifier_id, threshold=0.0)\n\nprint(json.dumps(url_result, indent=2))", "<br/>\nRun the Complete Test Set", "#Create a dictionary object to store results from Watson\n\nfrom collections import defaultdict\n\nclass_list = ['squiggle', 'noise', 'narrowband', 'narrowbanddrd']\n\nresults_group_by_class = {}\nfor classification in class_list:\n results_group_by_class[classification] = defaultdict(list)\n \nfailed_to_classify_uuid_list = []\n\nprint classifier_id\n\nresults_group_by_class\n\n\n### NOTE. If this breaks due to a requests timeout or other error: **just restart this cell**\n# The processing should pick up where it left off. \n\n## NOTE: This code could be more efficient and make fewer HTTP calls to Watson. 
I could have dumped the testset_<class>.zip into \n# smaller zip files (testset_<class>_N.zip for N = 1,2,3,4...) and then made a single call to Watson with each smaller zip file\n#\n# Example:\n# with open(mydatafolder + '/' + 'testset_squiggle_1.zip', 'rb') as squigglezips:\n# url_result = vr.classify(images_file=squigglezips, classifier_ids=classifier_id, threshold=0.0)\n\n# The 'testset_squiggle.zip' files are too large to make a single to call to Watson, and so this code goes through\n# each file one by one.\n\n### ASLO, I could have farmed this out to the Spark executor nodes as well. \n\nfor sigclass in class_list:\n \n passed = 0\n \n zz = zipfile.ZipFile(mydatafolder + '/' + 'testset_{}.zip'.format(sigclass))\n zzlist = zz.namelist()\n \n ### REDUCING TESTING to only the first 30 signals in the test set -- to keep this demonstration code faster.\n \n zzlist = zzlist[:30] \n \n zzlistsize = len(zzlist)\n \n startTimer = timeit.default_timer()\n\n resdict = results_group_by_class[classification]\n \n print 'Running test ({} images) for {}... 
this may take a while.'.format(zzlistsize, sigclass)\n\n for fn in zzlist:\n pngfilename = fn.split('/')[-1]\n uuid = pngfilename.split('.')[0]\n classification = sigclass\n \n if uuid in resdict['uuid'] or uuid in failed_to_classify_uuid_list:\n print \" have already classified {}\".format(uuid)\n continue\n \n classify_result = vr.classify(images_file=zz.open(fn,'r'), classifier_ids=classifier_id, threshold=0.0)\n \n maxscore = 0\n maxscoreclass = None\n\n classifiers_arr = classify_result['images'][0]['classifiers']\n \n score_list = []\n for classifier_result in classifiers_arr:\n for class_result in classifier_result['classes']:\n score_list.append((class_result['class'],class_result['score']))\n if class_result['score'] > maxscore:\n maxscore = class_result['score']\n maxscoreclass = class_result['class']\n\n #sort alphabetically\n score_list.sort(key = lambda x: x[0])\n score_list = map(lambda x:x[1], score_list)\n\n if maxscoreclass is None:\n print 'Failed: {} - Actual: {}, No classification returned'.format(pngfilename, classification)\n #print(json.dumps(classify_result, indent=2))\n\n elif maxscoreclass != classification:\n print 'Failed: {} - Actual: {}, Watson Predicted: {} ({})'.format(pngfilename, classification, maxscoreclass, maxscore)\n else:\n passed += 1\n print 'Passed: {} - Actual: {}, Watson Predicted: {} ({})'.format(pngfilename, classification, maxscoreclass, maxscore)\n\n if maxscoreclass is not None:\n resdict['signal_classification'].append(classification)\n resdict['uuid'].append(uuid)\n resdict['watson_class'].append(maxscoreclass)\n resdict['watson_class_score'].append(maxscore)\n resdict['scores'].append(score_list)\n else:\n #add to failed list\n failed_to_classify_uuid_list.append(uuid)\n\n stopTimer = timeit.default_timer()\n\n print 'Test Score: {}% ({} of {} Passed)'.format(int((float(passed) / zzlistsize) * 100), passed, zzlistsize)\n print 'Tested {} images in {} minutes'.format(zzlistsize, int(stopTimer - startTimer) / 
60)\n\nprint \"DONE\"\n \n\nimport pickle\npickle.dump(results_group_by_class, open(mydatafolder + '/' + \"watson_results.pickle\", \"w\"))\n\nwatson_results = pickle.load(open(mydatafolder + '/' + \"watson_results.pickle\",\"r\"))\n# reorganize the watson_results dictionary to extract\n# a list of [true_class, [scores], estimated_class] and\n# use these for measuring our model's performance\n\nclass_scores = []\nfor k in watson_results.keys():\n class_scores += zip(watson_results[k]['uuid'], watson_results[k]['signal_classification'], watson_results[k]['scores'], watson_results[k]['watson_class'] )\n\nclass_scores[100]\n\nfrom sklearn.metrics import classification_report\nimport sklearn\n\ny_train = [x[1] for x in class_scores]\ny_pred = [x[3] for x in class_scores]\ny_prob = [x[2] for x in class_scores]\n#we normalize the Watson score values to 1 in order to use them in the log_loss calculation even though the Watson VR scores are not true class prediction probabilities\ny_prob = map(lambda x: (x, sum(x)), y_prob)\ny_prob = map(lambda x: [y / x[1] for y in x[0]], y_prob)\n\nprint sklearn.metrics.classification_report(y_train,y_pred)\nprint sklearn.metrics.confusion_matrix(y_train,y_pred)\nprint(\"Classification accuracy: %0.6f\" % sklearn.metrics.accuracy_score(y_train,y_pred) )\nprint(\"Log Loss: %0.6f\" % sklearn.metrics.log_loss(y_train,y_prob) )", "Generate CSV file for Scoreboard\nHere's an example of what the CSV file should look like for submission to the scoreboard. Although, in this case, we only have 4 classes instead of 7.\nNOTE: This uses the PNG files created in the Step 3 notebook, which only contain the BASIC4 data set. 
The code challenge and hackathon will be based on the Primary Data Set which contains 7 signal classes", "import csv\nmy_output_results = my_team_name_data_folder + '/' + 'watson_scores.csv'\nwith open(my_output_results, 'w') as csvfile:\n fwriter = csv.writer(csvfile, delimiter=',')\n for row in class_scores:\n fwriter.writerow([row[0]] + row[2])\n\n!cat my_team_name_data_folder/watson_scores.csv" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
Neuroglycerin/neukrill-net-work
notebooks/Holdout testing for Pylearn2 Pickles.ipynb
mit
[ "After running your Pylearn2 models, it's probably not best to compare them on the score they get on the validation set, as that is used in the training process; so could be the victim of overfitting. It would be better to run the model over the test set, which is supposed to be a holdout set used to compare models. We could rerun all our models with a monitor on this value, but for models we've already run, it might be more useful to be able to pull out this value for just that pickle.\nThis is likely to be wasted effort, because it seems like the kind of thing that should already exist in Pylearn2. Unfortunately, since I can't find it and it seems fairly simple to implement I'm just going to go ahead and write it.\nHopefully, this will also help us figure out what's going wrong with some submissions, that turn out to be incredibly bad; for example, those using augmentation.", "import pylearn2.utils\nimport pylearn2.config\nimport theano\nimport neukrill_net.dense_dataset\nimport neukrill_net.utils\nimport numpy as np\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport holoviews as hl\n%load_ext holoviews.ipython", "Loading data and model\nInitialise, loading the settings and the test dataset we're going to be using:", "cd ..\n\nsettings = neukrill_net.utils.Settings(\"settings.json\")\nrun_settings = neukrill_net.utils.load_run_settings(\n \"run_settings/alexnet_based_norm_global_8aug.json\", settings, force=True)\n\n%%time\n# loading the model\nmodel = pylearn2.utils.serial.load(run_settings['pickle abspath'])\n\nreload(neukrill_net.dense_dataset)\n\n%%time\n# loading the data\ndataset = neukrill_net.dense_dataset.DensePNGDataset(settings_path=run_settings['settings_path'],\n run_settings=run_settings['run_settings_path'],\n train_or_predict='train',\n training_set_mode='test', force=True)", "Setting up forward pass\nNow we've loaded the data and the model we're going to set up a forward pass through the data in the same way we do it in the test.py 
script: pick a batch size, compile a Theano function and then iterate over the whole dataset in batches, filling an array of predictions.", "# find allowed batch size over 1000 (want big batches)\n# (Theano has to have fixed batch size and we don't want leftover)\nbatch_size=1000\nwhile dataset.X.shape[0]%batch_size != 0:\n batch_size += 1\n\nn_batches = int(dataset.X.shape[0]/batch_size)\n\n%%time\n# set this batch size\nmodel.set_batch_size(batch_size)\n# compile Theano function\nX = model.get_input_space().make_batch_theano()\nY = model.fprop(X)\nf = theano.function([X],Y)", "Compute probabilities\nThe following is the same as the code in test.py that applies the processing.", "%%time\ny = np.zeros((dataset.X.shape[0],len(settings.classes)))\nfor i in xrange(n_batches):\n print(\"Batch {0} of {1}\".format(i+1,n_batches))\n x_arg = dataset.X[i*batch_size:(i+1)*batch_size,:]\n if X.ndim > 2:\n x_arg = dataset.get_topological_view(x_arg)\n y[i*batch_size:(i+1)*batch_size,:] = (f(x_arg.astype(X.dtype).T))\n\nplt.scatter(np.where(y == 0)[1],np.where(y==0)[0])", "Of course, it's strange that there are any zeros at all. Hopefully they'll go away when we start averaging.\nScore before averaging\nWe can score the model before averaging by just using the class labels as they were going to be used for training. Using Sklearn's utility for calculating log_loss:", "import sklearn.metrics\n\nsklearn.metrics.log_loss(dataset.y,y)", "Score after averaging\nIn test.py we take the least intelligent approach to dealing with averaging over the different augmented versions. 
Basically, we just assume that whatever the augmentation factor is, the labels must repeat over that step size, so we can just collapse those into a single vector of probabilities.\nFirst, we should check that assumption:", "# augmentation factor\naf = 8\n\nfor low,high in zip(range(0,dataset.y.shape[0],af),range(af,dataset.y.shape[0]+af,af)):\n first = dataset.y[low][0]\n if any(first != i for i in dataset.y[low:high].ravel()):\n print(\"Labels do not match at:\", (low,high))\n break\n\ny_collapsed = np.zeros((int(dataset.X.shape[0]/af), len(settings.classes))) \nfor i,(low,high) in enumerate(zip(range(0,dataset.y.shape[0],af),\n range(af,dataset.y.shape[0]+af,af))):\n y_collapsed[i,:] = np.mean(y[low:high,:], axis=0)\n\nplt.scatter(np.where(y_collapsed == 0)[1],np.where(y_collapsed == 0)[0])", "There are no zeros in there now!", "labels_collapsed = dataset.y[range(0,dataset.y.shape[0],af)]\n\nlabels_collapsed.shape\n\nsklearn.metrics.log_loss(labels_collapsed,y_collapsed)", "That's pretty much exactly what we got on the leaderboard." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
sdpython/ensae_teaching_cs
_doc/notebooks/td1a/td1a_cenonce_session1.ipynb
mit
[ "1A.0 - Premiers pas en Python\nQuestions très simples pour ceux qui savent coder, quelques éléments principaux du langage Python pour les autres.", "from jyquickhelper import add_notebook_menu\nadd_notebook_menu()", "Partie 1\nUn langage de programmation permet de décrire avec précision des opérations très simples sur des données. Comme tout langage, il a une grammaire et des mot-clés. La complexité d'un programme vient de ce qu'il faut beaucoup d'opérations simples pour arriver à ses fins. Voyons cela quelques usages simples. Il vous suffit d'exécuter chaque petit extrait en appuyant sur le triangle pointant vers la droite ci-dessus. N'hésitez pas à modifier les extraits pour mieux comprendre ce que le programme fait.\nLa calculatrice", "x = 5\ny = 10\nz = x + y\nprint(z) # affiche z", "On programme sert souvent à automatiser un calcul comme le calcul mensuel du taux de chômage, le taux d'inflation, le temps qu'il fera demain... Pour pouvoir répéter ce même calcul sur des valeurs différentes, il faut pouvoir décrire ce calcul sans savoir ce que sont ces valeurs. Un moyen simple est de les nommer : on utilise des variables. Une variable désigne des données. x=5 signifie que la variable xcontient 5. x+3 signifie qu'on ajoute 3 à x sans avoir besoin de savoir ce que x désigne.\nL'addition, l'incrémentation", "x = 2\ny = x + 1\nprint(y)\nx += 5\nprint(x)", "Lorsqu'on programme, on passe son temps à écrire des calculs à partir de variables pour les stocker dans d'autres variables voire dans les mêmes variables. Lorsqu'on écrit y=x+5, cela veut dire qu'on doit ajouter 5 à x et qu'on stocke le résultat dans y. Lorsqu'on écrit x += 5, cela veut dire qu'on doit ajouter 5 à x et qu'on n'a plus besoin de la valeur que x contenait avant l'opération.\nLa répétition ou les boucles", "a = 0\nfor i in range (0, 10) :\n a = a + i # répète dix fois cette ligne\nprint (a) ", "Le mot-clé print n'a pas d'incidence sur le programme. 
En revanche, il permet d'afficher l'état d'une variable au moment où on exécute l'instruction print.\nL'aiguillage ou les tests", "a = 10\nif a > 0 :\n print(a) # un seul des deux blocs est pris en considération\nelse:\n a -= 1\n print(a)", "Les chaînes de caractères", "a = 10\nprint(a) # quelle est la différence\nprint(\"a\") # entre les deux lignes\ns = \"texte\"\ns += \"c\"\nprint(s) ", "Toute valeur a un type et cela détermine les opérations qu'on peut faire dessus. 2 + 2 fait 4 pour tout le monde. 2 + \"2\" fait quatre pour un humain, mais est incompréhensible pour l'ordinateur car on ajoute deux choses différentes (torchon + serviette).", "print(\"2\" + \"3\")\nprint(2+3)", "Partie 2\nDans cette seconde série, partie, il s'agit d'interpréter pourquoi un programme ne fait pas ce qu'il est censé faire ou pourquoi il provoque une erreur, et si possible, de corriger cette erreur.\nUn oubli", "a = 5\na += 4\nprint(a) # on voudrait voir 9 mais c'est 5 qui apparaît", "Une erreur de syntaxe", "a = 0\nfor i in range (0, 10): # il manque quelque chose sur cette ligne\n a = a + i\nprint(a)", "Une autre erreur de syntaxe", "a = 0\nfor i in range (0, 10):\n a = a + i # regardez bien\nprint(a) ", "Une opération interdite", "a = 0\ns = \"e\"\nprint(a + s) # petit problème de type", "Un nombre impair de...", "a = 0\nfor i in range (0, 10) :\n a = (a + (i+2)*3 ) # comptez bien\nprint(a) ", "Partie 3\nIl faut maintenant écrire trois programmes qui :\n\nEcrire un programme qui calcule la somme des 10 premiers entiers au carré.\nEcrire un programme qui calcule la somme des 5 premiers entiers impairs au carré.\nEcrire un programme qui calcule la somme des qui 10 premières factorielles : $\\sum_{i=1}^{10} i!$.\n\nA propos de la parité :", "14%2, 233%2", "Tutor Magic\nCet outil permet de visualiser le déroulement des programmes (pas trop grand, site original pythontutor.com).", "%load_ext tutormagic\n\n%%tutor --lang python3\n\na = 0\nfor i in range (0, 10):\n a = a + i", 
"Arriverez-vous à résoudre le première exercice du site CodinGame ?" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ethen8181/machine-learning
trees/lightgbm.ipynb
mit
[ "<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#LightGBM\" data-toc-modified-id=\"LightGBM-1\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>LightGBM</a></span><ul class=\"toc-item\"><li><span><a href=\"#Data-Preprocessing\" data-toc-modified-id=\"Data-Preprocessing-1.1\"><span class=\"toc-item-num\">1.1&nbsp;&nbsp;</span>Data Preprocessing</a></span></li><li><span><a href=\"#Benchmarking\" data-toc-modified-id=\"Benchmarking-1.2\"><span class=\"toc-item-num\">1.2&nbsp;&nbsp;</span>Benchmarking</a></span></li><li><span><a href=\"#Categorical-Variables-in-Tree-based-Models\" data-toc-modified-id=\"Categorical-Variables-in-Tree-based-Models-1.3\"><span class=\"toc-item-num\">1.3&nbsp;&nbsp;</span>Categorical Variables in Tree-based Models</a></span></li></ul></li><li><span><a href=\"#Reference\" data-toc-modified-id=\"Reference-2\"><span class=\"toc-item-num\">2&nbsp;&nbsp;</span>Reference</a></span></li></ul></div>", "# code for loading the format for the notebook\nimport os\n\n# path : store the current path to convert back to it later\npath = os.getcwd()\nos.chdir(os.path.join('..', 'notebook_format'))\n\nfrom formats import load_style\nload_style(css_style='custom2.css', plot_style=False)\n\nos.chdir(path)\n\n# 1. magic for inline plot\n# 2. magic to print version\n# 3. magic so that the notebook will reload external python modules\n# 4. 
magic to enable retina (high resolution) plots\n# https://gist.github.com/minrk/3301035\n%matplotlib inline\n%load_ext watermark\n%load_ext autoreload\n%autoreload 2\n%config InlineBackend.figure_format='retina'\n\nimport os\nimport re\nimport time\nimport requests\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom xgboost import XGBClassifier\nfrom lightgbm import LGBMClassifier\nfrom lightgbm import plot_importance\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder, OrdinalEncoder\n\n%watermark -a 'Ethen' -d -t -v -p numpy,pandas,sklearn,matplotlib,xgboost,lightgbm", "LightGBM\nGradient boosting is a machine learning technique that produces a prediction model in the form of an ensemble of weak classifiers, optimizing for a differentiable loss function. One of the most popular types of gradient boosting is gradient boosted trees, that internally is made up of an ensemble of week decision trees. There are two different ways to compute the trees: level-wise and leaf-wise as illustrated by the diagram below:\n<img src=\"img/levelwise.png\" width=\"50%\" height=\"50%\">\n<img src=\"img/leafwise.png\" width=\"60%\" height=\"60%\">\n\nThe level-wise strategy adds complexity extending the depth of the tree level by level. As a contrary, the leaf-wise strategy generates branches by optimizing a loss.\n\nThe level-wise strategy grows the tree level by level. In this strategy, each node splits the data prioritizing the nodes closer to the tree root. The leaf-wise strategy grows the tree by splitting the data at the nodes with the highest loss change. Level-wise growth is usually better for smaller datasets whereas leaf-wise tends to overfit. 
Leaf-wise growth tends to excel in larger datasets where it is considerably faster than level-wise growth.\nA key challenge in training boosted decision trees is the computational cost of finding the best split for each leaf. Conventional techniques find the exact split for each leaf, and require scanning through all the data in each iteration. A different approach approximates the split by building histograms of the features. That way, the algorithm doesn’t need to evaluate every single value of the features to compute the split, but only the bins of the histogram, which are bounded. This approach turns out to be much more efficient for large datasets, without adversely affecting accuracy.\nWith all of that being said LightGBM is a fast, distributed, high performance gradient boosting that was open-source by Microsoft around August 2016. The main advantages of LightGBM includes:\n\nFaster training speed and higher efficiency: LightGBM use histogram based algorithm i.e it buckets continuous feature values into discrete bins which fasten the training procedure.\nLower memory usage: Replaces continuous values to discrete bins which result in lower memory usage.\nBetter accuracy than any other boosting algorithm: It produces much more complex trees by following leaf wise split approach rather than a level-wise approach which is the main factor in achieving higher accuracy. 
However, it can sometimes lead to overfitting which can be avoided by setting the max_depth parameter.\nCompatibility with Large Datasets: It is capable of performing equally good with large datasets with a significant reduction in training time as compared to XGBoost.\nParallel learning supported.\n\nThe significant speed advantage of LightGBM translates into the ability to do more iterations and/or quicker hyperparameter search, which can be very useful if we have a limited time budget for optimizing your model or want to experiment with different feature engineering ideas.\nData Preprocessing\nThis notebook compares LightGBM with XGBoost, another extremely popular gradient boosting framework by applying both the algorithms to a dataset and then comparing the model's performance and execution time. Here we will be using the Adult dataset that consists of 32561 observations and 14 features describing individuals from various countries. Our target is to predict whether a person makes <=50k or >50k annually on basis of the other information available. 
Dataset consists of 32561 observations and 14 features describing individuals.", "def get_data():\n file_path = 'adult.csv'\n if not os.path.isfile(file_path):\n def chunks(input_list, n_chunk):\n \"\"\"take a list and break it up into n-size chunks\"\"\"\n for i in range(0, len(input_list), n_chunk):\n yield input_list[i:i + n_chunk] \n\n columns = [\n 'age',\n 'workclass',\n 'fnlwgt',\n 'education',\n 'education_num',\n 'marital_status',\n 'occupation',\n 'relationship',\n 'race',\n 'sex',\n 'capital_gain',\n 'capital_loss',\n 'hours_per_week',\n 'native_country',\n 'income'\n ]\n\n url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data'\n r = requests.get(url)\n raw_text = r.text.replace('\\n', ',')\n splitted_text = re.split(r',\\s*', raw_text)\n data = list(chunks(splitted_text, n_chunk=len(columns)))\n data = pd.DataFrame(data, columns=columns).dropna(axis=0, how='any')\n data.to_csv(file_path, index=False)\n\n data = pd.read_csv(file_path)\n return data\n\n\ndata = get_data()\nprint('dimensions:', data.shape)\ndata.head()\n\nlabel_col = 'income'\ncat_cols = [\n 'workclass',\n 'education',\n 'marital_status',\n 'occupation',\n 'relationship',\n 'race',\n 'sex',\n 'native_country'\n]\n\nnum_cols = [\n 'age',\n 'fnlwgt',\n 'education_num',\n 'capital_gain',\n 'capital_loss',\n 'hours_per_week'\n]\n\nprint('number of numerical features: ', len(num_cols))\nprint('number of categorical features: ', len(cat_cols))\n\nlabel_encode = LabelEncoder() \ndata[label_col] = label_encode.fit_transform(data[label_col])\ny = data[label_col].values\ndata = data.drop(label_col, axis=1)\n\nprint('labels distribution:', np.bincount(y) / y.size)\n\ntest_size = 0.1\nsplit_random_state = 1234\ndf_train, df_test, y_train, y_test = train_test_split(\n data, y, test_size=test_size,\n random_state=split_random_state, stratify=y)\n\ndf_train = df_train.reset_index(drop=True)\ndf_test = df_test.reset_index(drop=True)\n\nprint('dimensions:', 
df_train.shape)\ndf_train.head()", "We'll perform very little feature engineering as that's not our main focus here. The following code chunk only one hot encodes the categorical features. There will be follow up discussions on this in later section.", "from sklearn.preprocessing import OneHotEncoder\n\none_hot_encoder = OneHotEncoder(sparse=False, dtype=np.int32)\none_hot_encoder.fit(df_train[cat_cols])\ncat_one_hot_cols = one_hot_encoder.get_feature_names(cat_cols)\n\nprint('number of one hot encoded categorical columns: ', len(cat_one_hot_cols))\ncat_one_hot_cols[:5]\n\ndef preprocess_one_hot(df, one_hot_encoder, num_cols, cat_cols):\n df = df.copy()\n \n cat_one_hot_cols = one_hot_encoder.get_feature_names(cat_cols)\n\n df_one_hot = pd.DataFrame(\n one_hot_encoder.transform(df[cat_cols]),\n columns=cat_one_hot_cols\n )\n df_preprocessed = pd.concat([\n df[num_cols],\n df_one_hot\n ], axis=1)\n return df_preprocessed\n\ndf_train_one_hot = preprocess_one_hot(df_train, one_hot_encoder, num_cols, cat_cols)\ndf_test_one_hot = preprocess_one_hot(df_test, one_hot_encoder, num_cols, cat_cols)\nprint(df_train_one_hot.shape)\ndf_train_one_hot.dtypes", "Benchmarking\nThe next section compares the xgboost and lightgbm's implementation in terms of both execution time and model performance. 
There are a bunch of other hyperparameters that we as the end-user can specify, but here we explicity specify arguably the most important ones.", "time.sleep(5)\n\nlgb = LGBMClassifier(\n n_jobs=-1,\n max_depth=6,\n subsample=1,\n n_estimators=100,\n learning_rate=0.1,\n colsample_bytree=1,\n objective='binary',\n boosting_type='gbdt')\n\nstart = time.time()\nlgb.fit(df_train_one_hot, y_train)\nlgb_elapse = time.time() - start\nprint('elapse:, ', lgb_elapse)\n\ntime.sleep(5)\n\n# raw xgboost\nxgb = XGBClassifier(\n n_jobs=-1,\n max_depth=6,\n subsample=1,\n n_estimators=100,\n learning_rate=0.1,\n colsample_bytree=1,\n objective='binary:logistic',\n booster='gbtree')\n\nstart = time.time()\nxgb.fit(df_train_one_hot, y_train)\nxgb_elapse = time.time() - start\nprint('elapse:, ', xgb_elapse)", "XGBoost includes a tree_method = 'hist'option that buckets continuous variables into bins to speed up training, we also set grow_policy = 'lossguide' to favor splitting at nodes with highest loss change, which mimics LightGBM.", "time.sleep(5)\n\nxgb_hist = XGBClassifier(\n n_jobs=-1,\n max_depth=6,\n subsample=1,\n n_estimators=100,\n learning_rate=0.1,\n colsample_bytree=1,\n objective='binary:logistic',\n booster='gbtree',\n tree_method='hist',\n grow_policy='lossguide')\n\nstart = time.time()\nxgb_hist.fit(df_train_one_hot, y_train)\nxgb_hist_elapse = time.time() - start\nprint('elapse:, ', xgb_hist_elapse)\n\n# evaluate performance\ny_pred = lgb.predict_proba(df_test_one_hot)[:, 1]\nlgb_auc = roc_auc_score(y_test, y_pred)\nprint('auc score: ', lgb_auc)\n\ny_pred = xgb.predict_proba(df_test_one_hot)[:, 1]\nxgb_auc = roc_auc_score(y_test, y_pred)\nprint('auc score: ', xgb_auc)\n\ny_pred = xgb_hist.predict_proba(df_test_one_hot)[:, 1]\nxgb_hist_auc = roc_auc_score(y_test, y_pred)\nprint('auc score: ', xgb_hist_auc)\n\n# comparison table\nresults = pd.DataFrame({\n 'elapse_time': [lgb_elapse, xgb_hist_elapse, xgb_elapse],\n 'auc_score': [lgb_auc, xgb_hist_auc, 
xgb_auc]})\nresults.index = ['LightGBM', 'XGBoostHist', 'XGBoost']\nresults", "From the resulting table, we can see that there isn't a noticeable difference in auc score between the two implementations. On the other hand, there is a significant difference in the time it takes to finish the whole training procedure. This is a huge advantage and makes LightGBM a much better approach when dealing with large datasets.\nFor those interested, the people at Microsoft has a blog that has a even more thorough benchmark result on various datasets. Link is included below along with a summary of their results:\n\nBlog: Lessons Learned From Benchmarking Fast Machine Learning Algorithms\nOur results, based on tests on six datasets, are summarized as follows:\n\nXGBoost and LightGBM achieve similar accuracy metrics.\nLightGBM has lower training time than XGBoost and its histogram-based variant, XGBoost hist, for all test datasets, on both CPU and GPU implementations. The training time difference between the two libraries depends on the dataset, and can be as big as 25 times.\nXGBoost GPU implementation does not scale well to large datasets and ran out of memory in half of the tests.\nXGBoost hist may be significantly slower than the original XGBoost when feature dimensionality is high.\n\n\nCategorical Variables in Tree-based Models\nMany real-world datasets include a mix of continuous and categorical variables. The property of the latter is that their values has zero inherent ordering. One major advantage of decision tree models and their ensemble counterparts, such as random forests, extra trees and gradient boosted trees, is that they are able to operate on both continuous and categorical variables directly (popular implementations of tree-based models differ as to whether they honor this fact). 
In contrast, most other popular models (e.g., generalized linear models, neural networks) must instead transform categorical variables into some numerical format, usually by one-hot encoding them to create a new dummy variable for each level of the original variable. e.g.\n<img src=\"img/onehot_encoding.png\" width=\"80%\" height=\"80%\">\nOne drawback of one hot encoding is that they can lead to a huge increase in the dimensionality of the feature representations. For example, one hot encoding U.S. states adds 49 dimensions to to our feature representation.\nTo understand why we don't need to perform one hot encoding for tree-based models, we need to refer back to the logic of tree-based algorithms. At the heart of the tree-based algorithm is a sub-algorithm that splits the samples into two bins by selecting a feature and a value. This splitting algorithm considers each of the features in turn, and for each feature selects the value of that feature that minimizes the impurity of the bins.\nThis means tree-based models are essentially looking for places to split the data, they are not multiplying our inputs by weights. 
In contrast, most other popular models (e.g., generalized linear models, neural networks) would interpret categorical variables such as red=1, blue=2 as blue is twice the amount of red, which is obviously not what we want.", "ordinal_encoder = OrdinalEncoder(dtype=np.int32)\nordinal_encoder.fit(df_train[cat_cols])\n\ndef preprocess_ordinal(df, ordinal_encoder, cat_cols, cat_dtype='int32'):\n df = df.copy()\n df[cat_cols] = ordinal_encoder.transform(df[cat_cols])\n df[cat_cols] = df[cat_cols].astype(cat_dtype)\n return df\n\ndf_train_ordinal = preprocess_ordinal(df_train, ordinal_encoder, cat_cols)\ndf_test_ordinal = preprocess_ordinal(df_test, ordinal_encoder, cat_cols)\nprint(df_train_ordinal.shape)\ndf_train_ordinal.dtypes\n\ntime.sleep(5)\n\nlgb = LGBMClassifier(\n n_jobs=-1,\n max_depth=6,\n subsample=1,\n n_estimators=100,\n learning_rate=0.1,\n colsample_bytree=1,\n objective='binary',\n boosting_type='gbdt')\n\nstart = time.time()\nlgb.fit(df_train_ordinal, y_train)\nlgb_ordinal_elapse = time.time() - start\nprint('elapse:, ', lgb_ordinal_elapse)\n\ny_pred = lgb.predict_proba(df_test_ordinal)[:, 1]\nlgb_ordinal_auc = roc_auc_score(y_test, y_pred)\nprint('auc score: ', lgb_ordinal_auc)\n\n# comparison table\nresults = pd.DataFrame({\n 'elapse_time': [lgb_ordinal_elapse, lgb_elapse, xgb_hist_elapse, xgb_elapse],\n 'auc_score': [lgb_ordinal_auc, lgb_auc, xgb_hist_auc, xgb_auc]})\nresults.index = ['LightGBM Ordinal', 'LightGBM', 'XGBoostHist', 'XGBoost']\nresults", "From the result above, we can see that it requires even less training time without sacrificing any sort of performance. What's even more is that we now no longer need to perform the one hot encoding on our categorical features. 
The code chunk below shows this is highly advantageous from a memory-usage perspective when we have a bunch of categorical features.", "print('OneHot Encoding')\nprint('number of columns: ', df_train_one_hot.shape[1])\nprint('memory usage: ', df_train_one_hot.memory_usage(deep=True).sum())\nprint()\n\nprint('Ordinal Encoding')\nprint('number of columns: ', df_train_ordinal.shape[1])\nprint('memory usage: ', df_train_ordinal.memory_usage(deep=True).sum())\n\n# plotting the feature importance just out of curiosity\n\n# change default style figure and font size\nplt.rcParams['figure.figsize'] = 10, 8\nplt.rcParams['font.size'] = 12\n\n# like other tree-based models, it can also output the\n# feature importance plot\nplot_importance(lgb, importance_type='gain')\nplt.show()", "For tuning LightGBM's hyperparameter, the documentation page has some pretty good suggestions. LightGBM Documentation: Parameters Tuning\nReference\n\nLightGBM Documentation: Parameters Tuning\nBlog: xgboost’s New Fast Histogram (tree_method = hist)\nBlog: Which algorithm takes the crown: Light GBM vs XGBOOST?\nBlog: Are categorical variables getting lost in your random forests?\nBlog: Lessons Learned From Benchmarking Fast Machine Learning Algorithms\nStackoverflow: Why tree-based model do not need one-hot encoding for nominal data?" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
cgpotts/cs224u
finetuning.ipynb
apache-2.0
[ "Bringing contextual word representations into your models", "__author__ = \"Christopher Potts\"\n__version__ = \"CS224u, Stanford, Spring 2022\"", "Contents\n\nOverview\nGeneral set-up\nHugging Face BERT models and tokenizers\nBERT featurization with Hugging Face\nSimple feed-forward experiment\nA feed-forward experiment with the sst module\nAn RNN experiment with the sst module\n\n\nBERT fine-tuning with Hugging Face\nHfBertClassifier\nHfBertClassifier experiment\n\n\n\nOverview\nThis notebook provides a basic introduction to using pre-trained BERT representations with the Hugging Face library. It is meant as a practical companion to our lecture on contextual word representations. The goal of this notebook is just to help you use these representations in your own work.\nIf you haven't already, I encourage you to review the notebook vsm_04_contextualreps.ipynb before working with this one. That notebook covers the fundamentals of these models; this one dives into the details more quickly.\nA number of the experiments in this notebook are resource-intensive. I've included timing information for the expensive steps, to give you a sense for how long things are likely to take. I ran this notebook on a laptop with a single NVIDIA RTX 2080 GPU. \nGeneral set-up\nThe following are requirements that you'll already have met if you've been working in this repository. 
As you can see, we'll use the Stanford Sentiment Treebank for illustrations, and we'll try out a few different deep learning models.", "import os\nfrom sklearn.metrics import classification_report\nimport torch\nimport torch.nn as nn\nimport transformers\nfrom transformers import BertModel, BertTokenizer\n\nfrom torch_shallow_neural_classifier import TorchShallowNeuralClassifier\nfrom torch_rnn_classifier import TorchRNNModel\nfrom torch_rnn_classifier import TorchRNNClassifier\nfrom torch_rnn_classifier import TorchRNNClassifierModel\nfrom torch_rnn_classifier import TorchRNNClassifier\nimport sst\nimport utils\n\nutils.fix_random_seeds()\n\nSST_HOME = os.path.join(\"data\", \"sentiment\")", "The transformers library does a lot of logging. To avoid ending up with a cluttered notebook, I am changing the logging level. You might want to skip this as you scale up to building production systems, since the logging is very good – it gives you a lot of insights into what the models and code are doing.", "transformers.logging.set_verbosity_error()", "Hugging Face BERT models and tokenizers\nWe'll illustrate with the BERT-base cased model:", "weights_name = 'bert-base-cased'", "There are lots other options for pretrained weights. See this Hugging Face directory.\nNext, we specify a tokenizer and a model that match both each other and our choice of pretrained weights:", "bert_tokenizer = BertTokenizer.from_pretrained(weights_name)\n\nbert_model = BertModel.from_pretrained(weights_name)", "For modeling (as opposed to creating static representations), we will mostly process examples in batches – generally very small ones, as these models consume a lot of memory. Here's a small batch of texts to use as the starting point for illustrations:", "example_texts = [\n \"Encode sentence 1. 
[SEP] And sentence 2!\",\n \"Bert knows Snuffleupagus\"]", "We will often need to pad (and perhaps truncate) token lists so that we can work with fixed-dimensional tensors: The batch_encode_plus has a lot of options for doing this:", "example_ids = bert_tokenizer.batch_encode_plus(\n example_texts,\n add_special_tokens=True,\n return_attention_mask=True,\n padding='longest')\n\nexample_ids.keys()", "The token_type_ids is used for multi-text inputs like NLI. The 'input_ids' field gives the indices for each of the two examples:", "example_ids['input_ids']", "Notice that the final two tokens of the second example are pad tokens.\nFor fine-tuning, we want to avoid attending to padded tokens. The 'attention_mask' captures the needed mask, which we'll be able to feed directly to the pretrained BERT model:", "example_ids['attention_mask']", "Finally, we can run these indices and masks through the pretrained model:", "X_example = torch.tensor(example_ids['input_ids'])\nX_example_mask = torch.tensor(example_ids['attention_mask'])\n\nwith torch.no_grad():\n reps = bert_model(X_example, attention_mask=X_example_mask)", "Hugging Face BERT models create a special pooler_output representation that is the final representation above the [CLS] extended with a single layer of parameters:", "reps.pooler_output.shape", "We have two examples, each representented by a single vector of dimension 768, which is $d_{model}$ for BERT base using the notation from the original Transformers paper. This is an easy basis for fine-tuning, as we will see.\nWe can also access the final output for each state:", "reps.last_hidden_state.shape", "Here, we have 2 examples, each padded to the length of the longer one (12), and each of those representations has dimension 768. These representations can be used for sequence modeling, or pooled somehow for simple classifiers.\nThose are all the essential ingredients for working with these parameters in Hugging Face. 
Of course, the library has a lot of other functionality, but the above suffices to featurize and to fine-tune.\nBERT featurization with Hugging Face\nTo start, we'll use the Hugging Face interfaces just to featurize examples to create inputs to a separate model. In this setting, the BERT parameters are frozen.", "def bert_phi(text):\n input_ids = bert_tokenizer.encode(text, add_special_tokens=True)\n X = torch.tensor([input_ids])\n with torch.no_grad():\n reps = bert_model(X)\n return reps.last_hidden_state.squeeze(0).numpy()", "Simple feed-forward experiment\nFor a simple feed-forward experiment, we can get the representation of the [CLS] tokens and use them as the inputs to a shallow neural network:", "def bert_classifier_phi(text):\n reps = bert_phi(text)\n #return reps.mean(axis=0) # Another good, easy option.\n return reps[0]", "Next we read in the SST train and dev splits:", "train = sst.train_reader(SST_HOME)\n\ndev = sst.dev_reader(SST_HOME)", "Split the input/output pairs out into separate lists:", "X_str_train = train.sentence.values\ny_train = train.label.values\n\nX_str_dev = dev.sentence.values\ny_dev = dev.label.values", "In the next step, we featurize all of the examples. 
These steps are likely to be the slowest in these experiments:", "%time X_train = [bert_classifier_phi(text) for text in X_str_train]\n\n%time X_dev = [bert_classifier_phi(text) for text in X_str_dev]", "Now that all the examples are featurized, we can fit a model and evaluate it:", "model = TorchShallowNeuralClassifier(\n early_stopping=True,\n hidden_dim=300)\n\n%time _ = model.fit(X_train, y_train)\n\npreds = model.predict(X_dev)\n\nprint(classification_report(y_dev, preds, digits=3))", "A feed-forward experiment with the sst module\nIt is straightforward to conduct experiments like the above using sst.experiment, which will enable you to do a wider range of experiments without writing or copy-pasting a lot of code.", "def fit_shallow_network(X, y):\n mod = TorchShallowNeuralClassifier(\n hidden_dim=300,\n early_stopping=True)\n mod.fit(X, y)\n return mod\n\n%%time\n_ = sst.experiment(\n sst.train_reader(SST_HOME),\n bert_classifier_phi,\n fit_shallow_network,\n assess_dataframes=sst.dev_reader(SST_HOME),\n vectorize=False) # Pass in the BERT reps directly!", "An RNN experiment with the sst module\nWe can also use BERT representations as the input to an RNN. There is just one key change from how we used these models before:\n\n\nPreviously, we would feed in lists of tokens, and they would be converted to indices into a fixed embedding space. This presumes that all words have the same representation no matter what their context is. \n\n\nWith BERT, we skip the embedding entirely and just feed in lists of BERT vectors, which means that the same word can be represented in different ways.\n\n\nTorchRNNClassifier supports this via use_embedding=False. 
In turn, you needn't supply a vocabulary:", "def fit_rnn(X, y):\n mod = TorchRNNClassifier(\n vocab=[],\n early_stopping=True,\n use_embedding=False) # Pass in the BERT hidden states directly!\n mod.fit(X, y)\n return mod\n\n%%time\n_ = sst.experiment(\n sst.train_reader(SST_HOME),\n bert_phi,\n fit_rnn,\n assess_dataframes=sst.dev_reader(SST_HOME),\n vectorize=False) # Pass in the BERT hidden states directly!", "BERT fine-tuning with Hugging Face\nThe above experiments are quite successful – BERT gives us a reliable boost compared to other methods we've explored for the SST task. However, we might expect to do even better if we fine-tune the BERT parameters as part of fitting our SST classifier. To do that, we need to incorporate the Hugging Face BERT model into our classifier. This too is quite straightforward.\nHfBertClassifier\nThe most important step is to create an nn.Module subclass that has, for its parameters, both the BERT model and parameters for our own classifier. Here we define a very simple fine-tuning set-up in which some layers built on top of the output corresponding to [CLS] are used as the basis for the SST classifier:", "class HfBertClassifierModel(nn.Module):\n def __init__(self, n_classes, weights_name='bert-base-cased'):\n super().__init__()\n self.n_classes = n_classes\n self.weights_name = weights_name\n self.bert = BertModel.from_pretrained(self.weights_name)\n self.bert.train()\n self.hidden_dim = self.bert.embeddings.word_embeddings.embedding_dim\n # The only new parameters -- the classifier:\n self.classifier_layer = nn.Linear(\n self.hidden_dim, self.n_classes)\n\n def forward(self, indices, mask):\n reps = self.bert(\n indices, attention_mask=mask)\n return self.classifier_layer(reps.pooler_output)", "As you can see, self.bert does the heavy-lifting: it reads in all the pretrained BERT parameters, and I've specified self.bert.train() just to make sure that these parameters can be updated during our training process. 
\nIn forward, self.bert is used to process inputs, and then pooler_output is fed into self.classifier_layer. Hugging Face has already added a layer on top of the actual output for [CLS], so we can specify the model as\n$$\n\\begin{align}\n[h_{1}, \\ldots, h_{n}] &= \\text{BERT}([x_{1}, \\ldots, x_{n}]) \\\nh &= \\tanh(h_{1}W_{hh} + b_{h}) \\\ny &= \\textbf{softmax}(hW_{hy} + b_{y})\n\\end{align}$$\nfor a tokenized input sequence $[x_{1}, \\ldots, x_{n}]$. \nThe Hugging Face documentation somewhat amusingly says, of pooler_output,\n\nThis output is usually not a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence.\n\nwhich is entirely reasonable, but it will require more resources, so we'll do the simpler thing here.\nFor the training and prediction interface, we can subclass TorchShallowNeuralClassifier so that we don't have to write any of our own data-handling, training, or prediction code. 
The central changes are using HfBertClassifierModel in build_graph and processing the data with batch_encode_plus.", "class HfBertClassifier(TorchShallowNeuralClassifier):\n def __init__(self, weights_name, *args, **kwargs):\n self.weights_name = weights_name\n self.tokenizer = BertTokenizer.from_pretrained(self.weights_name)\n super().__init__(*args, **kwargs)\n self.params += ['weights_name']\n\n def build_graph(self):\n return HfBertClassifierModel(self.n_classes_, self.weights_name)\n\n def build_dataset(self, X, y=None):\n data = self.tokenizer.batch_encode_plus(\n X,\n max_length=None,\n add_special_tokens=True,\n padding='longest',\n return_attention_mask=True)\n indices = torch.tensor(data['input_ids'])\n mask = torch.tensor(data['attention_mask'])\n if y is None:\n dataset = torch.utils.data.TensorDataset(indices, mask)\n else:\n self.classes_ = sorted(set(y))\n self.n_classes_ = len(self.classes_)\n class2index = dict(zip(self.classes_, range(self.n_classes_)))\n y = [class2index[label] for label in y]\n y = torch.tensor(y)\n dataset = torch.utils.data.TensorDataset(indices, mask, y)\n return dataset", "HfBertClassifier experiment\nThat's it! Let's see how we do on the SST binary, root-only problem. 
Because fine-tuning is expensive, we'll conduct a modest hyperparameter search and run the model for just one epoch per setting evaluation, as we did when assessing NLI models.", "def bert_fine_tune_phi(text):\n return text\n\ndef fit_hf_bert_classifier_with_hyperparameter_search(X, y):\n basemod = HfBertClassifier(\n weights_name='bert-base-cased',\n batch_size=8, # Small batches to avoid memory overload.\n max_iter=1, # We'll search based on 1 iteration for efficiency.\n n_iter_no_change=5, # Early-stopping params are for the\n early_stopping=True) # final evaluation.\n\n param_grid = {\n 'gradient_accumulation_steps': [1, 4, 8],\n 'eta': [0.00005, 0.0001, 0.001],\n 'hidden_dim': [100, 200, 300]}\n\n bestmod = utils.fit_classifier_with_hyperparameter_search(\n X, y, basemod, cv=3, param_grid=param_grid)\n\n return bestmod\n\n%%time\nbert_classifier_xval = sst.experiment(\n sst.train_reader(SST_HOME),\n bert_fine_tune_phi,\n fit_hf_bert_classifier_with_hyperparameter_search,\n assess_dataframes=sst.dev_reader(SST_HOME),\n vectorize=False) # Pass in the BERT hidden state directly!", "And now on to the final test-set evaluation, using the best model from above:", "optimized_bert_classifier = bert_classifier_xval['model']\n\n# Remove the rest of the experiment results to clear out some memory:\ndel bert_classifier_xval\n\ndef fit_optimized_hf_bert_classifier(X, y):\n optimized_bert_classifier.max_iter = 1000\n optimized_bert_classifier.fit(X, y)\n return optimized_bert_classifier\n\ntest_df = sst.sentiment_reader(\n os.path.join(SST_HOME, \"sst3-test-labeled.csv\"))\n\n%%time\n_ = sst.experiment(\n sst.train_reader(SST_HOME),\n bert_fine_tune_phi,\n fit_optimized_hf_bert_classifier,\n assess_dataframes=test_df,\n vectorize=False) # Pass in the BERT hidden state directly!" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
yy/dviz-course
m04-perception/lab.ipynb
mit
[ "W3 Lab: Perception\nIn this lab, we will learn basic usage of pandas library and then perform a small experiment to test the perception of length and area.", "import pandas as pd\nimport math\nimport matplotlib.pyplot as plt\n%matplotlib inline", "Vega datasets\nBefore going into the perception experiment, let's first talk about some handy datasets that you can play with. \nIt's nice to have clean datasets handy to practice data visualization. There is a nice small package called vega-datasets, from the altair project. \nYou can install the package by running\n$ pip install vega-datasets\n\nor \n$ pip3 install vega-datasets\n\nOnce you install the package, you can import and see the list of datasets:", "from vega_datasets import data\n\ndata.list_datasets()", "or you can work with only smaller, local datasets.", "from vega_datasets import local_data\nlocal_data.list_datasets()", "Ah, we have the anscombe data here! Let's see the description of the dataset.", "local_data.anscombe.description", "Anscombe's quartet dataset\nHow does the actual data look like? Very conveniently, calling the dataset returns a Pandas dataframe for you.", "df = local_data.anscombe()\ndf.head()", "Q1: can you draw a scatterplot of the dataset \"I\"? You can filter the dataframe based on the Series column and use plot function that you used for the Snow's map.", "# TODO: put your code here\n", "Some histograms with pandas\nLet's look at a slightly more complicated dataset.", "car_df = local_data.cars()\ncar_df.head()", "Pandas provides useful summary functions. It identifies numerical data columns and provides you with a table of summary statistics.", "car_df.describe()", "If you ask to draw a histogram, you get all of them. :)", "car_df.hist()", "Well this is too small. You can check out the documentation and change the size of the figure. \nQ2: by consulting the documentation, can you make the figure larger so that we can see all the labels clearly? 
And then make the layout 2 x 3 not 3 x 2, then change the number of bins to 20?", "# TODO: put your code here\n", "Your own psychophysics experiment!\nLet's do an experiment! The procedure is as follows:\n\nGenerate a random number between [1, 10];\nUse a horizontal bar to represent the number, i.e., the length of the bar is equal to the number;\nGuess the length of the bar by comparing it to two other bars with length 1 and 10 respectively;\nStore your guess (perceived length) and actual length to two separate lists;\nRepeat the above steps many times;\nHow does the perception of length differ from that of area?.\n\nFirst, let's define the length of a short and a long bar. We also create two empty lists to store perceived and actual length.", "import random\nimport time\nimport numpy as np\n\nl_short_bar = 1\nl_long_bar = 10\n\nperceived_length_list = []\nactual_length_list = []", "Perception of length\nLet's run the experiment.\nThe random module in Python provides various random number generators, and the random.uniform(a,b) function returns a floating point number in [a,b]. \nWe can plot horizontal bars using the pyplot.barh() function. Using this function, we can produce a bar graph that looks like this:", "mystery_length = random.uniform(1, 10) # generate a number between 1 and 10. this is the *actual* length.\n\nplt.barh(np.arange(3), [l_short_bar, mystery_length, l_long_bar], align='center')\nplt.yticks(np.arange(3), ('1', '?', '10'))\nplt.xticks([]) # no hint!", "Btw, np.arange is used to create a simple integer list [0, 1, 2].", "np.arange(3)", "Now let's define a function to perform the experiment once. When you run this function, it picks a random number between 1.0 and 10.0 and show the bar chart. Then it asks you to input your estimate of the length of the middle bar. 
It then saves that number to the perceived_length_list and the actual answer to the actual_length_list.", "def run_exp_once():\n mystery_length = random.uniform(1, 10) # generate a number between 1 and 10. \n\n plt.barh(np.arange(3), [l_short_bar, mystery_length, l_long_bar], height=0.5, align='center')\n plt.yticks(np.arange(3), ('1', '?', '10'))\n plt.xticks([]) # no hint!\n plt.show()\n \n try:\n perceived_length_list.append( float(input()) )\n except:\n print(\"This should only fail in workflow. If you are running this in browser, this won't fail.\")\n pass\n actual_length_list.append(mystery_length)\n\nrun_exp_once()", "Now, run the experiment many times to gather your data. Check the two lists to make sure that you have the proper dataset. The length of the two lists should be the same.", "# TODO: Run your experiment many times here\n", "Plotting the result\nNow we can draw the scatter plot of perceived and actual length. The matplotlib's scatter() function will do this. This is the backend of the pandas' scatterplot. Here is an example of how to use scatter:", "plt.scatter(x=[1,5,10], y=[1,10, 5])", "Q3: Now plot your result using the scatter() function. You should also use plt.title(), plt.xlabel(), and plt.ylabel() to label your axes and the plot itself.", "# TODO: put your code here\n", "After plotting, let's fit the relation between actual and perceived lengths using a polynomial function. We can easily do it using curve_fit(f, x, y) in Scipy, which is to fit $x$ and $y$ using the function f. In our case, $f = a*x^b +c$. For instance, we can check whether this works by creating a fake dataset that follows the exact form:", "from scipy.optimize import curve_fit\n\ndef func(x, a, b, c):\n return a * np.power(x, b) + c\n\nx = np.arange(20) # [0,1,2,3, ..., 19]\ny = np.power(x, 2) # [0,1,4,9, ... 
]\n\npopt, pcov = curve_fit(func, x, y)\nprint('{:.2f} x^{:.2f} + {:.2f}'.format(*popt))", "In order to plot the function to check the relationship between the actual and perceived lenghts, you can use two variables x and y to plot the relationship where x equals to a series of continuous numbers. For example, if your x axis ranges from 1 to 9 then the variable x could be equal to np.linspace(1, 10, 50). The variable y will contain the equation that you get from popt. For example, if you get equation 1.00 x^2.00 + 0.00 then the variable y would be equal to 1.0 * x**2.0 + 0. \nAfter assigning x and y variables you will plot them in combination with the scatter plot of actual and perceived values to check if you get a linear relationship or not.\nQ4: Now fit your data! Do you see roughly linear relationship between the actual and the perceived lengths? It's ok if you don't!", "# TODO: your code here\n", "Perception of area\nSimilar to the above experiment, we now represent a random number as a circle, and the area of the circle is equal to the number.\nFirst, calculate the radius of a circle from its area and then plot using the Circle() function. 
plt.Circle((0,0), r) will plot a circle centered at (0,0) with radius r.", "n1 = 0.005\nn2 = 0.05\n\nradius1 = np.sqrt(n1/np.pi) # area = pi * r * r\nradius2 = np.sqrt(n2/np.pi)\nrandom_radius = np.sqrt(n1*random.uniform(1,10)/np.pi)\n\nplt.axis('equal')\nplt.axis('off')\ncirc1 = plt.Circle( (0,0), radius1, clip_on=False )\ncirc2 = plt.Circle( (4*radius2,0), radius2, clip_on=False )\nrand_circ = plt.Circle((2*radius2,0), random_radius, clip_on=False )\n\nplt.gca().add_artist(circ1)\nplt.gca().add_artist(circ2)\nplt.gca().add_artist(rand_circ)", "Let's have two lists for this experiment.", "perceived_area_list = []\nactual_area_list = []", "And define a function for the experiment.", "def run_area_exp_once(n1=0.005, n2=0.05): \n radius1 = np.sqrt(n1/np.pi) # area = pi * r * r\n radius2 = np.sqrt(n2/np.pi)\n \n mystery_number = random.uniform(1,10)\n random_radius = np.sqrt(n1*mystery_number/math.pi)\n\n plt.axis('equal')\n plt.axis('off')\n circ1 = plt.Circle( (0,0), radius1, clip_on=False )\n circ2 = plt.Circle( (4*radius2,0), radius2, clip_on=False )\n rand_circ = plt.Circle((2*radius2,0), random_radius, clip_on=False )\n plt.gca().add_artist(circ1)\n plt.gca().add_artist(circ2)\n plt.gca().add_artist(rand_circ) \n plt.show()\n \n perceived_area_list.append( float(input()) )\n actual_area_list.append(mystery_number)", "Q5: Now you can run the experiment many times, plot the result, and fit a power-law curve!", "# TODO: put your code here. You can use multiple cells. ", "What is your result? How are the exponents different from each other?" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
carthach/essentia
src/examples/tutorial/example_truepeakdetector.ipynb
agpl-3.0
[ "TruePeakDetector use example\nThis algorithm implements the “true-peak” level meter as descripted in the second annex of the ITU-R BS.1770-2[1] or the ITU-R BS.1770-4[2] (default).\nNote: the parameters 'blockDC' and 'emphatise' work only when 'version' is set to 2.\n\n References:\n [1] Series, B. S. (2011). Recommendation ITU-R BS.1770-2. Algorithms to\n measure audio programme loudness and true-peak audio level,\n https://www.itu.int/dms_pubrec/itu-r/rec/bs/R-REC-BS.1770-2-201103-S!!PDF-E.\n pdfe \n [2] Series, B. S. (2011). Recommendation ITU-R BS.1770-4. Algorithms\n to measure audio programme loudness and true-peak audio level,\n https://www.itu.int/dms_pubrec/itu-r/rec/bs/R-REC-BS.1770-4-201510-I!!PDF-E.\n pdf", "import essentia.standard as es\nimport numpy as np\nimport matplotlib\nmatplotlib.use('nbagg')\nimport matplotlib.pyplot as plt\nimport ipywidgets as wg\nfrom IPython.display import Audio \nfrom essentia import array as esarr\n\nplt.rcParams[\"figure.figsize\"] =(9, 5)", "The problem of true peak estimation\nThe following widget demonstrates two intersample detection techniques:\n- Signal upsampling. \n- parabolic interpolation. \nThe accuracy of both methods can be assessed in real-time by shifting the sampling points in a Sinc function and evaluating the error produced by both systems.", "# Parameters\n\nduration = 10 # s\nfs = 1 # hz \nk = 1. 
# amplitude\noversamplingFactor = 4 # factor of oversampling for the real signal\nnSamples = fs * duration\n\ntime = np.arange(-nSamples/2, nSamples/2,\n 2 ** -oversamplingFactor, dtype='float')\nsamplingPoints = time[::2 ** oversamplingFactor]\n\ndef shifted_sinc(x, k, offset):\n xShifted = x - offset\n y = np.zeros(len(xShifted))\n for idx, i in enumerate(xShifted):\n if not i: \n y[idx] = k\n else:\n y[idx] = (k * np.sin(np.pi * i) / (np.pi * i))\n return y\n\ndef resampleStrategy(y, fs, quality=0, oversampling=4):\n yResample = es.Resample(inputSampleRate=fs,\n outputSampleRate=fs*oversampling, \n quality=quality)(y.astype(np.float32))\n \n tResample = np.arange(np.min(samplingPoints), np.max(samplingPoints) \n + 1, 1. / (fs * oversampling))\n tResample = tResample[:len(yResample)] \n \n # getting the stimated peaks\n yResMax = np.max(yResample)\n tResMax = tResample[np.argmax(yResample)]\n \n return yResample, tResample, yResMax, tResMax\n\ndef parabolicInterpolation(y, threshold=.6):\n # todo plot the parabol maybe\n positions, amplitudes = es.PeakDetection(threshold=threshold)\\\n (y.astype(np.float32))\n \n pos = int(positions[0] * (len(y-1)))\n a = y[pos - 1]\n b = y[pos]\n c = y[pos + 1]\n\n tIntMax = samplingPoints[pos] + (a - c) / (2 * (a - 2 * b + c))\n yIntMax = b - ((a - b) ** 2) / (8 * (a - 2 * b + c))\n return tIntMax, yIntMax\n\ndef process():\n \n ## Processing\n \n # \"real\" sinc\n yReal = shifted_sinc(time, k, offset.value)\n \n # sampled sinc\n y = shifted_sinc(samplingPoints, k, offset.value)\n \n \n # Resample strategy\n yResample, tResample, yResMax, tResMax = \\\n resampleStrategy(y, fs, quality=0, oversampling=4)\n \n # Parabolic Interpolation extrategy\n tIntMax, yIntMax = parabolicInterpolation(y)\n \n \n \n ## Plotting\n ax.clear()\n plt.title('Interpeak detection estrategies')\n ax.grid(True)\n ax.grid(xdata=samplingPoints)\n \n \n ax.plot(time, yReal, label='real signal')\n yRealMax = np.max(yReal)\n \n sampledLabel = 'sampled 
signal. Error:{:.3f}'\\\n .format(np.abs(np.max(y) - yRealMax))\n ax.plot(samplingPoints, y, label=sampledLabel, ls='-.',\n color='r', marker='x', markersize=6, alpha=.7)\n\n ax.plot(tResample, yResample, ls='-.',\n color='y', marker='x', alpha=.7)\n\n resMaxLabel = 'Resample Peak. Error:{:.3f}'\\\n .format(np.abs(yResMax - yRealMax))\n ax.plot(tResMax, yResMax, label= resMaxLabel, \n color='y', marker = 'x', markersize=12)\n\n intMaxLabel = 'Interpolation Peak. Error:{:.3f}'\\\n .format(np.abs(yIntMax - yRealMax))\n ax.plot(tIntMax, yIntMax, label= intMaxLabel, \n marker = 'x', markersize=12)\n \n \n fig.legend()\n fig.show()\n\n# matplotlib.use('TkAgg')\noffset = wg.FloatSlider()\noffset.max = 1\noffset.min = -1\noffset.step = .1\ndisplay(offset)\nfig, ax = plt.subplots()\nprocess()\n\ndef on_value_change(change):\n process()\n \noffset.observe(on_value_change, names='value')", "As it can be seen from the widget, the oversampling strategy generates a smaller error in most of the cases.\nThe ITU-R BS.1770 approach\nThe ITU-R BS.1770 recommentation proposess the following signal chain based on the oversampling strategy:\n -12.04dB --&gt; x4 oversample --&gt; LowPass --&gt; abs() --&gt; 20 * log10() --&gt; +12.04dB\n\nIn our implementation, the gain control is suppressed from the chain as in not required when working with float point values, and the result is returned in natural units as it can be converted to dB as a postprocessing step. 
Here we can see an example.", "fs = 44100.\neps = np.finfo(np.float32).eps\naudio_dir = '../../audio/'\naudio = es.MonoLoader(filename='{}/{}'.format(audio_dir,\n 'recorded/distorted.wav'),\n sampleRate=fs)()\n\ntimes = np.linspace(0, len(audio) / fs, len(audio))\n\npeakLocations, output = es.TruePeakDetector(version=2)(audio)\n\noversampledtimes = np.linspace(0, len(output) / (fs*4), len(output))\n\nrandom_indexes = [1, 300, 1000, 3000]\n\nfigu, axes = plt.subplots(len(random_indexes))\nplt.subplots_adjust(hspace=.9)\nfor idx, ridx in enumerate(random_indexes):\n l0 = axes[idx].axhline(0, color='r', alpha=.7, ls = '--')\n l1 = axes[idx].plot(times, 20 * np.log10(np.abs(audio + eps)))\n l2 = axes[idx].plot(oversampledtimes, 20 * np.log10(output + eps), alpha=.8)\n \n axes[idx].set_xlim([peakLocations[ridx] / fs - .0002, peakLocations[ridx] / fs + .0002])\n axes[idx].set_ylim([-.15, 0.15])\n axes[idx].set_title('Clipping peak located at {:.2f}s'.format(peakLocations[ridx] / (fs*4)))\n axes[idx].set_ylabel('dB')\n \nfigu.legend([l0, l1[-1], l2[-1]], ['Dynamic range limit', 'Original signal', 'Resampled signal'])\nplt.show()", "The resampled signal is closer to the analog signal once it goes through the DAC. The part of this signal peaking above 1 may produce additional distortion if the analog amplifier doesn't have enough headroom to handle this additional amplitude. As this type of distortion depends entirely on the design of the playback hardware, it is always recommended to produce music so the true peak positions never overcome the dynamic range [-1, 1].\nThe parameters\nthis is an explanation of the most relevant parameters of the algorithm\n\n\noversamplingFactor. How many times to resample the signal\n\n\nversion. Version of the recommendation to use. Read the references to understand the differences\n\n\nblockDC and emphatise. Optional post-processing on the 2nd version of the recommendation. 
blockDC is a very low-frequency high pass filter and emphatise is a high-shelf filter." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
mne-tools/mne-tools.github.io
0.24/_downloads/bdc8ac519d8f54d70a73a5e0de598566/50_background_freesurfer_mne.ipynb
bsd-3-clause
[ "%matplotlib inline", "How MNE uses FreeSurfer's outputs\nThis tutorial explains how MRI coordinate frames are handled in MNE-Python,\nand how MNE-Python integrates with FreeSurfer for handling MRI data and\nsource space data in general.\nAs usual we'll start by importing the necessary packages; for this tutorial\nthat includes :mod:nibabel to handle loading the MRI images (MNE-Python also\nuses :mod:nibabel under the hood). We'll also use a special :mod:Matplotlib\n&lt;matplotlib.patheffects&gt; function for adding outlines to text, so that text is\nreadable on top of an MRI image.", "import os\n\nimport numpy as np\nimport nibabel\nimport matplotlib.pyplot as plt\nimport matplotlib.patheffects as path_effects\n\nimport mne\nfrom mne.transforms import apply_trans\nfrom mne.io.constants import FIFF", "MRI coordinate frames\nLet's start out by looking at the sample subject MRI. Following standard\nFreeSurfer convention, we look at :file:T1.mgz, which gets created from the\noriginal MRI :file:sample/mri/orig/001.mgz when you run the FreeSurfer\ncommand recon-all &lt;https://surfer.nmr.mgh.harvard.edu/fswiki/recon-all&gt;_.\nHere we use :mod:nibabel to load the T1 image, and the resulting object's\n:meth:~nibabel.spatialimages.SpatialImage.orthoview method to view it.", "data_path = mne.datasets.sample.data_path()\nsubjects_dir = os.path.join(data_path, 'subjects')\nsubject = 'sample'\nt1_fname = os.path.join(subjects_dir, subject, 'mri', 'T1.mgz')\nt1 = nibabel.load(t1_fname)\nt1.orthoview()", "Notice that the axes in the\n:meth:~nibabel.spatialimages.SpatialImage.orthoview figure are labeled\nL-R, S-I, and P-A. These reflect the standard RAS (right-anterior-superior)\ncoordinate system that is widely used in MRI imaging. 
If you are unfamiliar\nwith RAS coordinates, see the excellent nibabel tutorial\n:doc:nibabel:coordinate_systems.\nNibabel already takes care of some coordinate frame transformations under the\nhood, so let's do it manually so we understand what is happening. First let's\nget our data as a 3D array and note that it's already a standard size:", "data = np.asarray(t1.dataobj)\nprint(data.shape)", "These data are voxel intensity values. Here they are unsigned integers in the\nrange 0-255, though in general they can be floating point values. A value\ndata[i, j, k] at a given index triplet (i, j, k) corresponds to some\nreal-world physical location (x, y, z) in space. To get its physical\nlocation, first we have to choose what coordinate frame we're going to use.\nFor example, we could choose a geographical coordinate\nframe, with origin is at the center of the earth, Z axis through the north\npole, X axis through the prime meridian (zero degrees longitude), and Y axis\northogonal to these forming a right-handed coordinate system. This would not\nbe a very useful choice for defining the physical locations of the voxels\nduring the MRI acquisition for analysis, but you could nonetheless figure out\nthe transformation that related the (i, j, k) to this coordinate frame.\nInstead, each scanner defines a more practical, native coordinate system that\nit uses during acquisition, usually related to the physical orientation of\nthe scanner itself and/or the subject within it. During acquisition the\nrelationship between the voxel indices (i, j, k) and the physical\nlocation (x, y, z) in the scanner's native coordinate frame is saved in\nthe image's affine transformation.\n.. 
sidebar:: Under the hood\n``mne.transforms.apply_trans`` effectively does a matrix multiplication\n(i.e., :func:`numpy.dot`), with a little extra work to handle the shape\nmismatch (the affine has shape ``(4, 4)`` because it includes a\n*translation*, which is applied separately).\n\nWe can use :mod:nibabel to examine this transformation, keeping in mind\nthat it processes everything in units of millimeters, unlike MNE where things\nare always in SI units (meters).\nThis allows us to take an arbitrary voxel or slice of data and know where it\nis in the scanner's native physical space (x, y, z) (in mm) by applying\nthe affine transformation to the voxel coordinates.", "print(t1.affine)\nvox = np.array([122, 119, 102])\nxyz_ras = apply_trans(t1.affine, vox)\nprint('Our voxel has real-world coordinates {}, {}, {} (mm)'\n .format(*np.round(xyz_ras, 3)))", "If you have a point (x, y, z) in scanner-native RAS space and you want\nthe corresponding voxel number, you can get it using the inverse of the\naffine. 
This involves some rounding, so it's possible to end up off by one\nvoxel if you're not careful:", "ras_coords_mm = np.array([1, -17, -18])\ninv_affine = np.linalg.inv(t1.affine)\ni_, j_, k_ = np.round(apply_trans(inv_affine, ras_coords_mm)).astype(int)\nprint('Our real-world coordinates correspond to voxel ({}, {}, {})'\n .format(i_, j_, k_))", "Let's write a short function to visualize where our voxel lies in an\nimage, and annotate it in RAS space (rounded to the nearest millimeter):", "def imshow_mri(data, img, vox, xyz, suptitle):\n \"\"\"Show an MRI slice with a voxel annotated.\"\"\"\n i, j, k = vox\n fig, ax = plt.subplots(1, figsize=(6, 6))\n codes = nibabel.orientations.aff2axcodes(img.affine)\n # Figure out the title based on the code of this axis\n ori_slice = dict(P='Coronal', A='Coronal',\n I='Axial', S='Axial',\n L='Sagittal', R='Saggital')\n ori_names = dict(P='posterior', A='anterior',\n I='inferior', S='superior',\n L='left', R='right')\n title = ori_slice[codes[0]]\n ax.imshow(data[i], vmin=10, vmax=120, cmap='gray', origin='lower')\n ax.axvline(k, color='y')\n ax.axhline(j, color='y')\n for kind, coords in xyz.items():\n annotation = ('{}: {}, {}, {} mm'\n .format(kind, *np.round(coords).astype(int)))\n text = ax.text(k, j, annotation, va='baseline', ha='right',\n color=(1, 1, 0.7))\n text.set_path_effects([\n path_effects.Stroke(linewidth=2, foreground='black'),\n path_effects.Normal()])\n # reorient view so that RAS is always rightward and upward\n x_order = -1 if codes[2] in 'LIP' else 1\n y_order = -1 if codes[1] in 'LIP' else 1\n ax.set(xlim=[0, data.shape[2] - 1][::x_order],\n ylim=[0, data.shape[1] - 1][::y_order],\n xlabel=f'k ({ori_names[codes[2]]}+)',\n ylabel=f'j ({ori_names[codes[1]]}+)',\n title=f'{title} view: i={i} ({ori_names[codes[0]]}+)')\n fig.suptitle(suptitle)\n fig.subplots_adjust(0.1, 0.1, 0.95, 0.85)\n return fig\n\n\nimshow_mri(data, t1, vox, {'Scanner RAS': xyz_ras}, 'MRI slice')", "Notice that the axis scales (i, j, 
and k) are still in voxels\n(ranging from 0-255); it's only the annotation text that we've translated\ninto real-world RAS in millimeters.\n\"MRI coordinates\" in MNE-Python: FreeSurfer surface RAS\nWhile :mod:nibabel uses scanner RAS (x, y, z) coordinates,\nFreeSurfer uses a slightly different coordinate frame: MRI surface RAS.\nThe transform from voxels to the FreeSurfer MRI surface RAS coordinate frame\nis known in the FreeSurfer documentation\n&lt;https://surfer.nmr.mgh.harvard.edu/fswiki/CoordinateSystems&gt;_ as Torig,\nand in nibabel as :meth:vox2ras_tkr\n&lt;nibabel.freesurfer.mghformat.MGHHeader.get_vox2ras_tkr&gt;. This\ntransformation sets the center of its coordinate frame in the middle of the\nconformed volume dimensions (N / 2.) with the axes oriented along the\naxes of the volume itself. For more information, see\ncoordinate_systems.\n<div class=\"alert alert-info\"><h4>Note</h4><p>In general, you should assume that the MRI coordinate system for\n a given subject is specific to that subject, i.e., it is not the\n same coordinate MRI coordinate system that is used for any other\n FreeSurfer subject. Even though during processing FreeSurfer will\n align each subject's MRI to ``fsaverage`` to do reconstruction,\n all data (surfaces, MRIs, etc.) get stored in the coordinate frame\n specific to that subject. 
This is why it's important for group\n analyses to transform data to a common coordinate frame for example\n by `surface <ex-morph-surface>` or\n `volumetric <ex-morph-volume>` morphing, or even by just\n applying `mni-affine-transformation` to points.</p></div>\n\nSince MNE-Python uses FreeSurfer extensively for surface computations (e.g.,\nwhite matter, inner/outer skull meshes), internally MNE-Python uses the\nFreeurfer surface RAS coordinate system (not the :mod:nibabel scanner RAS\nsystem) for as many computations as possible, such as all source space\nand BEM mesh vertex definitions.\nWhenever you see \"MRI coordinates\" or \"MRI coords\" in MNE-Python's\ndocumentation, you should assume that we are talking about the\n\"FreeSurfer MRI surface RAS\" coordinate frame!\nWe can do similar computations as before to convert the given voxel indices\ninto FreeSurfer MRI coordinates (i.e., what we call \"MRI coordinates\" or\n\"surface RAS\" everywhere else in MNE), just like we did above to convert\nvoxel indices to scanner RAS:", "Torig = t1.header.get_vox2ras_tkr()\nprint(t1.affine)\nprint(Torig)\nxyz_mri = apply_trans(Torig, vox)\nimshow_mri(data, t1, vox, dict(MRI=xyz_mri), 'MRI slice')", "Knowing these relationships and being mindful about transformations, we\ncan get from a point in any given space to any other space. 
Let's start out\nby plotting the Nasion on a saggital MRI slice:", "fiducials = mne.coreg.get_mni_fiducials(subject, subjects_dir=subjects_dir)\nnasion_mri = [d for d in fiducials if d['ident'] == FIFF.FIFFV_POINT_NASION][0]\nprint(nasion_mri) # note it's in Freesurfer MRI coords", "When we print the nasion, it displays as a DigPoint and shows its\ncoordinates in millimeters, but beware that the underlying data is\nactually stored in meters &lt;units&gt;,\nso before transforming and plotting we'll convert to millimeters:", "nasion_mri = nasion_mri['r'] * 1000 # meters → millimeters\nnasion_vox = np.round(\n apply_trans(np.linalg.inv(Torig), nasion_mri)).astype(int)\nimshow_mri(data, t1, nasion_vox, dict(MRI=nasion_mri),\n 'Nasion estimated from MRI transform')", "We can also take the digitization point from the MEG data, which is in the\n\"head\" coordinate frame.\nLet's look at the nasion in the head coordinate frame:", "info = mne.io.read_info(\n os.path.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif'))\nnasion_head = [d for d in info['dig'] if\n d['kind'] == FIFF.FIFFV_POINT_CARDINAL and\n d['ident'] == FIFF.FIFFV_POINT_NASION][0]\nprint(nasion_head) # note it's in \"head\" coordinates", ".. sidebar:: Head coordinate frame\n The head coordinate frame in MNE is the \"Neuromag\" head coordinate\n frame. The origin is given by the intersection between a line connecting\n the LPA and RPA and the line orthogonal to it that runs through the\n nasion. It is also in RAS orientation, meaning that +X runs through\n the RPA, +Y goes through the nasion, and +Z is orthogonal to these\n pointing upward. 
See `coordinate_systems` for more information.\n\nNotice that in \"head\" coordinate frame the nasion has values of 0 for the\nx and z directions (which makes sense given that the nasion is used\nto define the y axis in that system).\nTo convert from head coordinate frame to voxels, we first apply the head →\nMRI (surface RAS) transform\nfrom a :file:trans file (typically created with the MNE-Python\ncoregistration GUI), then convert meters → millimeters, and finally apply the\ninverse of Torig to get to voxels.\nUnder the hood, functions like :func:mne.setup_source_space,\n:func:mne.setup_volume_source_space, and :func:mne.compute_source_morph\nmake extensive use of these coordinate frames.", "trans = mne.read_trans(\n os.path.join(data_path, 'MEG', 'sample', 'sample_audvis_raw-trans.fif'))\n\n# first we transform from head to MRI, and *then* convert to millimeters\nnasion_dig_mri = apply_trans(trans, nasion_head['r']) * 1000\n\n# ...then we can use Torig to convert MRI to voxels:\nnasion_dig_vox = np.round(\n apply_trans(np.linalg.inv(Torig), nasion_dig_mri)).astype(int)\nimshow_mri(data, t1, nasion_dig_vox, dict(MRI=nasion_dig_mri),\n 'Nasion transformed from digitization')", "Using FreeSurfer's surface reconstructions\nAn important part of what FreeSurfer does is provide cortical surface\nreconstructions. For example, let's load and view the white surface\nof the brain. 
This is a 3D mesh defined by a set of vertices (conventionally\ncalled rr) with shape (n_vertices, 3) and a set of triangles\n(tris) with shape (n_tris, 3) defining which vertices in rr form\neach triangular facet of the mesh.", "fname = os.path.join(subjects_dir, subject, 'surf', 'rh.white')\nrr_mm, tris = mne.read_surface(fname)\nprint(f'rr_mm.shape == {rr_mm.shape}')\nprint(f'tris.shape == {tris.shape}')\nprint(f'rr_mm.max() = {rr_mm.max()}') # just to show that we are in mm", "Let's actually plot it:", "renderer = mne.viz.backends.renderer.create_3d_figure(\n size=(600, 600), bgcolor='w', scene=False)\ngray = (0.5, 0.5, 0.5)\nrenderer.mesh(*rr_mm.T, triangles=tris, color=gray)\nview_kwargs = dict(elevation=90, azimuth=0)\nmne.viz.set_3d_view(\n figure=renderer.figure, distance=350, focalpoint=(0., 0., 40.),\n **view_kwargs)\nrenderer.show()", "We can also plot the mesh on top of an MRI slice. The mesh surfaces are\ndefined in millimeters in the MRI (FreeSurfer surface RAS) coordinate frame,\nso we can convert them to voxels by applying the inverse of the Torig\ntransform:", "rr_vox = apply_trans(np.linalg.inv(Torig), rr_mm)\nfig = imshow_mri(data, t1, vox, {'Scanner RAS': xyz_ras}, 'MRI slice')\n# Based on how imshow_mri works, the \"X\" here is the last dim of the MRI vol,\n# the \"Y\" is the middle dim, and the \"Z\" is the first dim, so now that our\n# points are in the correct coordinate frame, we need to ask matplotlib to\n# do a tricontour slice like:\nfig.axes[0].tricontour(rr_vox[:, 2], rr_vox[:, 1], tris, rr_vox[:, 0],\n levels=[vox[0]], colors='r', linewidths=1.0,\n zorder=1)", "This is the method used by :func:mne.viz.plot_bem to show the BEM surfaces.\nCortical alignment (spherical)\nA critical function provided by FreeSurfer is spherical surface alignment\nof cortical surfaces, maximizing sulcal-gyral alignment. FreeSurfer first\nexpands the cortical surface to a sphere, then aligns it optimally with\nfsaverage. 
Because the vertex ordering is preserved when expanding to a\nsphere, a given vertex in the source (sample) mesh can be mapped easily\nto the same location in the destination (fsaverage) mesh, and vice-versa.", "renderer_kwargs = dict(bgcolor='w', smooth_shading=False)\nrenderer = mne.viz.backends.renderer.create_3d_figure(\n size=(800, 400), scene=False, **renderer_kwargs)\ncurvs = [\n (mne.surface.read_curvature(os.path.join(\n subjects_dir, subj, 'surf', 'rh.curv'),\n binary=False) > 0).astype(float)\n for subj in ('sample', 'fsaverage') for _ in range(2)]\nfnames = [os.path.join(subjects_dir, subj, 'surf', surf)\n for subj in ('sample', 'fsaverage')\n for surf in ('rh.white', 'rh.sphere')]\ny_shifts = [-450, -150, 450, 150]\nz_shifts = [-40, 0, -30, 0]\nfor name, y_shift, z_shift, curv in zip(fnames, y_shifts, z_shifts, curvs):\n this_rr, this_tri = mne.read_surface(name)\n this_rr += [0, y_shift, z_shift]\n renderer.mesh(*this_rr.T, triangles=this_tri, color=None, scalars=curv,\n colormap='copper_r', vmin=-0.2, vmax=1.2)\nzero = [0., 0., 0.]\nwidth = 50.\ny = np.sort(y_shifts)\ny = (y[1:] + y[:-1]) / 2. 
- width / 2.\nrenderer.quiver3d(zero, y, zero,\n zero, [1] * 3, zero, 'k', width, 'arrow')\nview_kwargs['focalpoint'] = (0., 0., 0.)\nmne.viz.set_3d_view(figure=renderer.figure, distance=1000, **view_kwargs)\nrenderer.show()", "Let's look a bit more closely at the spherical alignment by overlaying the\ntwo spherical meshes as wireframes and zooming way in (the purple points are\nseparated by about 1 mm):", "cyan = '#66CCEE'\npurple = '#AA3377'\nrenderer = mne.viz.backends.renderer.create_3d_figure(\n size=(800, 800), scene=False, **renderer_kwargs)\nfnames = [os.path.join(subjects_dir, subj, 'surf', 'rh.sphere')\n for subj in ('sample', 'fsaverage')]\ncolors = [cyan, purple]\nfor name, color in zip(fnames, colors):\n this_rr, this_tri = mne.read_surface(name)\n renderer.mesh(*this_rr.T, triangles=this_tri, color=color,\n representation='wireframe')\nmne.viz.set_3d_view(figure=renderer.figure, distance=20, **view_kwargs)\nrenderer.show()", "You can see that the fsaverage (purple) mesh is uniformly spaced, and the\nmesh for subject \"sample\" (in cyan) has been deformed along the spherical\nsurface by\nFreeSurfer. This deformation is designed to optimize the sulcal-gyral\nalignment.\nSurface decimation\nThese surfaces have a lot of vertices, and in general we only need to use\na subset of these vertices for creating source spaces. A uniform sampling can\neasily be achieved by subsampling in the spherical space. To do this, we\nuse a recursively subdivided icosahedron or octahedron. 
For example, let's\nload a standard oct-6 source space, and at the same zoom level as before\nvisualize how it subsampled the dense mesh:", "src = mne.read_source_spaces(os.path.join(subjects_dir, 'sample', 'bem',\n 'sample-oct-6-src.fif'))\nprint(src)\n\nblue = '#4477AA'\nrenderer = mne.viz.backends.renderer.create_3d_figure(\n size=(800, 800), scene=False, **renderer_kwargs)\nrr_sph, _ = mne.read_surface(fnames[0])\nfor tris, color in [(src[1]['tris'], cyan), (src[1]['use_tris'], blue)]:\n renderer.mesh(*rr_sph.T, triangles=tris, color=color,\n representation='wireframe')\nmne.viz.set_3d_view(figure=renderer.figure, distance=20, **view_kwargs)\nrenderer.show()", "We can also then look at how these two meshes compare by plotting the\noriginal, high-density mesh as well as our decimated mesh white surfaces.", "renderer = mne.viz.backends.renderer.create_3d_figure(\n size=(800, 400), scene=False, **renderer_kwargs)\ny_shifts = [-125, 125]\ntris = [src[1]['tris'], src[1]['use_tris']]\nfor y_shift, tris in zip(y_shifts, tris):\n this_rr = src[1]['rr'] * 1000. 
+ [0, y_shift, -40]\n renderer.mesh(*this_rr.T, triangles=tris, color=None, scalars=curvs[0],\n colormap='copper_r', vmin=-0.2, vmax=1.2)\nrenderer.quiver3d([0], [-width / 2.], [0], [0], [1], [0], 'k', width, 'arrow')\nmne.viz.set_3d_view(figure=renderer.figure, distance=400, **view_kwargs)\nrenderer.show()", "<div class=\"alert alert-danger\"><h4>Warning</h4><p>Some source space vertices can be removed during forward computation.\n See `tut-forward` for more information.</p></div>\n\nFreeSurfer's MNI affine transformation\nIn addition to surface-based approaches, FreeSurfer also provides a simple\naffine coregistration of each subject's data to the fsaverage subject.\nLet's pick a point for sample and plot it on the brain:", "brain = mne.viz.Brain('sample', 'lh', 'white', subjects_dir=subjects_dir,\n background='w')\nxyz = np.array([[-55, -10, 35]])\nbrain.add_foci(xyz, hemi='lh', color='k')\nbrain.show_view('lat')", "We can take this point and transform it to MNI space:", "mri_mni_trans = mne.read_talxfm(subject, subjects_dir)\nprint(mri_mni_trans)\nxyz_mni = apply_trans(mri_mni_trans, xyz / 1000.) * 1000.\nprint(np.round(xyz_mni, 1))", "And because fsaverage is special in that it's already in MNI space\n(its MRI-to-MNI transform is identity), it should land in the equivalent\nanatomical location:", "brain = mne.viz.Brain('fsaverage', 'lh', 'white', subjects_dir=subjects_dir,\n background='w')\nbrain.add_foci(xyz_mni, hemi='lh', color='k')\nbrain.show_view('lat')" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
xdze2/thermique_appart
theo_wall_inertia.ipynb
mit
[ "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline", "One-dimensional homogeneous problems in a semiinfinite medium\npage 54, \"HEAT CONDUCTION\", M. Özisik, 1980\n\nLa température extérieure est définie à zéro.\nL'équation générale pour la température est :", "from scipy.integrate import quad\n\nrhoCp = 1400e3 # densité*Capacité thermique, J/m3/K\nk = 1.75 # conductivité, W/m/K\n\nalpha = k/rhoCp # diffusivité, s.m-2\n\n\nconditionlimite = 'T_fixe' # 'T_fixe', 'adia', '3thd'\n\nfun_F = lambda x: 100*np.exp( -x/0.3 )\n\nI_right = lambda beta: quad(fun_F, 0, np.inf, weight='sin', wvar=beta )[0]\n\ninv_N = 2.0 / np.pi\nfun_exp_t = lambda beta, t: np.exp( -alpha * beta**2 * t )\n\ndI = lambda beta, t : fun_exp_t( beta, t ) * inv_N * I_right( beta )\n\nTemperature = lambda x, t: quad( dI, 0, np.inf, weight='sin', wvar=x, args=(t,) )[0]\n\nTemperature( 1.0, 1000.0 )\n\nx_span = np.linspace( 0 , 1, 15 )\nt_span = np.linspace( 0, 3600, 3 )\n\nfor t in t_span:\n Tx = []\n for x in x_span:\n Tx.append( Temperature(x, t) )\n plt.plot( Tx )\n\nI_right( 10 )", "Avec la transformé de Laplace\npage 275\nAvec Odeint", "L = 1 # m\nN = 20\n\n\nX = np.linspace( 0, L, N )\n\ndx = L/(N-1)\nT = np.zeros_like( X )\n\nTzero = np.zeros_like( X )\n#Tzero = 1+Tzero\n\ndef flux_in( T, t ):\n \"\"\" Flux entrant\n T: Température de surface, °C\n t: temps, sec\n \"\"\"\n w = 2*np.pi/( 60*60*24 )\n F = 10*( 12*np.cos( w*t ) - T )\n return F\n\ndef Laplacien( U ):\n \"\"\" Calcul le laplacien du vecteur U\n avec des conditions aux limites adiabatiques \n \"\"\"\n d2Udx2 = np.zeros_like( U )\n \n U_i = U[1:-1]\n U_im1 = U[0:-2]\n U_ip1 = U[2:]\n\n d2Udx2[1:-1] = ( U_ip1 + U_im1 -2*U_i )/dx**2\n \n d2Udx2[0] = -(U[0]-U[1])/dx\n d2Udx2[-1] = (U[-2] - U[-1])/dx\n \n return d2Udx2\n\nfrom scipy.integrate import odeint\n\ndef get_dTdt( T, t ):\n dTdt = np.zeros_like( T )\n \n dTdt = alpha*Laplacien( T )\n\n dTdt[0] += flux_in( T[0], t )\n\n return dTdt\n\nt_span = 
np.linspace(0, 24*60*60*2, 16)\n\nr = odeint(get_dTdt, Tzero, t_span)\n\nplt.plot( X, r.T );\n\nr.shape\n\nplt.plot( r.sum(axis=1) )\n\nr" ]
[ "code", "markdown", "code", "markdown", "code" ]
rnwatanabe/projectPR
ExampleNotebooks/MNPoolWithRenshawCells-Copy1.ipynb
gpl-3.0
[ "This notebook is a simulation of 5000 ms of 400 independent descending commands following a gamma distribution with mean of 12 ms and order 10 and the Soleus muscle (800 motoneurons). Each descending command connects to approximately 30 % of the motor units. Also, a pool of 350 Renshaw cells is present.", "import sys\nsys.path.insert(0, '..')\nimport time\nimport matplotlib.pyplot as plt\n%matplotlib inline \nfrom IPython.display import set_matplotlib_formats\nset_matplotlib_formats('pdf', 'png')\nplt.rcParams['savefig.dpi'] = 75\n\nplt.rcParams['figure.autolayout'] = False\nplt.rcParams['figure.figsize'] = 10, 6\nplt.rcParams['axes.labelsize'] = 18\nplt.rcParams['axes.titlesize'] = 20\nplt.rcParams['font.size'] = 16\nplt.rcParams['lines.linewidth'] = 2.0\nplt.rcParams['lines.markersize'] = 8\nplt.rcParams['legend.fontsize'] = 14\n\nplt.rcParams['text.usetex'] = True\nplt.rcParams['font.family'] = \"serif\"\nplt.rcParams['font.serif'] = \"cm\"\nplt.rcParams['text.latex.preamble'] = \"\\usepackage{subdepth}, \\usepackage{type1cm}\"\n\nimport numpy as np\n\nfrom Configuration import Configuration\nfrom MotorUnitPoolNoChannel import MotorUnitPoolNoChannel\nfrom InterneuronPoolNoChannel import InterneuronPoolNoChannel\nfrom NeuralTract import NeuralTract\nfrom SynapsesFactory import SynapsesFactory\n\nconf = Configuration('confMNPoolWithRenshawCells.rmto')\nconf.simDuration_ms = 5000 # Here I change simulation duration without changing the Configuration file.\n\n# Time vector for the simulation\nt = np.arange(0.0, conf.simDuration_ms, conf.timeStep_ms)\n\nmembPotential = np.zeros_like(t, dtype = 'd')\n\npools = dict()\npools[0] = MotorUnitPoolNoChannel(conf, 'SOL')\npools[1] = NeuralTract(conf, 'CMExt')\npools[2] = InterneuronPoolNoChannel(conf, 'RC', 'ext')\n\nSyn = SynapsesFactory(conf, pools)\nGammaOrder = 10\nFR = 1000/12.0\n\ntic = time.time()\nfor i in xrange(0, len(t)-1):\n pools[1].atualizePool(t[i], FR, GammaOrder) # NeuralTract\n 
pools[0].atualizeMotorUnitPool(t[i]) # MN pool\n pools[3].atualizePool(t[i]) # RC synaptic Noise\n pools[2].atualizeInterneuronPool(t[i]) # RC pool\ntoc = time.time()\nprint str(toc - tic) + ' seconds'\n\npools[0].listSpikes()\npools[1].listSpikes()\npools[2].listSpikes()", "The spike times of all descending commands along the 5000 ms of simulation is shown in Fig. \\ref{fig:spikesDescRenshaw}.", "plt.figure()\nplt.plot(pools[1].poolTerminalSpikes[:, 0],\n pools[1].poolTerminalSpikes[:, 1]+1, '.')\nplt.xlabel('t (ms)')\nplt.ylabel('Descending Command index')", "The spike times of the MNs along the 5000 ms of simulation is shown in Fig. \\ref{fig:spikesMNRenshaw}.", "plt.figure()\nplt.plot(pools[0].poolTerminalSpikes[:, 0],\n pools[0].poolTerminalSpikes[:, 1]+1, '.')\nplt.xlabel('t (ms)')\nplt.ylabel('Motor Unit index')", "The spike times of the Renshaw cells along the 5000 ms of simulation is shown in Fig. \\ref{fig:spikesRenshawRenshaw}.", "plt.figure()\nplt.plot(pools[2].poolSomaSpikes[:, 0],\n pools[2].poolSomaSpikes[:, 1]+1, '.')\nplt.xlabel('t (ms)')\nplt.ylabel('Renshaw cell index')", "The muscle force during the simulation \\ref{fig:forceRenshaw}.", "plt.figure()\nplt.plot(t, pools[0].Muscle.force, '-')\nplt.xlabel('t (ms)')\nplt.ylabel('Muscle force (N)')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
gwtsa/gwtsa
examples/notebooks/4_menyanthes_file.ipynb
mit
[ "<figure>\n <IMG SRC=\"https://raw.githubusercontent.com/pastas/pastas/master/doc/_static/Art_logo.jpg\" WIDTH=250 ALIGN=\"right\">\n</figure>\n\nMenyanthes File\nDeveloped by Ruben Caljé\nMenyanthes is timeseries analysis software used by many people in the Netherlands. In this example a Menyanthes-file with one observation-series is imported, and simulated. There are several stresses in the Menyanthes-file, among which are three groundwater extractions with a significant influence on groundwater head.", "# First perform the necessary imports\nimport matplotlib.pyplot as plt\nimport pastas as ps\n\n%matplotlib notebook", "1. Importing the Menyanthes-file\nImport the Menyanthes-file with observations and stresses. Then plot the observations, together with the diferent stresses in the Menyanthes file.", "# how to use it?\nfname = '../data/MenyanthesTest.men'\nmeny = ps.read.MenyData(fname)\n\n# plot some series\nf1, axarr = plt.subplots(len(meny.IN)+1, sharex=True)\noseries = meny.H['Obsevation well'][\"values\"]\noseries.plot(ax=axarr[0])\naxarr[0].set_title(meny.H['Obsevation well'][\"Name\"])\nfor i, val in enumerate(meny.IN.items()):\n name, data = val\n data[\"values\"].plot(ax=axarr[i+1])\n axarr[i+1].set_title(name)\nplt.tight_layout(pad=0)\nplt.show()", "2. 
Run a model\nMake a model with precipitation, evaporation and three groundwater extractions.", "# Create the time series model\nml = ps.Model(oseries)\n\n# Add precipitation\nIN = meny.IN['Precipitation']['values']\nIN.index = IN.index.round(\"D\")\nIN2 = meny.IN['Evaporation']['values']\nIN2.index = IN2.index.round(\"D\")\nts = ps.StressModel2([IN, IN2], ps.Gamma, 'Recharge')\nml.add_stressmodel(ts)\n\n# Add well extraction 1\n# IN = meny.IN['Extraction 1']\n# # extraction amount counts for the previous month\n# ts = ps.StressModel(IN['values'], ps.Hantush, 'Extraction_1', up=False,\n# settings=\"well\")\n# ml.add_stressmodel(ts)\n\n# Add well extraction 2\nIN = meny.IN['Extraction 2']\n# extraction amount counts for the previous month\nts = ps.StressModel(IN['values'], ps.Hantush, 'Extraction_2', up=False,\n settings=\"well\")\nml.add_stressmodel(ts)\n\n# Add well extraction 3\nIN = meny.IN['Extraction 3']\n# extraction amount counts for the previous month\nts = ps.StressModel(IN['values'], ps.Hantush, 'Extraction_3', up=False,\n settings=\"well\")\nml.add_stressmodel(ts)\n\n# Solve the model (can take around 20 seconds..)\nml.solve()", "3. Plot the decomposition\nShow the decomposition of the groundwater head, by plotting the influence on groundwater head of each of the stresses.", "ax = ml.plots.decomposition(ytick_base=1.)\nax[0].set_title('Observations vs simulation')\nax[0].legend()\nax[0].figure.tight_layout(pad=0)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
JasonMDev/guidedprojects
jupyter-files/GP02.ipynb
mit
[ "GP02: Explore U.S. Births\nThe raw data behind the story Some People Are Too Superstitious To Have A Baby On Friday The 13th, which you can read here. \nWe'll be working with the data set from the Centers for Disease Control and Prevention's National National Center for Health Statistics. \nThe data set has the following structure:\n* year - Year\n* month - Month\n* date_of_month - Day number of the month\n* day_of_week - Day of week, where 1 is Monday and 7 is Sunday\n* births - Number of births\n1: Introduction To The Dataset\nLets explore the data and see how it looks.", "csv_list = open(\"../data/GP02/US_births_1994-2003_CDC_NCHS.csv\").read().split(\"\\n\")\n\ncsv_list[0:10]", "2: Converting Data Into A List Of Lists\nThe lists needs to be converted to a more structured format to be able to analyze it.", "def read_csv(filename):\n string_data = open(filename).read()\n string_list = string_data.split(\"\\n\")[1:]\n final_list = []\n \n for row in string_list:\n string_fields = row.split(\",\")\n int_fields = []\n for value in string_fields:\n int_fields.append(int(value))\n final_list.append(int_fields)\n return final_list\n \ncdc_list = read_csv(\"../data/GP02/US_births_1994-2003_CDC_NCHS.csv\")\n\ncdc_list[0:10]", "3: Calculating Number Of Births Each Month\nNow that the data is in a more usable format, we can start to analyze it.", "def month_births(data):\n births_per_month = {}\n \n for row in data:\n month = row[1]\n births = row[4]\n if month in births_per_month:\n births_per_month[month] = births_per_month[month] + births\n else:\n births_per_month[month] = births\n return births_per_month\n \ncdc_month_births = month_births(cdc_list)\n\ncdc_month_births", "4: Calculating Number Of Births Each Day Of Week\nLet's now create a function that calculates the total number of births for each unique day of the week.", "def dow_births(data):\n births_per_dow = {}\n \n for row in data:\n dow = row[3]\n births = row[4]\n if dow in births_per_dow:\n 
births_per_dow[dow] = births_per_dow[dow] + births\n else:\n births_per_dow[dow] = births\n return births_per_dow\n \ncdc_dow_births = dow_births(cdc_list)\n\ncdc_dow_births", "5: Creating A More General Function\nIt's better to create a single function that works for any column and specify the column we want as a parameter each time we call the function.", "def calc_counts(data, column):\n sums_dict = {}\n \n for row in data:\n col_value = row[column]\n births = row[4]\n if col_value in sums_dict:\n sums_dict[col_value] = sums_dict[col_value] + births\n else:\n sums_dict[col_value] = births\n return sums_dict\n\ncdc_year_births = calc_counts(cdc_list, 0)\ncdc_month_births = calc_counts(cdc_list, 1)\ncdc_dom_births = calc_counts(cdc_list, 2)\ncdc_dow_births = calc_counts(cdc_list, 3)\n\ncdc_year_births\n\ncdc_month_births\n\ncdc_dom_births\n\ncdc_dow_births" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
keras-team/keras-io
examples/vision/ipynb/token_learner.ipynb
apache-2.0
[ "Learning to tokenize in Vision Transformers\nAuthors: Aritra Roy Gosthipaty, Sayak Paul (equal contribution)<br>\nDate created: 2021/12/10<br>\nLast modified: 2021/12/15<br>\nDescription: Adaptively generating a smaller number of tokens for Vision Transformers.\nIntroduction\nVision Transformers (Dosovitskiy et al.) and many\nother Transformer-based architectures (Liu et al.,\nYuan et al., etc.) have shown strong results in\nimage recognition. The following provides a brief overview of the components involved in the\nVision Transformer architecture for image classification:\n\nExtract small patches from input images.\nLinearly project those patches.\nAdd positional embeddings to these linear projections.\nRun these projections through a series of Transformer (Vaswani et al.)\nblocks.\nFinally, take the representation from the final Transformer block and add a\nclassification head.\n\nIf we take 224x224 images and extract 16x16 patches, we get a total of 196 patches (also\ncalled tokens) for each image. The number of patches increases as we increase the\nresolution, leading to higher memory footprint. Could we use a reduced\nnumber of patches without having to compromise performance?\nRyoo et al. investigate this question in\nTokenLearner: Adaptive Space-Time Tokenization for Videos.\nThey introduce a novel module called TokenLearner that can help reduce the number\nof patches used by a Vision Transformer (ViT) in an adaptive manner. With TokenLearner\nincorporated in the standard ViT architecture, they are able to reduce the amount of\ncompute (measured in FLOPS) used by the model.\nIn this example, we implement the TokenLearner module and demonstrate its\nperformance with a mini ViT and the CIFAR-10 dataset. We make use of the following\nreferences:\n\nOfficial TokenLearner code\nImage Classification with ViTs on keras.io\nTokenLearner slides from NeurIPS 2021\n\nSetup\nWe need to install TensorFlow Addons to run this example. 
To install it, execute the\nfollowing:\nshell\npip install tensorflow-addons\nImports", "import tensorflow as tf\n\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nimport tensorflow_addons as tfa\n\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport math", "Hyperparameters\nPlease feel free to change the hyperparameters and check your results. The best way to\ndevelop intuition about the architecture is to experiment with it.", "# DATA\nBATCH_SIZE = 256\nAUTO = tf.data.AUTOTUNE\nINPUT_SHAPE = (32, 32, 3)\nNUM_CLASSES = 10\n\n# OPTIMIZER\nLEARNING_RATE = 1e-3\nWEIGHT_DECAY = 1e-4\n\n# TRAINING\nEPOCHS = 20\n\n# AUGMENTATION\nIMAGE_SIZE = 48 # We will resize input images to this size.\nPATCH_SIZE = 6 # Size of the patches to be extracted from the input images.\nNUM_PATCHES = (IMAGE_SIZE // PATCH_SIZE) ** 2\n\n# ViT ARCHITECTURE\nLAYER_NORM_EPS = 1e-6\nPROJECTION_DIM = 128\nNUM_HEADS = 4\nNUM_LAYERS = 4\nMLP_UNITS = [\n PROJECTION_DIM * 2,\n PROJECTION_DIM,\n]\n\n# TOKENLEARNER\nNUM_TOKENS = 4", "Load and prepare the CIFAR-10 dataset", "# Load the CIFAR-10 dataset.\n(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()\n(x_train, y_train), (x_val, y_val) = (\n (x_train[:40000], y_train[:40000]),\n (x_train[40000:], y_train[40000:]),\n)\nprint(f\"Training samples: {len(x_train)}\")\nprint(f\"Validation samples: {len(x_val)}\")\nprint(f\"Testing samples: {len(x_test)}\")\n\n# Convert to tf.data.Dataset objects.\ntrain_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))\ntrain_ds = train_ds.shuffle(BATCH_SIZE * 100).batch(BATCH_SIZE).prefetch(AUTO)\n\nval_ds = tf.data.Dataset.from_tensor_slices((x_val, y_val))\nval_ds = val_ds.batch(BATCH_SIZE).prefetch(AUTO)\n\ntest_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test))\ntest_ds = test_ds.batch(BATCH_SIZE).prefetch(AUTO)", "Data augmentation\nThe augmentation pipeline consists of:\n\nRescaling\nResizing\nRandom cropping 
(fixed-sized or random sized)\nRandom horizontal flipping", "data_augmentation = keras.Sequential(\n [\n layers.Rescaling(1 / 255.0),\n layers.Resizing(INPUT_SHAPE[0] + 20, INPUT_SHAPE[0] + 20),\n layers.RandomCrop(IMAGE_SIZE, IMAGE_SIZE),\n layers.RandomFlip(\"horizontal\"),\n ],\n name=\"data_augmentation\",\n)", "Note that image data augmentation layers do not apply data transformations at inference time.\nThis means that when these layers are called with training=False they behave differently. Refer\nto the documentation for more\ndetails.\nPositional embedding module\nA Transformer architecture consists of multi-head\nself attention layers and fully-connected feed forward networks (MLP) as the main\ncomponents. Both these components are permutation invariant: they're not aware of\nfeature order.\nTo overcome this problem we inject tokens with positional information. The\nposition_embedding function adds this positional information to the linearly projected\ntokens.", "\ndef position_embedding(\n projected_patches, num_patches=NUM_PATCHES, projection_dim=PROJECTION_DIM\n):\n # Build the positions.\n positions = tf.range(start=0, limit=num_patches, delta=1)\n\n # Encode the positions with an Embedding layer.\n encoded_positions = layers.Embedding(\n input_dim=num_patches, output_dim=projection_dim\n )(positions)\n\n # Add encoded positions to the projected patches.\n return projected_patches + encoded_positions\n", "MLP block for Transformer\nThis serves as the Fully Connected Feed Forward block for our Transformer.", "\ndef mlp(x, dropout_rate, hidden_units):\n # Iterate over the hidden units and\n # add Dense => Dropout.\n for units in hidden_units:\n x = layers.Dense(units, activation=tf.nn.gelu)(x)\n x = layers.Dropout(dropout_rate)(x)\n return x\n", "TokenLearner module\nThe following figure presents a pictorial overview of the module\n(source).\n\nThe TokenLearner module takes as input an image-shaped tensor. 
It then passes it through\nmultiple single-channel convolutional layers extracting different spatial attention maps\nfocusing on different parts of the input. These attention maps are then element-wise\nmultiplied to the input and result is aggregated with pooling. This pooled output can be\ntrated as a summary of the input and has much lesser number of patches (8, for example)\nthan the original one (196, for example).\nUsing multiple convolution layers helps with expressivity. Imposing a form of spatial\nattention helps retain relevant information from the inputs. Both of these components are\ncrucial to make TokenLearner work, especially when we are significantly reducing the number of patches.", "\ndef token_learner(inputs, number_of_tokens=NUM_TOKENS):\n # Layer normalize the inputs.\n x = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(inputs) # (B, H, W, C)\n\n # Applying Conv2D => Reshape => Permute\n # The reshape and permute is done to help with the next steps of\n # multiplication and Global Average Pooling.\n attention_maps = keras.Sequential(\n [\n # 3 layers of conv with gelu activation as suggested\n # in the paper.\n layers.Conv2D(\n filters=number_of_tokens,\n kernel_size=(3, 3),\n activation=tf.nn.gelu,\n padding=\"same\",\n use_bias=False,\n ),\n layers.Conv2D(\n filters=number_of_tokens,\n kernel_size=(3, 3),\n activation=tf.nn.gelu,\n padding=\"same\",\n use_bias=False,\n ),\n layers.Conv2D(\n filters=number_of_tokens,\n kernel_size=(3, 3),\n activation=tf.nn.gelu,\n padding=\"same\",\n use_bias=False,\n ),\n # This conv layer will generate the attention maps\n layers.Conv2D(\n filters=number_of_tokens,\n kernel_size=(3, 3),\n activation=\"sigmoid\", # Note sigmoid for [0, 1] output\n padding=\"same\",\n use_bias=False,\n ),\n # Reshape and Permute\n layers.Reshape((-1, number_of_tokens)), # (B, H*W, num_of_tokens)\n layers.Permute((2, 1)),\n ]\n )(\n x\n ) # (B, num_of_tokens, H*W)\n\n # Reshape the input to align it with the output of the 
conv block.\n num_filters = inputs.shape[-1]\n inputs = layers.Reshape((1, -1, num_filters))(inputs) # inputs == (B, 1, H*W, C)\n\n # Element-Wise multiplication of the attention maps and the inputs\n attended_inputs = (\n attention_maps[..., tf.newaxis] * inputs\n ) # (B, num_tokens, H*W, C)\n\n # Global average pooling the element wise multiplication result.\n outputs = tf.reduce_mean(attended_inputs, axis=2) # (B, num_tokens, C)\n return outputs\n", "Transformer block", "\ndef transformer(encoded_patches):\n # Layer normalization 1.\n x1 = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(encoded_patches)\n\n # Multi Head Self Attention layer 1.\n attention_output = layers.MultiHeadAttention(\n num_heads=NUM_HEADS, key_dim=PROJECTION_DIM, dropout=0.1\n )(x1, x1)\n\n # Skip connection 1.\n x2 = layers.Add()([attention_output, encoded_patches])\n\n # Layer normalization 2.\n x3 = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(x2)\n\n # MLP layer 1.\n x4 = mlp(x3, hidden_units=MLP_UNITS, dropout_rate=0.1)\n\n # Skip connection 2.\n encoded_patches = layers.Add()([x4, x2])\n return encoded_patches\n", "ViT model with the TokenLearner module", "\ndef create_vit_classifier(use_token_learner=True, token_learner_units=NUM_TOKENS):\n inputs = layers.Input(shape=INPUT_SHAPE) # (B, H, W, C)\n\n # Augment data.\n augmented = data_augmentation(inputs)\n\n # Create patches and project the pathces.\n projected_patches = layers.Conv2D(\n filters=PROJECTION_DIM,\n kernel_size=(PATCH_SIZE, PATCH_SIZE),\n strides=(PATCH_SIZE, PATCH_SIZE),\n padding=\"VALID\",\n )(augmented)\n _, h, w, c = projected_patches.shape\n projected_patches = layers.Reshape((h * w, c))(\n projected_patches\n ) # (B, number_patches, projection_dim)\n\n # Add positional embeddings to the projected patches.\n encoded_patches = position_embedding(\n projected_patches\n ) # (B, number_patches, projection_dim)\n encoded_patches = layers.Dropout(0.1)(encoded_patches)\n\n # Iterate over the number of layers and 
stack up blocks of\n # Transformer.\n for i in range(NUM_LAYERS):\n # Add a Transformer block.\n encoded_patches = transformer(encoded_patches)\n\n # Add TokenLearner layer in the middle of the\n # architecture. The paper suggests that anywhere\n # between 1/2 or 3/4 will work well.\n if use_token_learner and i == NUM_LAYERS // 2:\n _, hh, c = encoded_patches.shape\n h = int(math.sqrt(hh))\n encoded_patches = layers.Reshape((h, h, c))(\n encoded_patches\n ) # (B, h, h, projection_dim)\n encoded_patches = token_learner(\n encoded_patches, token_learner_units\n ) # (B, num_tokens, c)\n\n # Layer normalization and Global average pooling.\n representation = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(encoded_patches)\n representation = layers.GlobalAvgPool1D()(representation)\n\n # Classify outputs.\n outputs = layers.Dense(NUM_CLASSES, activation=\"softmax\")(representation)\n\n # Create the Keras model.\n model = keras.Model(inputs=inputs, outputs=outputs)\n return model\n", "As shown in the TokenLearner paper, it is\nalmost always advantageous to include the TokenLearner module in the middle of the\nnetwork.\nTraining utility", "\ndef run_experiment(model):\n # Initialize the AdamW optimizer.\n optimizer = tfa.optimizers.AdamW(\n learning_rate=LEARNING_RATE, weight_decay=WEIGHT_DECAY\n )\n\n # Compile the model with the optimizer, loss function\n # and the metrics.\n model.compile(\n optimizer=optimizer,\n loss=\"sparse_categorical_crossentropy\",\n metrics=[\n keras.metrics.SparseCategoricalAccuracy(name=\"accuracy\"),\n keras.metrics.SparseTopKCategoricalAccuracy(5, name=\"top-5-accuracy\"),\n ],\n )\n\n # Define callbacks\n checkpoint_filepath = \"/tmp/checkpoint\"\n checkpoint_callback = keras.callbacks.ModelCheckpoint(\n checkpoint_filepath,\n monitor=\"val_accuracy\",\n save_best_only=True,\n save_weights_only=True,\n )\n\n # Train the model.\n _ = model.fit(\n train_ds,\n epochs=EPOCHS,\n validation_data=val_ds,\n callbacks=[checkpoint_callback],\n 
)\n\n model.load_weights(checkpoint_filepath)\n _, accuracy, top_5_accuracy = model.evaluate(test_ds)\n print(f\"Test accuracy: {round(accuracy * 100, 2)}%\")\n print(f\"Test top 5 accuracy: {round(top_5_accuracy * 100, 2)}%\")\n", "Train and evaluate a ViT with TokenLearner", "vit_token_learner = create_vit_classifier()\nrun_experiment(vit_token_learner)", "Results\nWe experimented with and without the TokenLearner inside the mini ViT we implemented\n(with the same hyperparameters presented in this example). Here are our results:\n| TokenLearner | # tokens in<br> TokenLearner | Top-1 Acc<br>(Averaged across 5 runs) | GFLOPs | TensorBoard |\n|:---:|:---:|:---:|:---:|:---:|\n| N | - | 56.112% | 0.0184 | Link |\n| Y | 8 | 56.55% | 0.0153 | Link |\n| N | - | 56.37% | 0.0184 | Link |\n| Y | 4 | 56.4980% | 0.0147 | Link |\n| N | - (# Transformer layers: 8) | 55.36% | 0.0359 | Link |\nTokenLearner is able to consistently outperform our mini ViT without the module. It is\nalso interesting to notice that it was also able to outperform a deeper version of our\nmini ViT (with 8 layers). The authors also report similar observations in the paper and\nthey attribute this to the adaptiveness of TokenLearner.\nOne should also note that the FLOPs count decreases considerably with the addition of\nthe TokenLearner module. With less FLOPs count the TokenLearner module is able to\ndeliver better results. This aligns very well with the authors' findings.\nAdditionally, the authors introduced\na newer version of the TokenLearner for smaller training data regimes. Quoting the authors:\n\nInstead of using 4 conv. layers with small channels to implement spatial attention,\n this version uses 2 grouped conv. layers with more channels. It also uses softmax\n instead of sigmoid. 
We confirmed that this version works better when having limited\n training data, such as training with ImageNet1K from scratch.\n\nWe experimented with this module and in the following table we summarize the results:\n| # Groups | # Tokens | Top-1 Acc | GFLOPs | TensorBoard |\n|:---:|:---:|:---:|:---:|:---:|\n| 4 | 4 | 54.638% | 0.0149 | Link |\n| 8 | 8 | 54.898% | 0.0146 | Link |\n| 4 | 8 | 55.196% | 0.0149 | Link |\nPlease note that we used the same hyperparameters presented in this example. Our\nimplementation is available\nin this notebook.\nWe acknowledge that the results with this new TokenLearner module are slightly off\nthan expected and this might mitigate with hyperparameter tuning.\nNote: To compute the FLOPs of our models we used\nthis utility\nfrom this repository.\nNumber of parameters\nYou may have noticed that adding the TokenLearner module increases the number of\nparameters of the base network. But that does not mean it is less efficient as shown by\nDehghani et al.. Similar findings were reported\nby Bello et al. as well. The TokenLearner module\nhelps reducing the FLOPS in the overall network thereby helping to reduce the memory\nfootprint.\nFinal notes\n\nTokenFuser: The authors of the paper also propose another module named TokenFuser. This\nmodule helps in remapping the representation of the TokenLearner output back to its\noriginal spatial resolution. To reuse the TokenLearner in the ViT architecture, the\nTokenFuser is a must. 
We first learn the tokens from the TokenLearner, build a\nrepresentation of the tokens from a Transformer layer and then remap the representation\ninto the original spatial resolution, so that it can again be consumed by a TokenLearner.\nNote here that you can only use the TokenLearner module once in entire ViT model if not\npaired with the TokenFuser.\nUse of these modules for video: The authors also suggest that TokenFuser goes really\nwell with Vision Transformers for Videos (Arnab et al.).\n\nWe are grateful to JarvisLabs and\nGoogle Developers Experts\nprogram for helping with GPU credits. Also, we are thankful to Michael Ryoo (first\nauthor of TokenLearner) for fruitful discussions." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
robertoalotufo/ia898
src/applylut.ipynb
mit
[ "Function applylut\nSynopse\nIntensity image transform.\n\n\ng = applylut(fi, it)\n\n\ng: Image. \n\n\nfi: Image. input image, gray scale or index image.\n\nit: Image. Intensity transform. Table of one or three columns.", "import numpy as np\n\ndef applylut(fi, it):\n g = it[fi]\n if len(g.shape) == 3:\n g = np.swapaxes(g, 0,2)\n g = np.swapaxes(g, 1,2)\n return g", "Description\nApply an intensity image transform to the input image. The input image can be seen as an gray scale \nimage or an index image. The intensity transform is represented by a table where the input (gray scale) \ncolor address the table line and its column contents indicates the output (gray scale) image color. \nThe table can have one or three columns. If it has three columns, the output image is a three color \nband image. This intensity image transformation is very powerful and can be use in many applications \ninvolving gray scale and color images. If the input image has an index (gray scale color) that is greater \nthan the size of the intensity table, an error is reported.\nExamples", "testing = (__name__ == \"__main__\")\n\nif testing:\n ! jupyter nbconvert --to python applylut.ipynb\n import numpy as np\n import sys,os\n import matplotlib.image as mpimg\n ia898path = os.path.abspath('../../')\n if ia898path not in sys.path:\n sys.path.append(ia898path)\n import ia898.src as ia\n", "Example 1\nThis first example shows a simple numeric 2 lines, 3 columns image with sequential pixel values. First the identity\ntable is applied and image g is generated with same values of f. Next, a new table, itn = 5 - it is generated creating\na negation table. 
The resultant image gn has the values of f negated.", "if testing: \n \n f = np.array([[0,1,2],\n [3,4,5]])\n print('f=\\n',f)\n it = np.array(list(range(6))) # identity transform\n print('it=',it)\n g = ia.applylut(f, it)\n print('g=\\n',g)\n itn = 5 - it # negation\n print('itn=',itn)\n gn = ia.applylut(f, itn)\n print('gn=\\n',gn)", "Example 2\nThis example shows the negation operation applying the intensity transform through a negation grayscale table: it = 255 - i.", "if testing:\n f = mpimg.imread('../data/cameraman.tif')\n it = (255 - np.arange(256)).astype('uint8')\n g = ia.applylut(f, it)\n ia.adshow(f,'f')\n ia.adshow(g,'g')", "Example 3\nIn this example, the colortable has 3 columns and the application of the colortable to an scalar image results in an image with 3 bands.", "if testing:\n f = np.array([[0,1,2], \n [2,0,1]])\n ct = np.array([[100,101,102],\n [110,111,112],\n [120,121,122]])\n #print iaimginfo(ct)\n g = ia.applylut(f,ct)\n print(g)", "Example 4\nIn this example, the colortable has 3 columns, R, G and B, where G and B are zeros and R is identity.", "if testing:\n f = mpimg.imread('../data/cameraman.tif')\n aux = np.resize(np.arange(256).astype('uint8'), (256,1))\n ct = np.concatenate((aux, np.zeros((256,2),'uint8')), 1)\n g = ia.applylut(f, ct) # generate (bands,H,W)\n g = g.transpose(1,2,0) # convert to (H,W,bands)\n ia.adshow(f)\n ia.adshow(g)", "Equation\n$$ g(r,c) = IT( f(r,c) ) $$\n$$\n g_{R}(r,c) = IT_R( f(r,c))\\\n g_{G}(r,c) = IT_G( f(r,c))\\\n g_{B}(r,c) = IT_B( f(r,c)) $$\nSee Also:\n\nia636:colormap Pseudocolor maps", "if testing:\n print('testing applylut')\n print(repr(ia.applylut(np.array([0,1,2,3]),np.array([0,1,2,3]))) == repr(np.array([0,1,2,3])))\n print(repr(ia.applylut(np.array([0,1,2,3]),np.array([[0,0,0],[1,1,1],[2,2,2],[3,3,3]]))) == repr(np.array([[0,0,0], [1,1,1], [2,2,2],[3,3,3]])))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
rocketproplab/Guides
Guides/python/excelToPandas.ipynb
mit
[ "Import Excel or CSV To Pandas\nThis file covers the process of importing excel and csv files into a pandas dataframe. Note: the methods for importing excel and csv files is almost identical. The major difference is in the method used. This notebook serves as a tutorial for both.\nImporting Excel (xlsx): <br>\nThe function used is read_excel. <br>\nImporting comma separated values (csv): <br>\nThe function used is read_csv. <br>\nStep 1\nLets start by importing pandas and os. We will be using pandas to create a dataframe from our data, and os to get file paths.", "import pandas as pd\nimport os", "Step 2\nNow lets create a variable, <code>filePath</code>, that is a string containing the full path to the file we want to import. The code below looks in the current working directory for the file given a file name input by the user. This isn't necessary, and is just included for convienence. Alternatively, user can input a full path into the <code>filePath</code> variable.", "dirPath = os.path.realpath('.')\nfileName = 'assets/coolingExample.xlsx'\nfilePath = os.path.join(dirPath, fileName)", "Step 3\nGreat! Now lets read the data into a dataframe called <code>df</code>.\nThis will allow our data to be accessible by the string in the header.", "df = pd.read_excel(filePath,header=0)\ndf.head()", "Our data is now accessible by a key value. The keys are the column headers in the dataframe. In this example case, those are 'Time (s) - Dev1/ai0' and 'Temperature - Dev1/ai0'. For example, lets access the data in the first column.", "df[df.columns[0]]", "What would happen if we tried to access the data with an invalid key, say <code>1</code> for example? Lets try it to find out.\nNote: I enclose this code in a <code>try: except:</code> statement in order to prevent a huge error from being generated.", "try:\n df[1]\nexcept KeyError:\n print(\"KeyError: 1 - not a valid key\")", "So lets say you have a large dataframe with unknown columns. 
There is a simple way to index them without having prior knowledge of what the dataframe columns are. Namely, the <code>columns</code> method in pandas.", "cols = df.columns\nfor col in cols:\n print(df[col])", "Data Manipulation (Plots)\nNow that we have the data easily accessible in python, lets look at how to plot it. <code>Pandas</code> allows you to use matplotlib to plot, however it is done using methods built into pandas.\nAlthough the methods to create an manipulate plots are built into <code>Pandas</code>, we will still have to import matplotlib to save and show the plots.", "import matplotlib.pyplot as plt", "In order to demonstrate the plotting capabilities of pandas arrays, lets use the example data that we imported earlier. The data frame contains only the two columns that were in the file; temperature and time. Because of this simplicity, we can trust pandas to properly interpret the first column as time and the second column as th measurement (temperature). Thus we can plot with the simple command.\n<code>df.plot()</code>", "plt.figure(1)\nax = df.plot()\nplt.show()", "While this simplification is nice, it is generally better to specify what data you want to plot. Particularly if you are automating the plotting of a large set of dataframes. To do this, specify the <code>x</code> and <code>y</code> arrays in your dataframe as you would in a standard <code>matplotlib</code> plot call, however since this plotting function is a method of the dataframe, you need only specify the column.\nI.e.", "plt.figure(2)\nax = df.plot(cols[0],cols[1])\nplt.show()", "Now that we have the basics down, lets spice up the plot a little bit.", "plt.figure(3)\nax = df.plot(cols[0],cols[1])\nax.set_title('This is a Title')\nax.set_ylabel('Temperature (deg F)')\nax.grid()\nplt.show()", "Data Manipulation (Timestamps)\nOne thing you probably noticed in these plots is that the time axis isn't all that useful. 
It would be better to change the timestamps to a more useful form like seconds since start. Lets go through the process of making that conversion.\nFirst, lets see what the timestamp currently looks like.", "df[cols[0]][0]", "Good news! Since python interpreted the date as a datetime object, we can use datetime object methods to determine the time in seconds. The one caveat is that we can only determine a time difference, not an absolute time. For more on this, read this stackoverflow question.\nThe first thing we have to do is convert these <code>datetime.time</code> objects into <code>datetime.datetime</code> objects using <code>datetime.combine</code>\nNote: importing datetime is a little weird.. <code>datetime</code> is both a module and a class.", "from datetime import datetime, date\n\nstartTime = df[cols[0]][0]\ntimeArray = []\nfor i in range(0,len(df[cols[0]])):\n timeArray.append((datetime.combine(date.today(), df[cols[0]][i]) - datetime.combine(date.today(), startTime)).total_seconds())", "Note: There is probably a better way of doing this (i.e. without a loop, but I'm tired and can't think of anything right now)", "plt.figure(4)\nplt.plot(timeArray, df[cols[1]], 'b')\nplt.title('This is a graph with a better time axis')\nplt.ylabel('Temperature (deg F)')\nplt.xlabel('Time (s)')\nplt.grid()\nplt.show()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
sdpython/ensae_teaching_cs
_doc/notebooks/td1a/td1a_correction_session4.ipynb
mit
[ "1A.2 - Modules, fichiers, expressions régulières (correction)\nCorrection.", "from jyquickhelper import add_notebook_menu\nadd_notebook_menu()\n\n%matplotlib inline", "Exercice 1 : Excel $\\rightarrow$ Python $\\rightarrow$ Excel\nIl faut télécharger le fichier seance4_excel.xlsx puis l'enregistrer au formet texte (séparateur : tabulation) (*.txt). On rappelle les étapes de l'exercice :\n\nenregistrer le fichier au format texte,\nle lire sous python\ncréer une matrice carrée 3x3 où chaque valeur est dans sa case (X,Y),\nenregistrer le résultat sous format texte,\nle récupérer sous Excel.", "with open (\"seance4_excel.txt\", \"r\") as f :\n mat = [ row.strip(' \\n').split('\\t') for row in f.readlines() ]\n \nmat = mat [1:]\nres = [ [ None ] * 3 for i in range(5) ] \nfor i,j,v in mat :\n res [ int(j)-1 ] [ int (i)-1 ] = float(v)\n\nwith open (\"seance4_excel_mat.txt\", \"w\") as f :\n f.write ( '\\n'.join ( [ '\\t'.join( [ str(x) for x in row ] ) for row in res ] ) ) ", "Il est très rare d'écrire ce genre de code. En règle générale, on se sert de modules déjà existant comme pandas, xlrd et openpyxl. 
Cela évite la conversion au format texte :", "import pandas\ndf = pandas.read_excel(\"seance4_excel.xlsx\", sheet_name=\"Feuil1\", engine='openpyxl')\nmat = df.pivot(\"X\", \"Y\", \"value\")\nmat.to_excel(\"seance4_excel_mat.xlsx\")\nmat", "C'est un peu plus rapide.\n<h3 id=\"exo2\">Exercice 2 : trouver un module (1)</h3>\n\nLe module random est celui qu'on cherche.", "import random\nalea = [ random.random() for i in range(10) ]\nprint (alea)\nrandom.shuffle(alea)\nprint (alea)", "Exercice 3 : trouver un module (2)\nLe module datetime permet de faire des opérations sur les dates.", "from datetime import datetime\ndate1 = datetime(2013,9,9)\ndate0 = datetime(2013,8,1)\nprint (date1 - date0)\nbirth = datetime (1975,8,11)\nprint (birth.weekday()) # lundi", "Exercice 4 : son propre module\nOn effectue le remplacement if __name__ == \"__main__\": par if True : :", "# fichier monmodule2.py\nimport math\n\ndef fonction_cos_sequence(seq) :\n return [ math.cos(x) for x in seq ]\n\nif __name__ == \"__main__\" :\n # et une petite astuce quand on travaille sous notebook\n code = \"\"\"\n # -*- coding: utf-8 -*-\n import math\n def fonction_cos_sequence(seq) :\n return [ math.cos(x) for x in seq ] \n if True :\n print (\"Ce message n'apparaît que si ce programme est le point d'entrée.\")\n \"\"\".replace(\" \",\"\")\n with open(\"monmodule3.py\", \"w\", encoding=\"utf8\") as f : f.write(code)\n\nimport monmodule3\nprint ( monmodule3.fonction_cos_sequence ( [ 1, 2, 3 ] ) )", "Le message ce message n'apparaît que ce programme est le point d'entrée apparaît maintenant alors qu'il n'apparaissait pas avec la version de l'énoncé. Comme il apparaît après *, cela montre que cette ligne est exécutée si le module est importé.", "import monmodule3", "Si on importe le module une seconde fois, le message n'apparaît plus : le langage Python a détecté que le module avait déjà été importé. 
Il ne le fait pas une seconde fois.\nExercice 5 : chercher un motif dans un texte\nL'expression régulière est je .{1,60}. Le symbol . signifie n'importe quel caractère. Suivi de {1,60} veut dire n'importe quel caractère répété entre 1 et 60 fois.", "import pyensae.datasource, re\ndiscours = pyensae.datasource.download_data('voeux.zip', website = 'xd')\n\nexp = re.compile (\"je .{1,60}\", re.IGNORECASE)\nfor fichier in discours :\n print(\"----\",fichier)\n try:\n with open(fichier,\"r\") as f : text = f.read()\n except:\n with open(fichier,\"r\", encoding=\"latin-1\") as f : text = f.read()\n je = exp.findall(text) \n for t in je :\n print (t)", "Exercice 6 : chercher un autre motif dans un texte\nPour les mots securite ou insecurite, on construit l'expression :", "import pyensae.datasource, re\ndiscours = pyensae.datasource.download_data('voeux.zip', website = 'xd')\n\nexp = re.compile (\"(.{1,15}(in)?sécurité.{1,50})\", re.IGNORECASE)\nfor fichier in discours :\n print(\"----\",fichier)\n try:\n with open(fichier,\"r\") as f : text = f.read()\n except:\n with open(fichier,\"r\", encoding=\"latin-1\") as f : text = f.read()\n je = exp.findall(text) \n for t in je :\n print (t)", "Exercice 7 : recherche les urls dans une page wikipédia\nOn pourra prendre comme exemple la page du programme Python. La première partie consiste à récupérer le contenu d'une page HTML.", "from urllib.request import urlopen\nurl = \"https://fr.wikipedia.org/wiki/Python_(langage)\"\nwith urlopen(url) as u:\n content = u.read()\ncontent[:300]", "Les données récupérées sont au format binaire d'où le préfixe b''. Pour éviter de télécharger les données à chaque fois, on sauve le contenu sur disque pour le récupérer la prochaine fois.", "with open('page.html', 'wb') as f:\n f.write(content)", "Et on le recharge.", "with open('page.html', 'rb') as f:\n page = f.read()\npage[:300]", "Les données sont sous forme d'octets, il faut d'abord les convertir sous forme de caractères. 
il y a plus de caractères que d'octets disponibles (256), c'est cela qu'il faut une sorte de code pour passer de l'un à l'autre : dans le cas d'internet, le plus utilisé est l'encoding utf-8.", "page_str = page.decode('utf-8')\npage_str[:300]", "On recherche maintenant les urls commençant par http...", "import re\nreg = re.compile(\"href=\\\\\\\"(http.*?)\\\\\\\"\")\nurls = reg.findall(page_str)\nurls[:10]", "Exercice 8 : construire un texte à motif\nA l'inverse des expressions régulières, des modules comme Mako ou Jinja2 permettent de construire simplement des documents qui suivent des règles. Ces outils sont très utilisés pour la construction de page web. On appelle cela faire du templating. Créer une page web qui affiche à l'aide d'un des modules la liste des dimanches de cette année.", "patron = \"\"\"\n<ul>{% for i, url in enumerate(urls) %}\n <li><a href=\"{{ url }}\">url {{ i }}</a></li>{% endfor %}\n</ul>\n\"\"\"\n\nfrom jinja2 import Template\ntpl = Template(patron)\n\nprint(tpl.render(urls=urls[:10], enumerate=enumerate))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
jhseu/tensorflow
tensorflow/lite/micro/examples/magic_wand/train/train_magic_wand_model.ipynb
apache-2.0
[ "Train a gesture recognition model for microcontroller use\nThis notebook demonstrates how to train a 20kb gesture recognition model for TensorFlow Lite for Microcontrollers. It will produce the same model used in the magic_wand example application.\nThe model is designed to be used with Google Colaboratory.\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/micro/examples/magic_wand/train/train_magic_wand_model.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/micro/examples/magic_wand/train/train_magic_wand_model.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n</table>\n\nTraining is much faster using GPU acceleration. Before you proceed, ensure you are using a GPU runtime by going to Runtime -> Change runtime type and selecting GPU. 
Training will take around 5 minutes on a GPU runtime.\nConfigure dependencies\nRun the following cell to ensure the correct version of TensorFlow is used.", "%tensorflow_version 2.x\n", "We'll also clone the TensorFlow repository, which contains the training scripts, and copy them into our workspace.", "# Clone the repository from GitHub\n!git clone --depth 1 -q https://github.com/tensorflow/tensorflow\n# Copy the training scripts into our workspace\n!cp -r tensorflow/tensorflow/lite/micro/examples/magic_wand/train train", "Prepare the data\nNext, we'll download the data and extract it into the expected location within the training scripts' directory.", "# Download the data we will use to train the model\n!wget http://download.tensorflow.org/models/tflite/magic_wand/data.tar.gz\n# Extract the data into the train directory\n!tar xvzf data.tar.gz -C train 1>/dev/null", "We'll then run the scripts that split the data into training, validation, and test sets.", "# The scripts must be run from within the train directory\n%cd train\n# Prepare the data\n!python data_prepare.py\n# Split the data by person\n!python data_split_person.py", "Load TensorBoard\nNow, we set up TensorBoard so that we can graph our accuracy and loss as training proceeds.", "# Load TensorBoard\n%load_ext tensorboard\n%tensorboard --logdir logs/scalars", "Begin training\nThe following cell will begin the training process. Training will take around 5 minutes on a GPU runtime. You'll see the metrics in TensorBoard after a few epochs.", "!python train.py --model CNN --person true", "Create a C source file\nThe train.py script writes a model, model.tflite, to the training scripts' directory.\nIn the following cell, we convert this model into a C++ source file we can use with TensorFlow Lite for Microcontrollers.", "# Install xxd if it is not available\n!apt-get -qq install xxd\n# Save the file as a C source file\n!xxd -i model.tflite > /content/model.cc\n# Print the source file\n!cat /content/model.cc" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
EvenStrangest/tensorflow
tensorflow/examples/udacity/4_convolutions.ipynb
apache-2.0
[ "Deep Learning\nAssignment 4\nPreviously in 2_fullyconnected.ipynb and 3_regularization.ipynb, we trained fully connected networks to classify notMNIST characters.\nThe goal of this assignment is make the neural network convolutional.", "# These are all the modules we'll be using later. Make sure you can import them\n# before proceeding further.\nfrom __future__ import print_function\nimport numpy as np\nimport tensorflow as tf\nfrom six.moves import cPickle as pickle\nfrom six.moves import range\n\npickle_file = 'notMNIST.pickle'\n\nwith open(pickle_file, 'rb') as f:\n save = pickle.load(f)\n train_dataset = save['train_dataset']\n train_labels = save['train_labels']\n valid_dataset = save['valid_dataset']\n valid_labels = save['valid_labels']\n test_dataset = save['test_dataset']\n test_labels = save['test_labels']\n del save # hint to help gc free up memory\n print('Training set', train_dataset.shape, train_labels.shape)\n print('Validation set', valid_dataset.shape, valid_labels.shape)\n print('Test set', test_dataset.shape, test_labels.shape)", "Reformat into a TensorFlow-friendly shape:\n- convolutions need the image data formatted as a cube (width by height by #channels)\n- labels as float 1-hot encodings.", "image_size = 28\nnum_labels = 10\nnum_channels = 1 # grayscale\n\nimport numpy as np\n\ndef reformat(dataset, labels):\n dataset = dataset.reshape(\n (-1, image_size, image_size, num_channels)).astype(np.float32)\n labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)\n return dataset, labels\ntrain_dataset, train_labels = reformat(train_dataset, train_labels)\nvalid_dataset, valid_labels = reformat(valid_dataset, valid_labels)\ntest_dataset, test_labels = reformat(test_dataset, test_labels)\nprint('Training set', train_dataset.shape, train_labels.shape)\nprint('Validation set', valid_dataset.shape, valid_labels.shape)\nprint('Test set', test_dataset.shape, test_labels.shape)\n\ndef accuracy(predictions, labels):\n return (100.0 * 
np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))\n / predictions.shape[0])", "Let's build a small network with two convolutional layers, followed by one fully connected layer. Convolutional networks are more expensive computationally, so we'll limit its depth and number of fully connected nodes.", "batch_size = 16\npatch_size = 5\ndepth = 16\nnum_hidden = 64\n\ngraph = tf.Graph()\n\nwith graph.as_default():\n\n # Input data.\n tf_train_dataset = tf.placeholder(\n tf.float32, shape=(batch_size, image_size, image_size, num_channels))\n tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n tf_valid_dataset = tf.constant(valid_dataset)\n tf_test_dataset = tf.constant(test_dataset)\n \n # Variables.\n layer1_weights = tf.Variable(tf.truncated_normal(\n [patch_size, patch_size, num_channels, depth], stddev=0.1))\n layer1_biases = tf.Variable(tf.zeros([depth]))\n layer2_weights = tf.Variable(tf.truncated_normal(\n [patch_size, patch_size, depth, depth], stddev=0.1))\n layer2_biases = tf.Variable(tf.constant(1.0, shape=[depth]))\n layer3_weights = tf.Variable(tf.truncated_normal(\n [image_size // 4 * image_size // 4 * depth, num_hidden], stddev=0.1))\n layer3_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden]))\n layer4_weights = tf.Variable(tf.truncated_normal(\n [num_hidden, num_labels], stddev=0.1))\n layer4_biases = tf.Variable(tf.constant(1.0, shape=[num_labels]))\n \n # Model.\n def model(data):\n conv = tf.nn.conv2d(data, layer1_weights, strides=[1, 2, 2, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer1_biases)\n conv = tf.nn.conv2d(hidden, layer2_weights, strides=[1, 2, 2, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer2_biases)\n shape = hidden.get_shape().as_list()\n reshape = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]])\n hidden = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases)\n return tf.matmul(hidden, layer4_weights) + layer4_biases\n \n # Training computation.\n logits = 
model(tf_train_dataset)\n loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))\n \n # Optimizer.\n optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(loss)\n \n # Predictions for the training, validation, and test data.\n train_prediction = tf.nn.softmax(logits)\n valid_prediction = tf.nn.softmax(model(tf_valid_dataset))\n test_prediction = tf.nn.softmax(model(tf_test_dataset))\n\nnum_steps = 1001\n\nwith tf.Session(graph=graph) as session:\n tf.initialize_all_variables().run()\n print('Initialized')\n for step in range(num_steps):\n offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n batch_data = train_dataset[offset:(offset + batch_size), :, :, :]\n batch_labels = train_labels[offset:(offset + batch_size), :]\n feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}\n _, l, predictions = session.run(\n [optimizer, loss, train_prediction], feed_dict=feed_dict)\n if (step % 50 == 0):\n print('Minibatch loss at step %d: %f' % (step, l))\n print('Minibatch accuracy: %.1f%%' % accuracy(predictions, batch_labels))\n print('Validation accuracy: %.1f%%' % accuracy(\n valid_prediction.eval(), valid_labels))\n print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))", "Problem 1\nThe convolutional model above uses convolutions with stride 2 to reduce the dimensionality. 
Replace the strides by a max pooling operation (nn.max_pool()) of stride 2 and kernel size 2.", "batch_size = 16\npatch_size = 5\ndepth = 12 # was 16\nnum_hidden = 64\n\ngraph = tf.Graph()\n\nwith graph.as_default():\n\n # Input data.\n tf_train_dataset = tf.placeholder(\n tf.float32, shape=(batch_size, image_size, image_size, num_channels))\n tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n tf_valid_dataset = tf.constant(valid_dataset)\n tf_test_dataset = tf.constant(test_dataset)\n \n # Variables.\n layer1_weights = tf.Variable(tf.truncated_normal(\n [patch_size, patch_size, num_channels, depth], stddev=0.1))\n layer1_biases = tf.Variable(tf.zeros([depth]))\n layer2_weights = tf.Variable(tf.truncated_normal(\n [patch_size, patch_size, depth, depth], stddev=0.1))\n layer2_biases = tf.Variable(tf.constant(1.0, shape=[depth]))\n layer3_weights = tf.Variable(tf.truncated_normal(\n [image_size // 4 * image_size // 4 * depth, num_hidden], stddev=0.1))\n layer3_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden]))\n layer4_weights = tf.Variable(tf.truncated_normal(\n [num_hidden, num_labels], stddev=0.1))\n layer4_biases = tf.Variable(tf.constant(1.0, shape=[num_labels]))\n \n # Model.\n def model(data):\n conv = tf.nn.conv2d(data, layer1_weights, strides=[1, 1, 1, 1], padding='SAME')\n pooled_logits = tf.nn.max_pool(conv + layer1_biases, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n hidden = tf.nn.relu(pooled_logits)\n conv = tf.nn.conv2d(hidden, layer2_weights, strides=[1, 1, 1, 1], padding='SAME')\n pooled_logits = tf.nn.max_pool(conv + layer2_biases, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n hidden = tf.nn.relu(pooled_logits)\n shape = hidden.get_shape().as_list()\n reshape = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]])\n hidden = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases)\n return tf.matmul(hidden, layer4_weights) + layer4_biases\n \n # Training computation.\n 
logits = model(tf_train_dataset)\n loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))\n \n # Optimizer.\n optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(loss)\n \n # Predictions for the training, validation, and test data.\n train_prediction = tf.nn.softmax(logits)\n valid_prediction = tf.nn.softmax(model(tf_valid_dataset))\n test_prediction = tf.nn.softmax(model(tf_test_dataset))\n\nnum_steps = 1001\n\nwith tf.Session(graph=graph) as session:\n tf.initialize_all_variables().run()\n print('Initialized')\n for step in range(num_steps):\n offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n batch_data = train_dataset[offset:(offset + batch_size), :, :, :]\n batch_labels = train_labels[offset:(offset + batch_size), :]\n feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}\n _, l, predictions = session.run(\n [optimizer, loss, train_prediction], feed_dict=feed_dict)\n if (step % 50 == 0):\n print('Minibatch loss at step %d: %f' % (step, l))\n print('Minibatch accuracy: %.1f%%' % accuracy(predictions, batch_labels))\n print('Validation accuracy: %.1f%%' % accuracy(\n valid_prediction.eval(), valid_labels))\n print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))", "Problem 2\nTry to get the best performance you can using a convolutional net. Look for example at the classic LeNet5 architecture, adding Dropout, and/or adding learning rate decay." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
MehtapIsik/assaytools
examples/direct-fluorescence-assay/1 Simulating Experimental Fluorescence Binding Data.ipynb
lgpl-2.1
[ "In this notebook we will explore plotting complex concentration as a function of Kd.\nWe will simulate expected fluorescence results for a ligand protein with known Kd.", "import matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nfrom IPython.display import display, Math, Latex #Do we even need this anymore?\n%pylab inline", "The Simple Model\n$$L + P \\underset{k_-1}{\\stackrel{k_1}{\\rightleftharpoons}} PL$$\nThis is a simple model of our system.\nWe are assuming complex concentration [PL] is proportional to complex fluorescence (in this particular assay).\nWe estimate/know the total Ligand $[L]{tot} = [L] + [PL]$ and Protein $[P]{tot} = [P] + [PL]$ concentration from the experimental setup, and presume we can measure the complex concentration in some way $[PL]$.\n$$K_{d} = \\frac{[L][P]}{[PL]}$$\nFrom this relation can calculate $K_d$ from these three values.\nLet's take a hypothetical case where $K_d$ = 2 nM. (2e-9 M)\nWhat binding curve would we expect?", "Kd = 2e-9 # M", "The protein concentration for our assay will be 1 nM (half of the Kd).", "Ptot = 1e-9 # M", "The ligand concentration will be a 12-point half-log dilution from 20 uM ligand (down to ~60 pM).", "Ltot = 20.0e-6 / np.array([10**(float(i)/2.0) for i in range(12)]) # M", "To calculate $[PL]$ as a function of $[P]{tot}$, $[L]{tot}$, and $K_d$, we start with\n$$[PL] = \\frac{[L][P]}{K_{d} }$$\nThen we need to put L and P in terms of $[L]{tot}$ and $[P]{tot}$, using\n$$[L] = [L]_{tot}-[PL]$$\n$$[P] = [P]_{tot}-[PL]$$\nThis gives us:\n$$[PL] = \\frac{([L]{tot}-[PL])([P]{tot}-[PL])}{K_{d} }$$\nRearranging to form a quadratic equations, we get:\n$$0 = [PL]^2 - PL + [P]{tot} [L]{tot}$$\nUsing the solution for the quadratic equation:\n$$x = \\frac{-b \\pm \\sqrt{b^2 - 4ac}}{2a}$$\nwhere $x = [PL]$, $a = 1$, $b = -([P]{tot}+[L]{tot}+K_d)$, and $c = [P]{tot} [L]{tot}$. 
We get as the only reasonable solution:\n$$[PL] = \\frac{([P]{tot} + [L]{tot} + K_{d}) - \\sqrt{([P]{tot} + [L]{tot} + K_{d})^2 - 4[P]{tot}[L]{tot}}}{2}$$", "# Now we can use this to define a function that gives us PL from Kd, Ptot, and Ltot.\ndef two_component_binding(Kd, Ptot, Ltot):\n \"\"\"\n Parameters\n ----------\n Kd : float\n Dissociation constant\n Ptot : float\n Total protein concentration\n Ltot : float\n Total ligand concentration\n \n Returns\n -------\n P : float\n Free protein concentration\n L : float\n Free ligand concentration\n PL : float\n Complex concentration\n \"\"\"\n \n PL = 0.5 * ((Ptot + Ltot + Kd) - np.sqrt((Ptot + Ltot + Kd)**2 - 4*Ptot*Ltot)) # complex concentration (uM)\n P = Ptot - PL; # free protein concentration in sample cell after n injections (uM) \n L = Ltot - PL; # free ligand concentration in sample cell after n injections (uM) \n return [P, L, PL]\n\n[L, P, PL] = two_component_binding(Kd, Ptot, Ltot)\n\nprint Ltot\n\nprint PL", "Now we can plot our complex concentration as a function of our ligand concentration!", "# y will be complex concentration\n# x will be total ligand concentration\nplt.semilogx(Ltot, PL, 'ko')\nplt.xlabel('$[L]_{tot}$ / M')\nplt.ylabel('$[PL]$')\nplt.ylim(0, 1.05*np.max(PL))\nplt.axvline(Kd,color='r',linestyle='--',label='K_d')\nplt.legend(loc=0);", "Okay, so now lets do something a little more fun.\nLet's overlap the curves we get for different amounts of protein in the assay.", "[L2, P2, PL2] = two_component_binding(Kd, Ptot/2, Ltot)\n[L3, P3, PL3] = two_component_binding(Kd, Ptot*2, Ltot)\n\n# y will be complex concentration\n# x will be total ligand concentration\nplt.semilogx(Ltot,PL,'b',Ltot,PL2,'g',Ltot,PL3,'k')\nplt.xlabel('$[L]_{tot}$ / M')\nplt.ylabel('$[PL]$ / M')\nplt.ylim(0,2.05e-9)\nplt.axhline(Ptot,color='b',linestyle='--',label='$[P]_{tot}$')\nplt.axhline(Ptot/2,color='g',linestyle='--',label='$[P]_{tot}$/2')\nplt.axhline(Ptot*2,color='k',linestyle='--', 
label='$[P]_{tot}$*2')\nplt.axvline(Kd,color='r',linestyle='--',label='$K_d$')\nplt.legend();", "Let's do even more fun things!\nSay we have one molecule that has a different Kd for a bunch of proteins. We'll keep the protein concentration the same, but look at how our complex concentration changes as a function of Kd.", "[L4, P4, PL4] = two_component_binding(Kd/10, Ptot, Ltot)\n[L5, P5, PL5] = two_component_binding(Kd*10, Ptot, Ltot)\n\n# y will be complex concentration\n# x will be total ligand concentration\nplt.semilogx(Ltot,PL,'o',label='$K_d$');\nplt.semilogx(Ltot,PL4,'violet',label='0.1 $K_d$');\nplt.semilogx(Ltot,PL5,'.75',label='10 $K_d$')\nplt.xlabel('$[L]_{tot} / M$')\nplt.ylabel('$[PL]$ / M')\nplt.ylim(0,1.05e-9)\nplt.axhline(Ptot,color='0.75',linestyle='--',label='$[P]_{tot}$')\n#plt.axvline(Kd/10,color='violet',label='Kd/10')\n#plt.axvline(Kd*10,color='.75',label='Kd*10')\nplt.axvline(Kd,color='r',linestyle='--',label='$K_d$')\nplt.legend();", "Now let's make this new plot for 'simulated model of dilution series experiment' figure", "# Let's plot Kd's ranging from 1mM to 10pM\nKd_max = 1e-3 # M\n\n[La, Pa, PLa] = two_component_binding(Kd_max, Ptot, Ltot)\n[Lb, Pb, PLb] = two_component_binding(Kd_max/10, Ptot, Ltot)\n[Lc, Pc, PLc] = two_component_binding(Kd_max/100, Ptot, Ltot)\n[Ld, Pd, PLd] = two_component_binding(Kd_max/1e3, Ptot, Ltot)\n[Le, Pe, PLe] = two_component_binding(Kd_max/1e4, Ptot, Ltot)\n[Lf, Pf, PLf] = two_component_binding(Kd_max/1e5, Ptot, Ltot)\n[Lg, Pg, PLg] = two_component_binding(Kd_max/1e6, Ptot, Ltot)\n[Lh, Ph, PLh] = two_component_binding(Kd_max/1e7, Ptot, Ltot)\n[Li, Pi, PLi] = two_component_binding(Kd_max/1e8, Ptot, Ltot)\n[Lj, Pj, PLj] = two_component_binding(Kd_max/1e9, Ptot, Ltot)\n\n# y will be complex concentration\n# x will be total ligand concentration\nplt.figure(figsize=(10,3))\nplt.semilogx(Ltot,PLa,'-bo',label='1 mM');\nplt.semilogx(Ltot,PLb,'-ko',label='100 $\\mu$M');\nplt.semilogx(Ltot,PLc,'-go',label='10 
$\\mu$M');\nplt.semilogx(Ltot,PLd,'-ro',label='1 $\\mu$M');\nplt.semilogx(Ltot,PLe,'-co',label='100 nM');\nplt.semilogx(Ltot,PLf,'-mo',label='10 nM');\nplt.semilogx(Ltot,PLg,'-yo',label='1 nM');\nplt.semilogx(Ltot,PLh,'-bo',label='100 pM');\nplt.semilogx(Ltot,PLi,'-ko',label='10 pM');\nplt.semilogx(Ltot,PLj,'-go',label='1 pM');\nplt.xlabel('$[L]_{tot}$ / M')\nplt.ylabel('$[PL]$ / M')\nplt.xlim(1.5e-12,1.5e-4)\nplt.axhline(0.1e-9,color='0.75',linestyle='--',label='detection limit');\nplt.legend(loc=0);", "Okay! Now let's do some stuff with kinases!\nWe're going to pick 10 kinases and look at what binding curves we would expect to the fluorescent inhibitor bosutinib.\nInfo from: http://www.guidetopharmacology.org/GRAC/LigandDisplayForward?tab=screens&ligandId=5710\nSpecifically: http://www.guidetopharmacology.org/GRAC/LigandScreenDisplayForward?ligandId=5710&screenId=2\nAgain units in nM. Abl1 value is for nonphosphorylated form. Others don't seem to specify?", "Kd_Src = 1.0e-9 # M\nKd_Abl = 0.12e-9 # M\nKd_Abl_T315I = 21.0e-9 # M\nKd_p38 = 3000.0e-9 # M \nKd_Aur = 3000.0e-9 # M\nKd_CK2 = 3000.0e-9 # M\nKd_SYK = 290.0e-9 # M\nKd_DDR = 120.0e-9 # M\nKd_MEK = 19.0e-9 # M\n\n#This CK2, Aur, and p38 value is actually 'greater than'.", "We'll use the same Ltot and Ptot as before.", "[L6, P6, PL6] = two_component_binding(Kd_Src, Ptot, Ltot)\n[L7, P7, PL7] = two_component_binding(Kd_Abl, Ptot, Ltot)\n[L8, P8, PL8] = two_component_binding(Kd_Abl_T315I, Ptot, Ltot)\n[L9, P9, PL9] = two_component_binding(Kd_p38, Ptot, Ltot)\n\n# y will be complex concentration\n# x will be total ligand concentration\nSrc, = plt.semilogx(Ltot,PL6,'o', label='Src')\nAbl, = plt.semilogx(Ltot,PL7,'violet', label = 'Abl')\nAblGK, = plt.semilogx(Ltot,PL8,'.75', label = 'AblGK')\np38, = plt.semilogx(Ltot,PL9,'k', label = 'p38')\nplt.axhline(0.1e-9,color='0.75',linestyle='--', label='detection limit');\nplt.xlabel('$[L]_{tot}$')\nplt.ylabel('$[PL]$')\n#plt.legend(handles=[Src, Abl, AblGK, p38], loc 
=0);\nplt.legend(loc=0);", "Okay, this is all great! In theory we can use this with whatever protein-ligand combination we want!\nBut in practice there are limitations!\n\nExperimental error\nWe want to limit the amount of protein used\nThe ligand also fluoresces.\nThe inner filter effect\nThe Fluorescence detection has a detection limit.\n\nHow do these limit the Kd, kinase, inhibitor, and the concentrations of kinase and inhibitor we can effectively access in our experimental design?" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
hankcs/HanLP
plugins/hanlp_demo/hanlp_demo/zh/extractive_summarization_restful.ipynb
apache-2.0
[ "<h2 align=\"center\">点击下列图标在线运行HanLP</h2>\n<div align=\"center\">\n <a href=\"https://colab.research.google.com/github/hankcs/HanLP/blob/doc-zh/plugins/hanlp_demo/hanlp_demo/zh/extractive_summarization_restful.ipynb\" target=\"_blank\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>\n <a href=\"https://mybinder.org/v2/gh/hankcs/HanLP/doc-zh?filepath=plugins%2Fhanlp_demo%2Fhanlp_demo%2Fzh%2Fextractive_summarization_restful.ipynb\" target=\"_blank\"><img src=\"https://mybinder.org/badge_logo.svg\" alt=\"Open In Binder\"/></a>\n</div>\n\n安装\n无论是Windows、Linux还是macOS,HanLP的安装只需一句话搞定:", "!pip install hanlp_restful -U", "创建客户端", "from hanlp_restful import HanLPClient\nHanLP = HanLPClient('https://www.hanlp.com/api', auth=None, language='zh') # auth不填则匿名,zh中文,mul多语种", "申请秘钥\n由于服务器算力有限,匿名用户每分钟限2次调用。如果你需要更多调用次数,建议申请免费公益API秘钥auth。\n抽取式自动摘要\n抽取式自动摘要的目标是从文章中筛选出一些作为摘要的中心句子:既要紧扣要点,又要避免赘语。\n中文\n抽取式自动摘要任务的输入为一段文本和所需的摘要句子数量的最大值topk:", "text = '''\n据DigiTimes报道,在上海疫情趋缓,防疫管控开始放松后,苹果供应商广达正在逐步恢复其中国工厂的MacBook产品生产。\n据供应链消息人士称,生产厂的订单拉动情况正在慢慢转强,这会提高MacBook Pro机型的供应量,并缩短苹果客户在过去几周所经历的延长交货时间。\n仍有许多苹果笔记本用户在等待3月和4月订购的MacBook Pro机型到货,由于苹果的供应问题,他们的发货时间被大大推迟了。\n据分析师郭明錤表示,广达是高端MacBook Pro的唯一供应商,自防疫封控依赖,MacBook Pro大部分型号交货时间增加了三到五周,\n一些高端定制型号的MacBook Pro配置要到6月底到7月初才能交货。\n尽管MacBook Pro的生产逐渐恢复,但供应问题预计依然影响2022年第三季度的产品销售。\n苹果上周表示,防疫措施和元部件短缺将继续使其难以生产足够的产品来满足消费者的强劲需求,这最终将影响苹果6月份的收入。\n'''\nHanLP.extractive_summarization(text, topk=3)", "返回值为最多topk个摘要句子以及相应的权重,权重取值区间为$[0, 1]$。由于Trigram Blocking技巧,实际返回的摘要句数量可能小于topk。\n可视化", "def highlight(text, scores):\n for k, v in scores.items():\n text = text.replace(k, f'<span style=\"background-color:rgba(255, 255, 0, {v});\">{k}</span>')\n from IPython.display import display, HTML\n display(HTML(text))\n\nscores = HanLP.extractive_summarization(text, topk=100)\nhighlight(text, scores)", "繁体中文\nHanLP的抽取式自动摘要接口支持繁体中文:", "text = 
'''\n華爾街日報周二(3日)報導,根據知情人透露,日前已宣布將以440億美元買下推特(Twitter)並下市的馬斯克,曾經跟一些潛在投資人說,他可以在短短幾年後,再將這家社群媒體公司重新上市。\n消息來源說,特斯拉創辦人兼執行長馬斯克表示,他計劃在買下推特後最短三年內,就展開推特的首次公開發行股票。\n馬斯克買推特的交易案預期在今年稍後走完程序,包括獲得股東同意以及監管機關核准等步驟。\n根據之前華爾街日報的報導,馬斯克為購買推特籌現金時,與私募股權公司等投資人討論出資事宜,Apollo Global Management有興趣參與。\n私募股權公司通常都先買下公司將之私有化,把公司移出眾人注目的焦點之外以後,整頓公司,接著再把公司上市,時間常是五年左右。\n華爾街日報指出,馬斯克暗示他對推特有類似的規劃的話,有助說服潛在投資人,他會很快行動,改善推特的營運和獲利。\n'''\nscores = HanLP.extractive_summarization(text)\nscores\n\nhighlight(text, scores)", "英文\n按照HanLP一贯的多语种设计,任何语言都支持。由于服务器GPU资源限制,目前英文接口暂未上线。如果你有相应需求,欢迎前往论坛发起请愿。" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
caporaso-lab/tax-credit
ipynb/novel-taxa/taxonomy-assignment.ipynb
bsd-3-clause
[ "Data generation: using python to sweep over methods and parameters\nIn this notebook, we illustrate how to use python to generate and run a list of commands. In this example, we generate a list of QIIME 1.9.0 assign_taxonomy.py commands, though this workflow for command generation is generally very useful for performing parameter sweeps (i.e., exploration of sets of parameters for achieving a specific result for comparative purposes). \nEnvironment preparation", "from os import system\nfrom os.path import join, expandvars \nfrom joblib import Parallel, delayed\nfrom glob import glob\nfrom tax_credit.framework_functions import (recall_novel_taxa_dirs,\n parameter_sweep,\n move_results_to_repository)\n\n\nproject_dir = \"../..\"\nanalysis_name= \"novel-taxa-simulations\"\n\nresults_dir = join('..', '..', 'novel-taxa-tmp')", "Preparing data set sweep\nFirst, we're going to define the data sets that we'll sweep over. As the simulated novel taxa dataset names depend on how the database generation notebook was executed, we must define the variables used to create these datasets. If you modified any variables in that notebook, set these same variables below. If you did not, then do not modify.\nrecall_novel_taxa_dirs() generates a list of dataset_reference_combinations and a dictionary of reference_dbs mapped to each dataset, which we feed to parameter_sweep below.", "iterations = 10\ndata_dir = join(project_dir, \"data\", analysis_name)\n# databases is a list of names given as dictionary keys in the second\n# cell of the database generation notebook. Just list the names here.\ndatabases = ['B1-REF', 'F1-REF']\n\n# Generate a list of input directories\n(dataset_reference_combinations, reference_dbs) = recall_novel_taxa_dirs(data_dir, databases, iterations)", "Preparing the method/parameter combinations and generating commands\nNow we set the methods and method-specific parameters that we want to sweep. Modify to sweep other methods. 
Note how method_parameters_combinations feeds method/parameter combinations to parameter_sweep() in the cell below.\nAssignment Using QIIME 1 or Command-Line Classifiers\nHere we provide an example of taxonomy assignment using legacy QIIME 1 classifiers executed on the command line. To accomplish this, we must first convert commands to a string, which we then pass to bash for execution. As QIIME 1 is written in python-2, we must also activate a separate environment in which QIIME 1 has been installed. If any environmental variables need to be set (in this example, the RDP_JAR_PATH), we must also source the .bashrc file.", "method_parameters_combinations = { # probabalistic classifiers\n 'rdp': {'confidence': [0.0, 0.1, 0.2, 0.3, 0.4, 0.5,\n 0.6, 0.7, 0.8, 0.9, 1.0]},\n \n # global alignment classifiers\n 'uclust': {'min_consensus_fraction': [0.51, 0.76, 1.0], \n 'similarity': [0.8, 0.9],\n 'uclust_max_accepts': [1, 3, 5]},\n \n # local alignment classifiers\n 'sortmerna': {'sortmerna_e_value': [1.0],\n 'min_consensus_fraction': [0.51, 0.76, 1.0], \n 'similarity': [0.8, 0.9],\n 'sortmerna_best_N_alignments ': [1, 3, 5],\n 'sortmerna_coverage' : [0.8, 0.9]},\n 'blast' : {'blast_e_value' : [0.0000000001, 0.001, 1, 1000]}\n }", "Now enter the template of the command to sweep, and generate a list of commands with parameter_sweep().\nFields must adhere to following format:\n {0} = output directory\n {1} = input data\n {2} = output destination\n {3} = reference taxonomy\n {4} = method name\n {5} = other parameters", "command_template = 'bash -c \"source activate qiime1; source ./.bashrc; mkdir -p {0} ; assign_taxonomy.py -v -i {1} -o {0} -r {2} -t {3} -m {4} {5} --rdp_max_memory 16000\"'\n\ncommands = parameter_sweep(data_dir, results_dir, reference_dbs,\n dataset_reference_combinations,\n method_parameters_combinations, command_template,\n infile='query.fasta', output_name='query_tax_assignments.txt')\n", "As a sanity check, we can look at the first command that was 
generated and the number of commands generated.", "for method in method_parameters_combinations:\n print(method)\n for command in commands:\n if '/'+method+'/' in command:\n print(command)\n break\n\nprint(len(commands))", "Finally, we run our commands.", "Parallel(n_jobs=23)(delayed(system)(command) for command in commands);", "BLAST+", "(dataset_reference_combinations, reference_dbs) = recall_novel_taxa_dirs(\n data_dir, databases, iterations, ref_seqs='ref_seqs.qza', ref_taxa='ref_taxa.qza')\n\nmethod_parameters_combinations = {\n 'blast+' : {'p-evalue': [0.001],\n 'p-maxaccepts': [1, 10],\n 'p-perc-identity': [0.80, 0.97, 0.99],\n 'p-min-consensus': [0.51, 0.75, 0.99]}\n }\n\ncommand_template = (\"mkdir -p {0}; \"\n \"qiime feature-classifier classify-consensus-blast --i-query {1} --o-classification \"\n \"{0}/rep_seqs_tax_assignments.qza --i-reference-reads {2} --i-reference-taxonomy {3} {5}; \"\n \"qiime tools export {0}/rep_seqs_tax_assignments.qza --output-dir {0}; \"\n \"mv {0}/taxonomy.tsv {0}/query_tax_assignments.txt\")\n \n\ncommands = parameter_sweep(data_dir, results_dir, reference_dbs,\n dataset_reference_combinations,\n method_parameters_combinations, command_template,\n infile='query.qza', output_name='rep_seqs_tax_assignments.qza')\n\nParallel(n_jobs=23)(delayed(system)(command) for command in commands);", "VSEARCH", "method_parameters_combinations = {\n 'vsearch' : {'p-maxaccepts': [1, 10],\n 'p-perc-identity': [0.80, 0.90, 0.97, 0.99],\n 'p-min-consensus': [0.51, 0.99]}\n }\n\ncommand_template = (\"mkdir -p {0}; \"\n \"qiime feature-classifier classify-consensus-vsearch --i-query {1} --o-classification \"\n \"{0}/rep_seqs_tax_assignments.qza --i-reference-reads {2} --i-reference-taxonomy {3} {5}; \"\n \"qiime tools export {0}/rep_seqs_tax_assignments.qza --output-dir {0}; \"\n \"mv {0}/taxonomy.tsv {0}/query_tax_assignments.txt\")\n \ncommands = parameter_sweep(data_dir, results_dir, reference_dbs,\n dataset_reference_combinations,\n 
method_parameters_combinations, command_template,\n infile='query.qza', output_name='rep_seqs_tax_assignments.qza')\n\nParallel(n_jobs=23)(delayed(system)(command) for command in commands);", "scikit-learn", "method_parameters_combinations = {\n 'naive-bayes' : {'p-feat-ext--ngram-range': \n ['[4,4]', '[6,6]', '[8,8]', '[16,16]', '[32,32]',\n '[7,7]', '[9,9]', '[10,10]', '[11,11]', \n '[12,12]', '[14,14]', '[18,18]'],\n 'p-classify--alpha': [0.001]},\n 'naive-bayes-bespoke' : {'p-feat-ext--ngram-range': \n ['[4,4]', '[6,6]', '[8,8]', '[16,16]', '[32,32]',\n '[7,7]', '[9,9]', '[10,10]', '[11,11]', \n '[12,12]', '[14,14]', '[18,18]'],\n 'p-classify--alpha': [0.001],\n 'p-classify--fit-prior': ['']}\n}\n\ncommand_template = ('mkdir -p \"{0}\"; '\n 'qiime feature-classifier fit-classifier-naive-bayes --o-classifier '\n '\"{0}/classifier.qza\" --i-reference-reads {2} --i-reference-taxonomy {3} {5}; ')\n\nconfidences = [0.0, 0.5, 0.7, 0.9, 0.92, 0.94,\n 0.96, 0.98, 1.0]\ncommand_template += ''.join(\n 'mkdir -p \"{0}:' + str(c) + '\"; '\n 'qiime feature-classifier classify-sklearn '\n '--o-classification \"{0}:' + str(c) + '/rep_seqs_tax_assignments.qza\" '\n '--i-classifier \"{0}/classifier.qza\" '\n '--i-reads {1} --p-confidence ' + str(c) + '; '\n 'qiime tools export \"{0}:' + str(c) + '/rep_seqs_tax_assignments.qza\" --output-dir \"{0}:' + str(c) + '\"; '\n 'mv \"{0}:' + str(c) + '/taxonomy.tsv\" \"{0}:' + str(c) + '/query_tax_assignments.txt\"; 'for c in confidences)\n\ncommand_template += 'rm \"{0}/classifier.qza\"; rmdir \"{0}\"'\n\ncommands = parameter_sweep(data_dir, results_dir, reference_dbs,\n dataset_reference_combinations,\n method_parameters_combinations, command_template,\n infile='query.qza', output_name='rep_seqs_tax_assignments.qza')\n\nprint(len(commands), 'commands')\nprint('\\n\\n'.join(commands[0].split(';')))\n\nParallel(n_jobs=23)(delayed(system)(command) for command in commands);", "Move result files to repository\nAdd results to the tax-credit 
directory (e.g., to push these results to the repository or compare with other precomputed results in downstream analysis steps). The precomputed_results_dir path and methods_dirs glob below should not need to be changed unless if substantial changes were made to filepaths in the preceding cells.", "precomputed_results_dir = join(project_dir, \"data\", \"precomputed-results\", analysis_name)\nmethod_dirs = glob(join(results_dir, '*', '*', '*', '*'))\nmove_results_to_repository(method_dirs, precomputed_results_dir)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
yevheniyc/C
1t_DataAnalysisMLPython/1j_ML/DS_ML_Py_SBO/DataScience/3_Distributions/Distributions.ipynb
mit
[ "Examples of Data Distributions\nUniform Distribution", "%matplotlib inline\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nvalues = np.random.uniform(-10.0, 10.0, 100000)\nplt.hist(values, 50)\nplt.show()", "Normal / Gaussian\nVisualize the probability density function:", "from scipy.stats import norm\nimport matplotlib.pyplot as plt\n\nx = np.arange(-3, 3, 0.001)\nplt.plot(x, norm.pdf(x))\n", "Generate some random numbers with a normal distribution. \"mu\" is the desired mean, \"sigma\" is the standard deviation:", "import numpy as np\nimport matplotlib.pyplot as plt\n\nmu = 5.0\nsigma = 2.0\nvalues = np.random.normal(mu, sigma, 10000)\nplt.hist(values, 50)\nplt.show()", "Exponential PDF / \"Power Law\"", "from scipy.stats import expon\nimport matplotlib.pyplot as plt\n\nx = np.arange(0, 10, 0.001)\nplt.plot(x, expon.pdf(x))", "Binomial Probability Mass Function", "from scipy.stats import binom\nimport matplotlib.pyplot as plt\n\n# n -> number of events, i.e. flipping a coin 10 times\n# p -> probability of the event occuring: 50% chance of getting heads\nn, p = 10, 0.5\nx = np.arange(0, 10, 0.001)\nplt.plot(x, binom.pmf(x, n, p))", "Poisson Probability Mass Function\nExample: My website gets on average 500 visits per day. What's the odds of getting 550?", "from scipy.stats import poisson\nimport matplotlib.pyplot as plt\n\nmu = 500\nx = np.arange(400, 600, 0.5)\nplt.plot(x, poisson.pmf(x, mu))", "Pop Quiz!\nWhat's the equivalent of a probability distribution function when using discrete instead of continuous data?", "# probability mass function" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
feststelltaste/software-analytics
demos/IntelliJ IDEA Analysis.ipynb
gpl-3.0
[ "IntelliJ IDEA Analysis\nCase study\nIntelliJ IDEA\n\nIDE for Java developers\nWritten almost entirely in Java\nLarge and long active project\n\nI. Stating Question (1/3)\n\nWrite down the question explicitly\nExplain the anaysis idea in an understandable way\n\nI. Stating Question (2/3)\n<b>Question</b>\n* Which source code files are particularly complex and changed frequently recently?\nI. Stating Question (3/3)\nImplementation ideas\n\nTools: jupyter, python, pandas, matplotlib\nHeuristics:\n\"complex\": many lines of source code.\n\"changes ... frequently\": high number of commits\n\"recently\": last 90 days\n\nMeta Objective: Learn basic mechanics.\nII. Exploratory Data Analysis\n\nFind and load possible software data\nClean and filter the raw data\n\nWe load a data export from a Git repository.", "import pandas as pd\nlog = pd.read_csv(\"dataset/git_log_intellij.csv.gz\")\nlog.head()", "We look at basic info about the dataset.", "log.info()", "<b>1</b> DataFrame (~ programmable Excel worksheet), <b>6</b> Series (= columns), <b>1128819</b> entries (= rows)\nWe convert the timestamps of texts into objects.", "log['timestamp'] = pd.to_datetime(log['timestamp'])\nlog.head()", "We're just looking at recent changes.", "# use log['timestamp'].max() instead of pd.Timedelta('today') to avoid outdated data in the future\nrecent = log[log['timestamp'] > log['timestamp'].max() - pd.Timedelta('90 days')]\nrecent.head()", "We want to use only Java code.", "java = recent[recent['filename'].str.endswith(\".java\")].copy()\njava.head() ", "III. Formal Modeling\n\nCreate new views\nBlend in more data\n\nWir zählen die Anzahl der Änderungen je Datei.", "changes = java.groupby('filename')[['sha']].count()\nchanges.head()", "We add info about the code lines...", "loc = pd.read_csv(\"dataset/cloc_intellij.csv.gz\", index_col=1)\nloc.head()", "...and blend them with the existing data.", "hotspots = changes.join(loc[['code']]).dropna(subset=['code'])\nhotspots.head()", "VI. 
Interpretation\n\nElaborate the core result of the analysis.\nMake the central message / new findings clear\n\nWe only show the TOP 10 hotspots in the code.", "top10 = hotspots.sort_values(by=\"sha\", ascending=False).head(10)\ntop10", "V. Communication\n\nTransform the findings into an understandable visualization\nCommunicate the next steps after analysis\n\nWe generate an XY chart from the TOP 10 list.", "ax = top10.plot.scatter('sha', 'code');\n\nfor k, v in top10.iterrows():\n ax.annotate(k.split(\"/\")[-1], v)", "<b>Result:</b> There are a few complex files that change very frequently. Next step is to investigate those files in more detail.\nBonus\nWhich files change particularly frequently in general?", "most_changes = hotspots['sha'].sort_values(ascending=False)\nmost_changes.head(10)", "*We visualize this with a simple line graph.", "most_changes.plot(rot=90);", "End of Demo" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
lmoresi/UoM-VIEPS-Intro-to-Python
Notebooks/SphericalMeshing/CartesianTriangulations/Ex7-Refinement-of-Triangulations.ipynb
mit
[ "Example 7 - Refining a triangulation\nWe have seen how the standard meshes can be uniformly refined to finer resolution. The routines used for this task are available to the stripy user for non-uniform refinement as well. \nNotebook contents\n\nUniform meshes\nRefinement strategies\nVisualisation\nTargetted refinement\nVisualisation", "import stripy as stripy\nimport numpy as np\n\nxmin = 0.0\nxmax = 10.0\nymin = 0.0\nymax = 10.0\nextent = [xmin, xmax, ymin, ymax]\n\nspacingX = 0.5\nspacingY = 0.5", "Uniform meshes by refinement\nThe refinement_level parameter of the stripy meshes makes repeated loops determining the bisection points of all the existing edges in the triangulation and then creating a new triangulation that includes these points and the original ones. These refinement operations can also be used for non-uniform refinement.", "ellip0 = stripy.cartesian_meshes.elliptical_mesh(extent, spacingX, spacingY, refinement_levels=0)\nellip1 = stripy.cartesian_meshes.elliptical_mesh(extent, spacingX, spacingY, refinement_levels=1)\nellip2 = stripy.cartesian_meshes.elliptical_mesh(extent, spacingX, spacingY, refinement_levels=2)\nellip3 = stripy.cartesian_meshes.elliptical_mesh(extent, spacingX, spacingY, refinement_levels=3)\nellip4 = stripy.cartesian_meshes.elliptical_mesh(extent, spacingX, spacingY, refinement_levels=4)\nellip5 = stripy.cartesian_meshes.elliptical_mesh(extent, spacingX, spacingY, refinement_levels=5)\nellip6 = stripy.cartesian_meshes.elliptical_mesh(extent, spacingX, spacingY, refinement_levels=6)\nellip7 = stripy.cartesian_meshes.elliptical_mesh(extent, spacingX, spacingY, refinement_levels=7)\n\nprint(\"Size of mesh - 1 {}\".format(ellip1.points.shape[0]))\nprint(\"Size of mesh - 2 {}\".format(ellip2.points.shape[0]))\nprint(\"Size of mesh - 3 {}\".format(ellip3.points.shape[0]))\nprint(\"Size of mesh - 4 {}\".format(ellip4.points.shape[0]))\nprint(\"Size of mesh - 5 {}\".format(ellip5.points.shape[0]))\nprint(\"Size of mesh - 6 
{}\".format(ellip6.points.shape[0]))\nprint(\"Size of mesh - 7 {}\".format(ellip7.points.shape[0]))\n", "Refinement strategies\nFive refinement strategies:\n\nBisect all segments connected to a given node\nRefine all triangles connected to a given node by adding a point at the centroid or bisecting all edges\nRefine a given triangle by adding a point at the centroid or bisecting all edges\n\nThese are provided as follows:", "\nmx, my = ellip2.midpoint_refine_triangulation_by_vertices(vertices=[1,2,3,4,5,6,7,8,9,10])\nellip2mv = stripy.Triangulation(mx, my)\n\nmx, my = ellip2.edge_refine_triangulation_by_vertices(vertices=[1,2,3,4,5,6,7,8,9,10])\nellip2ev = stripy.Triangulation(mx, my)\n\nmx, my = ellip2.centroid_refine_triangulation_by_vertices(vertices=[1,2,3,4,5,6,7,8,9,10])\nellip2cv = stripy.Triangulation(mx, my)\n\nmx, my = ellip2.edge_refine_triangulation_by_triangles(triangles=[1,2,3,4,5,6,7,8,9,10])\nellip2et = stripy.Triangulation(mx, my)\n\nmx, my = ellip2.centroid_refine_triangulation_by_triangles(triangles=[1,2,3,4,5,6,7,8,9,10])\nellip2ct = stripy.Triangulation(mx, my)\n\n\nstr_fmt = \"refinement --- {} points, {} simplices\"\nprint(str_fmt.format(ellip2mv.npoints, ellip2mv.simplices.shape[0]))\nprint(str_fmt.format(ellip2ev.npoints, ellip2ev.simplices.shape[0]))\nprint(str_fmt.format(ellip2cv.npoints, ellip2cv.simplices.shape[0]))\nprint(str_fmt.format(ellip2et.npoints, ellip2et.simplices.shape[0]))\nprint(str_fmt.format(ellip2ct.npoints, ellip2ct.simplices.shape[0]))\n", "Visualisation of refinement strategies", "%matplotlib inline\nimport matplotlib.pyplot as plt\n\n\ndef mesh_fig(mesh, meshR, name):\n\n fig = plt.figure(figsize=(10, 10), facecolor=\"none\")\n ax = plt.subplot(111)\n ax.axis('off')\n\n generator = mesh\n refined = meshR\n\n x0 = generator.x\n y0 = generator.y\n\n xR = refined.x\n yR = refined.y\n \n\n ax.scatter(x0, y0, color=\"Red\", marker=\"o\", s=50)\n ax.scatter(xR, yR, color=\"DarkBlue\", marker=\"o\", s=10)\n \n 
ax.triplot(xR, yR, refined.simplices, color=\"black\", linewidth=0.5)\n\n fig.savefig(name, dpi=250, transparent=True)\n \n return\n\n\nmesh_fig(ellip2, ellip2mv, \"EdgeByVertex1to10\" )\nmesh_fig(ellip2, ellip2ev, \"EdgeByVertexT1to10\" )\nmesh_fig(ellip2, ellip2cv, \"CentroidByVertexT1to10\" )\nmesh_fig(ellip2, ellip2et, \"EdgeByTriangle1to10\" )\nmesh_fig(ellip2, ellip2ct, \"CentroidByTriangle1to10\" )\n\n", "Targetted refinement\nHere we refine a triangulation to a specific criterion - resolving two points in distinct triangles or with distinct nearest neighbour vertices.", "points = np.array([[ 3.33, 3.33], [7.77, 7.77]]).T\n\ntriangulations = [ellip1]\nnearest, distances = triangulations[-1].nearest_vertex(points[:,0], points[:,1])\n\nmax_depth = 10\n\nwhile nearest[0] == nearest[1] and max_depth > 0:\n\n xs, ys = triangulations[-1].centroid_refine_triangulation_by_vertices(vertices=nearest[0])\n new_triangulation = stripy.Triangulation(xs, ys)\n nearest, distances = new_triangulation.nearest_vertex(points[:,0], points[:,1])\n triangulations.append(new_triangulation)\n \n max_depth -= 1\n\nprint(\"refinement_steps = {}\".format(len(triangulations)))\n\ncentroid_triangulations = triangulations[:]\n\ntriangulations = [ellip1]\nnearest, distances = triangulations[-1].nearest_vertex(points[:,0], points[:,1])\n\nmax_depth = 10\n\nwhile nearest[0] == nearest[1] and max_depth > 0:\n\n xs, ys = triangulations[-1].edge_refine_triangulation_by_vertices(vertices=nearest[0])\n new_triangulation = stripy.Triangulation(xs, ys)\n nearest, distances = new_triangulation.nearest_vertex(points[:,0], points[:,1])\n triangulations.append(new_triangulation)\n \n max_depth -= 1\n\nprint(\"refinement_steps = {}\".format(len(triangulations)))\n\nedge_triangulations = triangulations[:]\n\ntriangulations = [ellip1]\n\nin_triangle = triangulations[-1].containing_triangle(points[:,0], points[:,1])\n\nmax_depth = 10\n\nwhile in_triangle[0] == in_triangle[1] and max_depth > 0:\n\n xs, ys = 
triangulations[-1].edge_refine_triangulation_by_triangles(in_triangle[0])\n new_triangulation = stripy.Triangulation(xs, ys)\n in_triangle = new_triangulation.containing_triangle(points[:,0], points[:,1])\n triangulations.append(new_triangulation)\n \n print(in_triangle)\n\n\n \n if in_triangle.shape[0] == 0:\n break\n \n max_depth -= 1\n\nprint(\"refinement_steps = {}\".format(len(triangulations)))\n\nedge_t_triangulations = triangulations[:]\n\ntriangulations = [ellip1]\n\nin_triangle = triangulations[-1].containing_triangle(points[:,0], points[:,1])\n\nmax_depth = 10\n\nwhile in_triangle[0] == in_triangle[1] and max_depth > 0:\n\n xs, ys = triangulations[-1].centroid_refine_triangulation_by_triangles(in_triangle[0])\n new_triangulation = stripy.Triangulation(xs, ys)\n in_triangle = new_triangulation.containing_triangle(points[:,0], points[:,1])\n triangulations.append(new_triangulation)\n \n print(in_triangle)\n \n if in_triangle.shape[0] == 0:\n break\n \n max_depth -= 1\n\nprint(\"refinement_steps = {}\".format(len(triangulations)))\n\ncentroid_t_triangulations = triangulations[:]", "Visualisation of targetted refinement", "import matplotlib.pyplot as plt\n%matplotlib inline\n\nstr_fmt = \"{:18} --- {} simplices, equant max = {:.2f}, equant min = {:.2f}, size ratio = {:.2f}\"\n\n\nmesh_fig(edge_triangulations[0], edge_triangulations[-1], \"EdgeByVertex\" )\n\nT = edge_triangulations[-1]\nE = np.array(T.edge_lengths()).T\nA = np.array(T.areas()).T\nequant = np.max(E, axis=1) / np.min(E, axis=1)\nsize_ratio = np.sqrt(np.max(A) / np.min(A))\nprint(str_fmt.format(\"EdgeByVertex\", T.simplices.shape[0], equant.max(), equant.min(), size_ratio))\n\n\nmesh_fig(edge_t_triangulations[0], edge_t_triangulations[-1], \"EdgeByTriangle\" )\n\nT = edge_t_triangulations[-1]\nE = np.array(T.edge_lengths()).T\nA = np.array(T.areas()).T\nequant = np.max(E, axis=1) / np.min(E, axis=1)\nsize_ratio = np.sqrt(np.max(A) / np.min(A))\nprint(str_fmt.format(\"EdgeByTriangle\", 
T.simplices.shape[0], equant.max(), equant.min(), size_ratio))\n\n\nmesh_fig(centroid_triangulations[0], centroid_triangulations[-1], \"CentroidByVertex\" )\n\nT = centroid_triangulations[-1]\nE = np.array(T.edge_lengths()).T\nA = np.array(T.areas()).T\nequant = np.max(E, axis=1) / np.min(E, axis=1)\nsize_ratio = np.sqrt(np.max(A) / np.min(A))\nprint(str_fmt.format(\"CentroidByVertex\", T.simplices.shape[0], equant.max(), equant.min(), size_ratio))\n\n\n\nmesh_fig(centroid_t_triangulations[0], centroid_t_triangulations[-1], \"CentroidByTriangle\" )\n\nT = centroid_t_triangulations[-1]\nE = np.array(T.edge_lengths()).T\nA = np.array(T.areas()).T\nequant = np.max(E, axis=1) / np.min(E, axis=1)\nsize_ratio = np.sqrt(np.max(A) / np.min(A))\nprint(str_fmt.format(\"CentroidByTriangle\", T.simplices.shape[0], equant.max(), equant.min(), size_ratio))\n" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
sebsch/WT-2_Such-_und_Texttechnologien
Decision-Tree_Learning.ipynb
gpl-3.0
[ "import pandas as pd\nfrom math import log2\n%matplotlib inline", "Decision-Tree Learning\nDecision tree learning uses a decision tree as a predictive model observations about an item (represented in the branches) to conclusions about the item's target value (represented in the leaves). It is one of the predictive modelling approaches used in statistics, data mining and machine learning. Tree models where the target variable can take a finite set of values are called classification trees; in these tree structures, leaves represent class labels and branches represent conjunctions of features that lead to those class labels. Decision trees where the target variable can take continuous values (typically real numbers) are called regression trees.\nSource\nEntropy\n$$\nE(S) = - \\sum\\limits_{i=1}^{n} p_1 \\cdot \\log_2(p_i) \n$$", "def entropy(S):\n \n outcomes = pd.unique(S[final])\n ent = lambda p: 0 if p == 0 else p * log2(p) \n \n return -sum([ ent( S[S[final] == o].size / S.size ) for o in outcomes ])\n", "Information\n$$\nI(S, A) = - \\sum\\limits_{i=1}^{n} \\frac{|S_i|}{|S|} \\cdot E(S_i)\n$$", "def information(S, A):\n partitions = pd.unique(S[A])\n\n return sum([( S[A][S[A] == p].size / S[A].size ) * \n entropy( S[S[A] == p]) for p in partitions])\n ", "Information Gain\n$$\n\\mbox{Gain}(S, A) = E(S) - I(S,A)\n$$", "def gain(S, A): \n return entropy(S) - information(S, A)", "Instrinsic Information\n$$\n\\mbox{IntI}(S,A) = - \\sum\\limits_i \\frac{|S_i|}{|S|} \\cdot \\log_2(\\frac{|S_i|}{|S|})\n$$", "def intrinsic_information(S, A):\n partitions = pd.unique(S[A])\n\n return -sum([( S[A][S[A] == p].size / S[A].size ) * \n log2( S[A][S[A] == p].size / S[A].size ) for p in partitions])", "Gain Ratio\n$$\nGR(S,A) = \\frac{\\mbox{Gain}(S,A)}{\\mbox{IntI}(S,A)}\n$$", "def gain_ratio(S, A):\n return gain(S,A) / intrinsic_information(S,A)", "Gini-Index\n$$\n \\mbox{Gini}(S) = 1- \\sum\\limits_i p_i^2\n$$\n$$\n \\mbox{Gini}(S, A) = \\sum\\limits_i \\frac{|s_i|}{|S|} 
\\cdot \\mbox{Gini}(S)\n$$", "def gini(S, A=None):\n \n if A == None:\n return 1-sum( \n [(S[S[final] == o].size / S.size)**2 for o in pd.unique(S[final])] \n )\n \n return sum( [ ( S[A][S[A] == p].size / S[A].size ) * gini(S[S[A] == p]) for p in pd.unique(S[A])] )\n\n_exec = lambda f : {col: f(data, col) for col in data.columns if not col == final}\n\ndata = pd.read_csv('playgolf.txt')\nfinal='Play Golf?'\n\nout = pd.DataFrame( dict(\n gain = _exec(gain), \n information = _exec(information), \n gain_ratio = _exec(gain_ratio),\n gini = _exec(gini)\n) )\nout\n\n_ = out.plot(kind='bar')\n\ndata = pd.read_csv('lens24.dat')\nfinal = 'class'\n \nout = pd.DataFrame( dict(\n gain = _exec(gain), \n information = _exec(information), \n gain_ratio = _exec(gain_ratio),\n gini = _exec(gini)\n) )\nout\n\n_ = out.plot(kind='bar')" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
hauser-tristan/heatwave-defcomp-examples
scripts/india_map.ipynb
mit
[ "Data preprocessing\nHere will download and subset NCEP reanalysis data, and read in files created from the DesInventar database. Then create a map showing the regions where disaster records are available, and how this relates to the resolution of the meteorological data. \nSet up\nImport needed packages", "#--- Libraries\nimport pandas as pd # statistics packages \nimport numpy as np # linear algebra packages\nimport matplotlib.pyplot as plt # plotting routines\nimport seaborn as sns # more plotting routines\nimport shapefile # routines for using 'shapefiles'\nimport urllib # routines for interacting with websites\nimport subprocess # routines for calling external OS commands\n\nfrom mpl_toolkits.basemap import Basemap # plotting routines for map making\nfrom matplotlib import gridspec # plotting routines for multiple plots\nfrom netCDF4 import Dataset # routines for interacting with NetCDF files\n\nfrom matplotlib import cm # more plotting routines\nfrom matplotlib.collections import LineCollection # more plotting routines\n\nfrom cdo import * # routines for interacting with NetCDF files\ncdo = Cdo() # via an external program\n\n# place graphics in the notebook document\n%matplotlib inline", "Specify region\nFor this exercise, using examples from India.", "#--- Identify country for example \n# label country\ncountry = 'India' \n# define bounding box for region\nmlat = '0' ; Mlat = '40' ; mlon = '65' ; Mlon = '105'", "Set data\nDisaster records\nA spreadsheet of availble data was obtained from the DesInventar website, and then exported to .csv format. Both versions are available in the data repository. When pulling data from the website sometimes there can be little formatting issues, which we repair here. 
Also want to learn what span of years is covered by the database for our example country (India), so that we can save disk space by paring down the reanalysis data to the smallest possible file.", "#--- Pull in data from DesInvetar records\n# Read file of reported heatwaves (original spreadsheet)\nheatwave_data = pd.read_csv('../data/Heatwaves_database.csv')\n# repair region name with space before name\nheatwave_data.loc[(heatwave_data.Region==' Tamil Nadu'),'Region'] = 'Tamil Nadu'\n# list out the dates for example country (India)\nindia_dates = heatwave_data['Date (YMD)'][heatwave_data['Country'].isin(['India'])]\n# find year of earliest entry\nmin_year = np.min([int(x.split('/')[0]) for x in india_dates])\n# find year of latest entry\nmax_year = np.max([int(x.split('/')[0]) for x in india_dates])", "Reanalysis\nNeed to pull the renalysis data from NCEP's online database. Going to pull the full global files at first, so that have the data avaialbe if want to look at other regions of the world. 
This requires a lot of download time and storage space, the resulting minimally sized files are stored in the repository (others are deleated or moved to save disk space) so don't run these code blocks unless you need to change something about the data is being aquired or it's final form (which means, yeah, probably you'll end up having to run the script).", "#---Download NetCDF files\n# path to data directory for max/min daily temperatures\npath_maxmin = 'ftp://ftp.cdc.noaa.gov/Datasets/ncep.reanalysis.dailyavgs/surface_gauss'\n# path to data directory for 6hr temperature records\npath_hourly = 'ftp://ftp.cdc.noaa.gov/Datasets/ncep.reanalysis/surface_gauss'\n# loop through years\nfor yr in range(1948,2016) :\n # write max 2meter temperature to new file\n path = path_maxmin+'/tmax.2m.gauss.'+str(yr)+'.nc'\n ofile = open('../data/t2m.max.daily.'+str(yr)+'.nc','w')\n ofile.write(urllib.urlopen(path).read())\n ofile.close()\n # write min 2meter temperature to new file\n path = path_maxmin+'/tmin.2m.gauss.'+str(yr)+'.nc'\n ofile = open('../data/t2m.min.daily.'+str(yr)+'.nc','w')\n ofile.write(urllib.urlopen(path).read())\n ofile.close()\n # write 2meter temperature at 6-hour intervals to new file\n path = path_hourly+'/air.2m.gauss.'+str(yr)+'.nc'\n ofile = open('../data/t2m.subdaily.'+str(yr)+'.nc','w')\n ofile.write(urllib.urlopen(path).read())\n ofile.close()\n\n\n\n# set data as single multiyear files\n_ = cdo.mergetime(input='../data/t2m.max.daily.*.nc',output='../data/t2m.max.daily.nc')\n_ = cdo.mergetime(input='../data/t2m.min.daily.*.nc',output='../data/t2m.min.daily.nc')\n_ = cdo.mergetime(input='../data/t2m.subdaily.*.nc',output='../data/t2m.subdaily.nc')", "Once have full data set can then subdivide to create individual files for different regions to reduce the run time when reading in data for individual regions.", "#--- Create data files of region\n# select region from min-temperature data\n_ = cdo.sellonlatbox(','.join([mlon,Mlon,mlat,Mlat]),\n 
input='../data/t2m.min.daily.nc',\n output='../data/'+country+'.t2m.min.daily.nc')\n# select region from max-temperature data\n_ = cdo.sellonlatbox(','.join([mlon,Mlon,mlat,Mlat]),\n input='../data/t2m.max.daily.nc',\n output='../data/'+country+'.t2m.max.daily.nc')\n# select region from hourly-temperature data\n_ = cdo.sellonlatbox(','.join([mlon,Mlon,mlat,Mlat]),\n input='../data/t2m.subdaily.nc',\n output='../data/'+country+'.t2m.subdaily.nc')\n# create a daily mean temperature file\n_ = cdo.daymean(input='../data/'+country+'.t2m.subdaily.nc',\n output='../data/'+country+'.t2m.daily.nc')\n\n#--- Trim time range of file to match disaster records\n# list years in time range\nyears_in_record = ','.join([ str(x) for x in range(min_year,max_year+1) ])\n# subset regional data\n_ = cdo.selyear(years_in_record,\n input='../data/'+country+'.t2m.min.daily.nc',\n output='../data/'+country+'.t2m.min.daily.subset.nc')\n_ = cdo.selyear(years_in_record,\n input='../data/'+country+'.t2m.max.daily.nc',\n output='../data/'+country+'.t2m.max.daily.subset.nc')\n# _ = cdo.selyear(years_in_record,\n# input='../data/'+country+'.t2m.subdaily.nc',\n# output='../data/'+country+'.t2m.subdaily.subset.nc')\n_ = cdo.selyear(years_in_record,\n input='../data/'+country+'.t2m.daily.nc',\n output='../data/'+country+'.t2m.daily.subset.nc')\n# retain base period file (needed for one of the heat wave definitions)\nyears = ','.join([ str(x) for x in range(1960,1991)])\n_ = cdo.selyear(years,\n input='../data/'+country+'.t2m.max.daily.nc',\n output='../data/'+country+'basefile.nc')", "Region masks\nThe way we arranged the analysis (which as you can see is a bit of an ad hoc, duct tape style procedure) requires masking out the individual districts, or rather the closest approximation of them possible using the low resolution, gridded reanalysis data. 
\nThe first step is creating a 'blanked' file of the region, where all the values are set to unity.", "#--- Create blank file for region\n# write grid information to file\nofile = open('../data/ncep_grid.asc','w')\nofile.write('\\n'.join(cdo.griddes(input='../data/'+country+'.t2m.daily.nc')))\nofile.close()\n# create data file with all values set to 1\n_ = cdo.const('1','../data/ncep_grid.asc',\n output='../data/'+country+'.blank.ncepgrid.nc',\n options='-f nc')", "The actual mask files are made with a different script, writen in NCL The code here modifies the generic script based on what region we're interested in at the moment. \nFor some countries, e.g., Chile, the region labels in the shapefiles and the region labels in the heatwave database are not rendered the same (typically this has to do with how accented letters are notated), so some tweaking has to be done.", "#--- Identify regions of interest \n# make list of unique region names for country\nregions = list( set(heatwave_data.Region.where(heatwave_data.Country==country)) )\n# remove nans (from regions that arent in the selected country) \nregions = [x for x in regions if str(x) != 'nan']\nregions = [x.title() for x in regions]\nif ( country == 'Chile') :\n regions_shapefile = [u'Antofagasta',u'Araucan\\xeda',\n u'Ais\\xe9n del General Carlos Ib\\xe1\\xf1ez del Campo',\n u'Regi\\xf3n Metropolitana de Santiago',\n u'Magallanes y Ant\\xe1rtica Chilena',\n u\"Libertador General Bernardo O'Higgins\"]\n\n\nelse :\n regions_shapefile = regions\n\n\n#--- Create masks\n# loop through regions\nfor i in range(len(regions)) :\n # find the name of the region\n reg = regions[i].title()\n # find the name of the region as defined by the shapefile\n reg_shapefile = regions_shapefile[i] #reg_shapefile = regions_shapefile[i].decode('utf-8')\n # remove spaces\n reg = reg.strip()\n # report what's happening\n print(\"Creating masking script for \"+reg+\", aka \"+reg_shapefile)\n # create NCL script from defualt file with 
name of region \n with open('maskregions_'+\"\".join(country.split(\" \"))+'.ncl', 'r') as input_file, open('crMaskFile.ncl', 'w') as output_file:\n # check lines for dummy line\n for line in input_file :\n if line.strip() == 'region = \"STATE/PROVINCE\"' :\n # overwrite with region name\n output_file.write(' region = \"'+reg_shapefile.encode('utf-8')+'\"\\n')\n else :\n output_file.write(line)\n # run NCL routine\n print(\"Running masking script\")\n # subprocess.call(['/bin/bash','-i','-c','ncl crMaskFile.ncl'])\n subprocess.call(['/bin/bash','-c','ncl crMaskFile.ncl'])\n # create a file that masks the region\n print(\"Renaming mask and copying to data folder.\")\n subprocess.call(['cp','mask.nc',\"../data/\"+\"_\".join(reg.split())+'.mask.nc'])\n\n\n#--- Create single mask file showing all considered regions\n# combine all the individual mask files\n_ = cdo.add(input='../data/Orissa.mask.nc ../data/Uttar_Pradesh.mask.nc',\n output='../data/tmp.nc')\n_ = cdo.add(input='../data/tmp.nc ../data/Tamil_Nadu.mask.nc',\n output='../data/India.masks.nc')", "Drawing a map\nWant to create a graphic to show that reports only exist for certain regions, and how the grid spacing of the meterological fields imperfectly matches the actual region boundaries. Have currently set things so that a grid cell is considered informative about the political region as long some part of the region boundary is within 50kms of the grid point (cell center). Played around with a few things before settling on this. The distance is pretty conservative; as in tends towards considering information from outside the region, rather than excluding information from within, but still keeps a more \"fair\" evaluation, by not evaluating against grid cells which contain only a minimal amount of the geographical region. 
Considering that most political boundaries are linked to geographical features/divides, if only a small fraction of the region extends into another grid cell, would expect its weather to more correlated with that shown by cells over the rest of the region than that of this other area. Example of this can be seen for Uttar Pradesh (India), where a sliver of the region overlaps with a gird cell that is mostly representitive of the Himalayas, so it is not considered when calculating the warm spell durations.\nLooking at the individual administrative regions requires working\nwith shape files. These are obtained from the Database of Global\nAdministrative Areas.", "#--- Map regions of India used in this example\n# read which regions are included in disaster database\nregions = list(set(heatwave_data.loc[(heatwave_data.Country=='India'),'Region']))\n# Create a map object\nchart = Basemap(projection='lcc',resolution='c',\n lat_0=20,lon_0=85,\n llcrnrlat=5,urcrnrlat=35,\n llcrnrlon=70,urcrnrlon=100) \n# add geographic features\nchart.shadedrelief()\n# draw parallels and meridians.\nchart.drawparallels(np.arange(-90.,91.,10.),labels=[False,True,True,False])\nchart.drawmeridians(np.arange(-180.,181.,10.),labels=[True,False,False,True])\n# add country outline \nchart.readshapefile('../data/IND_adm0', 'IND0',drawbounds=True) ;\n# add region outlines, for regions in data set\nchart.readshapefile('../data/IND_adm1', 'IND1',drawbounds=False) ;\nfor info, shape in zip(chart.IND1_info, chart.IND1):\n if info['NAME_1'] in regions :\n x, y = zip(*shape) \n chart.plot(x, y, marker=None,color=sns.xkcd_rgb['dusty orange'])\n \n# load file of combined regional masks\nncfile = Dataset('../data/India.masks.nc')\n# read mask data\nrmask = ncfile.variables['region_mask'][:]\n# get coordinates of data\nlons = ncfile.variables['lon'][:]\nlats = ncfile.variables['lat'][:]\n# shift so that lines show grid box boundaries, \n# rather than grid point locations\nlons = lons - (1.875/2)\nlats = lats 
+ (1.9047/2)\n# if in western hemisphere, need to label as \n# \"all the way round\", rather than +/- \n# lons = lons - 360\n# set coordinates list as grid of locations\nlons, lats = np.meshgrid(lons,lats)\n# overlay region masks \nchart.pcolormesh(lons,lats,rmask,shading='flat',latlon=True, alpha=0.2) ;\n# save image\nplt.savefig('../figures/india.png')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
CalPolyPat/phys202-2015-work
assignments/assignment03/NumpyEx01.ipynb
mit
[ "Numpy Exercise 1\nImports", "import numpy as np\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport antipackage\nimport github.ellisonbg.misc.vizarray as va", "Checkerboard\nWrite a Python function that creates a square (size,size) 2d Numpy array with the values 0.0 and 1.0:\n\nYour function should work for both odd and even size.\nThe 0,0 element should be 1.0.\nThe dtype should be float.", "def checkerboard(size):\n \"\"\"Return a 2d checkboard of 0.0 and 1.0 as a NumPy array\"\"\"\n board = np.ones((size,size), dtype=float)\n for i in range(size):\n if i%2==0:\n board[i,1:size:2]=0\n else:\n board[i,0:size:2]=0\n va.enable()\n return board\ncheckerboard(10)\n\na = checkerboard(4)\nassert a[0,0]==1.0\nassert a.sum()==8.0\nassert a.dtype==np.dtype(float)\nassert np.all(a[0,0:5:2]==1.0)\nassert np.all(a[1,0:5:2]==0.0)\n\nb = checkerboard(5)\nassert b[0,0]==1.0\nassert b.sum()==13.0\nassert np.all(b.ravel()[0:26:2]==1.0)\nassert np.all(b.ravel()[1:25:2]==0.0)", "Use vizarray to visualize a checkerboard of size=20 with a block size of 10px.", "va.set_block_size(10)\ncheckerboard(20)\n\nassert True", "Use vizarray to visualize a checkerboard of size=27 with a block size of 5px.", "va.set_block_size(5)\ncheckerboard(27)\n\nassert True" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
tombstone/models
official/colab/nlp/nlp_modeling_library_intro.ipynb
apache-2.0
[ "Copyright 2020 The TensorFlow Authors.", "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "Introduction to the TensorFlow Models NLP library\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/official_models/nlp/nlp_modeling_library_intro\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/models/blob/master/official/colab/nlp/nlp_modeling_library_intro.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/models/blob/master/official/colab/nlp/nlp_modeling_library_intro.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/models/official/colab/nlp/nlp_modeling_library_intro.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>\n\nLearning objectives\nIn this Colab notebook, you will learn how to build transformer-based models for common NLP tasks including pretraining, span labelling and classification using the building blocks from NLP modeling library.\nInstall and import\nInstall the TensorFlow Model Garden pip package\n\ntf-models-nightly is 
the nightly Model Garden package created daily automatically.\npip will install all models and dependencies automatically.", "!pip install -q tf-nightly\n!pip install -q tf-models-nightly", "Import Tensorflow and other libraries", "import numpy as np\nimport tensorflow as tf\n\nfrom official.nlp import modeling\nfrom official.nlp.modeling import layers, losses, models, networks", "BERT pretraining model\nBERT (Pre-training of Deep Bidirectional Transformers for Language Understanding) introduced the method of pre-training language representations on a large text corpus and then using that model for downstream NLP tasks.\nIn this section, we will learn how to build a model to pretrain BERT on the masked language modeling task and next sentence prediction task. For simplicity, we only show the minimum example and use dummy data.\nBuild a BertPretrainer model wrapping TransformerEncoder\nThe TransformerEncoder implements the Transformer-based encoder as described in BERT paper. It includes the embedding lookups and transformer layers, but not the masked language model or classification task networks.\nThe BertPretrainer allows a user to pass in a transformer stack, and instantiates the masked language model and classification networks that are used to create the training objectives.", "# Build a small transformer network.\nvocab_size = 100\nsequence_length = 16\nnetwork = modeling.networks.TransformerEncoder(\n vocab_size=vocab_size, num_layers=2, sequence_length=16)", "Inspecting the encoder, we see it contains few embedding layers, stacked Transformer layers and are connected to three input layers:\ninput_word_ids, input_type_ids and input_mask.", "tf.keras.utils.plot_model(network, show_shapes=True, dpi=48)\n\n# Create a BERT pretrainer with the created network.\nnum_token_predictions = 8\nbert_pretrainer = modeling.models.BertPretrainer(\n network, num_classes=2, num_token_predictions=num_token_predictions, output='predictions')", "Inspecting the bert_pretrainer, 
we see it wraps the encoder with additional MaskedLM and Classification heads.", "tf.keras.utils.plot_model(bert_pretrainer, show_shapes=True, dpi=48)\n\n# We can feed some dummy data to get masked language model and sentence output.\nbatch_size = 2\nword_id_data = np.random.randint(vocab_size, size=(batch_size, sequence_length))\nmask_data = np.random.randint(2, size=(batch_size, sequence_length))\ntype_id_data = np.random.randint(2, size=(batch_size, sequence_length))\nmasked_lm_positions_data = np.random.randint(2, size=(batch_size, num_token_predictions))\n\noutputs = bert_pretrainer(\n [word_id_data, mask_data, type_id_data, masked_lm_positions_data])\nlm_output = outputs[\"masked_lm\"]\nsentence_output = outputs[\"classification\"]\nprint(lm_output)\nprint(sentence_output)", "Compute loss\nNext, we can use lm_output and sentence_output to compute loss.", "masked_lm_ids_data = np.random.randint(vocab_size, size=(batch_size, num_token_predictions))\nmasked_lm_weights_data = np.random.randint(2, size=(batch_size, num_token_predictions))\nnext_sentence_labels_data = np.random.randint(2, size=(batch_size))\n\nmlm_loss = modeling.losses.weighted_sparse_categorical_crossentropy_loss(\n labels=masked_lm_ids_data,\n predictions=lm_output,\n weights=masked_lm_weights_data)\nsentence_loss = modeling.losses.weighted_sparse_categorical_crossentropy_loss(\n labels=next_sentence_labels_data,\n predictions=sentence_output)\nloss = mlm_loss + sentence_loss\nprint(loss)", "With the loss, you can optimize the model.\nAfter training, we can save the weights of TransformerEncoder for the downstream fine-tuning tasks. Please see run_pretraining.py for the full example.\nSpan labeling model\nSpan labeling is the task to assign labels to a span of the text, for example, label a span of text as the answer of a given question.\nIn this section, we will learn how to build a span labeling model. 
Again, we use dummy data for simplicity.\nBuild a BertSpanLabeler wrapping TransformerEncoder\nBertSpanLabeler implements a simple single-span start-end predictor (that is, a model that predicts two values: a start token index and an end token index), suitable for SQuAD-style tasks.\nNote that BertSpanLabeler wraps a TransformerEncoder, the weights of which can be restored from the above pretraining model.", "network = modeling.networks.TransformerEncoder(\n vocab_size=vocab_size, num_layers=2, sequence_length=sequence_length)\n\n# Create a BERT trainer with the created network.\nbert_span_labeler = modeling.models.BertSpanLabeler(network)", "Inspecting the bert_span_labeler, we see it wraps the encoder with additional SpanLabeling that outputs start_position and end_postion.", "tf.keras.utils.plot_model(bert_span_labeler, show_shapes=True, dpi=48)\n\n# Create a set of 2-dimensional data tensors to feed into the model.\nword_id_data = np.random.randint(vocab_size, size=(batch_size, sequence_length))\nmask_data = np.random.randint(2, size=(batch_size, sequence_length))\ntype_id_data = np.random.randint(2, size=(batch_size, sequence_length))\n\n# Feed the data to the model.\nstart_logits, end_logits = bert_span_labeler([word_id_data, mask_data, type_id_data])\nprint(start_logits)\nprint(end_logits)", "Compute loss\nWith start_logits and end_logits, we can compute loss:", "start_positions = np.random.randint(sequence_length, size=(batch_size))\nend_positions = np.random.randint(sequence_length, size=(batch_size))\n\nstart_loss = tf.keras.losses.sparse_categorical_crossentropy(\n start_positions, start_logits, from_logits=True)\nend_loss = tf.keras.losses.sparse_categorical_crossentropy(\n end_positions, end_logits, from_logits=True)\n\ntotal_loss = (tf.reduce_mean(start_loss) + tf.reduce_mean(end_loss)) / 2\nprint(total_loss)", "With the loss, you can optimize the model. 
Please see run_squad.py for the full example.\nClassification model\nIn the last section, we show how to build a text classification model.\nBuild a BertClassifier model wrapping TransformerEncoder\nBertClassifier implements a simple token classification model containing a single classification head using the TokenClassification network.", "network = modeling.networks.TransformerEncoder(\n vocab_size=vocab_size, num_layers=2, sequence_length=sequence_length)\n\n# Create a BERT trainer with the created network.\nnum_classes = 2\nbert_classifier = modeling.models.BertClassifier(\n network, num_classes=num_classes)", "Inspecting the bert_classifier, we see it wraps the encoder with additional Classification head.", "tf.keras.utils.plot_model(bert_classifier, show_shapes=True, dpi=48)\n\n# Create a set of 2-dimensional data tensors to feed into the model.\nword_id_data = np.random.randint(vocab_size, size=(batch_size, sequence_length))\nmask_data = np.random.randint(2, size=(batch_size, sequence_length))\ntype_id_data = np.random.randint(2, size=(batch_size, sequence_length))\n\n# Feed the data to the model.\nlogits = bert_classifier([word_id_data, mask_data, type_id_data])\nprint(logits)", "Compute loss\nWith logits, we can compute loss:", "labels = np.random.randint(num_classes, size=(batch_size))\n\nloss = modeling.losses.weighted_sparse_categorical_crossentropy_loss(\n labels=labels, predictions=tf.nn.log_softmax(logits, axis=-1))\nprint(loss)", "With the loss, you can optimize the model. Please see run_classifier.py or the colab fine_tuning_bert.ipynb for the full example." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
saketkc/hatex
2015_Fall/MATH-578B/Homework5/Homework5.ipynb
mit
[ "Problem 1\nGiven: $N \\sim Poisson(\\lambda)$ and $X_1, \\dots, X_n \\sim \\vec{\\pi}$\n$X_k(t)$ is continous time MC with $X_k(0) = X_k$\n$N_t(a) = ${k:X_k(t) = a}$\ni.e. $N_t$ is the number of visits to state $a$ in time $t$.\n$\\sum_a\\pi(a)Q_{ab}=0$ for each $b$ with the constraint $\\sum_a\\pi(a)=1$\n$\\sum_a\\pi(a)Q_{ab}=0$ $\\implies$ $\\vec{\\pi}^TQ=0$ $\\implies$ \n$$\n\\begin{align}\n\\vec{\\pi}^TQ&=0\\\n\\Longleftrightarrow \\vec{\\pi}^TQ^n&=0\\ \\ \\forall n \\geq 1\\ \n\\Longleftrightarrow \\sum_{n\\geq 1}\\vec{\\pi}\\frac{t^n}{n!}Q^n &=0 \\ \\ \\forall t \\geq 0\\\n\\Longleftrightarrow \\vec{\\pi}\\sum_{n\\geq 0}\\frac{t^n}{n!}Q^n &=\\vec{\\pi}\\\n\\Longleftrightarrow \\vec{\\pi}P &=\\vec{\\pi}\\\n\\Longleftrightarrow \\vec{\\pi}\\ \\text{is a stationary distribution}\n\\end{align}\n$$\nNow, $P(X_k(t)=a)=\\pi(a)$ and $N_t(a) = {k:X_k(t) = a}$ $\\implies$ $N_t(a)|N \\sim Binom(N, \\pi(a))$ and \n$N \\sim Poisson(\\lambda)$ then $\\boxed{N_t \\sim Poisson(\\lambda \\pi)}$\nProblem 2", "%matplotlib inline\nfrom __future__ import division\nimport pandas as pd\nimport matplotlib\nimport itertools\nmatplotlib.rcParams['figure.figsize'] = (16,12)\nimport matplotlib.pyplot as plt\nimport numpy as np\nnp.random.seed(1)\n\ndef propose(S):\n r = np.random.choice(len(S), 2)\n rs = np.sort(r)\n j,k=rs[0],rs[1]\n y=np.copy(S)\n y[j:k+1] = y[j:k+1][::-1]\n return y\n\ndef count_cycles(S):\n sample_length = len(S)\n n_cycles = 0\n index = 0\n length_travelled = 0\n visited = []\n while length_travelled < sample_length:\n if S[index] == index and index < sample_length :\n index+=1\n n_cycles+=1\n length_travelled+=1\n else:\n visited.append(index)\n index = S[index]\n length_travelled+=1\n if index not in visited:\n n_cycles+=1\n return n_cycles\n\nN = [2,3,4, 100]\nalpha = 3\n\nassert count_cycles([0,1]) == 2\nassert count_cycles([0,2,1]) == 2\nassert count_cycles([1,0]) == 1\n\nN_iterations = 10000\n\ndef theoretical(S, alpha, denom):\n n_cycles = 
count_cycles(S)\n return n_cycles**alpha/denom\n\n\ndef run(n, show=True):\n oldS = np.arange(n)\n old_n_cycles = count_cycles(oldS)\n count_dict = {}\n if show:\n denom = sum([count_cycles(x)**alpha for x in itertools.permutations(range(n))])\n for i in range(N_iterations):\n proposedS = propose(oldS)\n new_n_cycles = count_cycles(proposedS)\n pi_ab = new_n_cycles**alpha/(old_n_cycles**alpha)\n q = min(1,pi_ab)\n if q>= np.random.uniform():\n oldS = proposedS\n old_n_cycles = new_n_cycles\n tkey = ','.join([str(x+1) for x in oldS.tolist()])\n key=\"[\"+tkey+\"]\"\n if key not in count_dict:\n if show:\n count_dict[key] = [0,0,0]\n count_dict[key][1] = theoretical(oldS,alpha,denom)\n count_dict[key][2] = old_n_cycles\n else:\n count_dict[key] = [0,0]\n count_dict[key][1] = old_n_cycles\n\n\n count_dict[key][0]+=1\n df = pd.DataFrame(count_dict)\n df=df.transpose()\n if show:\n df.columns=[r'Simulated $\\pi(s)$', 'Theoretical', 'c(s)']\n df[r'Simulated $\\pi(s)$'] = df[r'Simulated $\\pi(s)$']/N_iterations\n df['Percentage Error'] = 100*(df[r'Simulated $\\pi(s)$']/df['Theoretical']-1)\n else:\n df.columns=[r'Simulated $\\pi(s)$', 'c(s)']\n df[r'Simulated $\\pi(s)$'] = df[r'Simulated $\\pi(s)$']/N_iterations\n \n df.index.name='State'\n return df\n", "n=2", "df = run(N[0])\ndf", "n=3", "df = run(N[1])\ndf", "n=4", "count_dict = run(N[2])\ncount_dict", "N=100", "df = run(N[3], show=False)", "$$\\sum_{s \\in S_a}\\pi(s)c(s)=E[c(s)]$$\nand similarly, \n$$\\sum_{s \\in S_a}\\pi(s)c^2(s)=E[c^2(s)]=Var(c(s))+E^2[c(s)]$$", "expectation = sum(df[r'Simulated $\\pi(s)$']*df['c(s)'])\nexpectation2 = sum(df[r'Simulated $\\pi(s)$']*df['c(s)']*df['c(s)'])\nt_expectation = np.mean(df['c(s)'])\nt_expectation2 = np.var(df['c(s)'])+np.mean(df['c(s)'])**2\nprint 'Simulated E[c(s)] = {}\\t\\t Theoretical(M0M) E[c(s)] = {}'.format(expectation, t_expectation)\nprint 'Simulated E[c^2(s)] = {}\\t\\t Theoretical(M0M) E[c^2(s)] = {}'.format(expectation2, t_expectation2)\n", "I use method of 
moments to calculate the Theoretical values. They seem to be in sync with the simulated values. The estimates seem to be in sync even though MOM is just a first approximation because the sample size is large enough to capture the dynamics of the population distribution.", "cycles = df['c(s)']\nplt.hist(cycles, normed=True)", "Problem 3\nPart (A)", "def run():\n N = 1000\n N_iterations = 200\n chrom_length = 3*(10**9)\n transposon_length = 3*1000\n mu = 0.05\n t_positions = []\n\n n_initial = np.random.random_integers(N-1)\n x_initial = np.random.random_integers(chrom_length-1)\n\n offspring_positions = []\n all_positions = [[] for t in range(N)]\n all_positions[n_initial].append(x_initial)\n all_t_count =[]\n\n for nn in range(N_iterations):\n for i in range(N):\n indicator = np.random.binomial(1,mu,len(all_positions[i]))\n temp_indices = []\n for ind, ind_value in enumerate(indicator):\n if ind_value == 1:\n temp_indices.append(ind)\n\n for j in temp_indices:\n x_temp = np.random.random_integers(chrom_length-1)\n all_positions[i][j] = x_temp\n all_positions[i].append(np.random.random_integers(chrom_length-1))\n offspring_positions = [[] for t in range(N)]\n for j in range(N):\n y,z = np.random.random_integers(0,N-1,2)\n y_parent = np.random.binomial(1,0.5,len(all_positions[y]))\n z_parent = np.random.binomial(1,0.5,len(all_positions[z]))\n temp_y = []\n temp_z = []\n for index,value in enumerate(y_parent):\n if value>=1:\n temp_y.append(all_positions[y][index])\n for index,value in enumerate(z_parent):\n if value>=1:\n temp_z.append(all_positions[z][index])\n for t_y in temp_y:\n offspring_positions[j].append(t_y)\n for t_z in temp_z:\n offspring_positions[j].append(t_z)\n all_positions = offspring_positions\n count_t = 0\n count_x = []\n for p in range(N):\n count_t += len(all_positions[p])\n count_x.append(all_positions[p])\n survived_t = np.unique(count_x, return_counts=True)[1]\n all_t_count.append((count_t, len(survived_t[survived_t>=N*mu])))\n return 
all_t_count\n\nall_t_count = run()\ndie_out_transposons = all_t_count\n\nfig, axs = plt.subplots(2,2)\naxs[0][0].plot([x[0] for x in die_out_transposons])\naxs[0][0].set_title('No. of Transpososn v/s Generations')\naxs[0][0].set_xlabel('Generations')\naxs[0][0].set_ylabel('No. of Transpososn')\n\naxs[0][1].plot([x[1] for x in die_out_transposons])\naxs[0][1].set_title('No. of Common Transpososn v/s Generations')\naxs[0][1].set_xlabel('Generations')\naxs[0][1].set_ylabel('No. of Common Transposons')\n\n\nincreasing_rate = []\nfor i in range(1,len(die_out_transposons)):\n increasing_rate.append(die_out_transposons[i][0]/(die_out_transposons[i-1][0]+0.000001))\naxs[1][0].plot(increasing_rate)\naxs[1][0].set_title('Increasing rate v/s Generations')\naxs[1][0].set_xlabel('Generations')\naxs[1][0].set_ylabel('Increasing Rate')", "The above example shows one case when the \"the transposon does not spread\"", "all_t_count = run()\nnondie_out_transposons = all_t_count\n\nfig, axs = plt.subplots(2,2)\naxs[0][0].plot([x[0] for x in nondie_out_transposons])\naxs[0][0].set_title('No. of Transpososn v/s Generations')\naxs[0][0].set_xlabel('Generations')\naxs[0][0].set_ylabel('No. of Transpososn')\n\naxs[0][1].plot([x[1] for x in die_out_transposons])\naxs[0][1].set_title('No. of Surviving Transpososn v/s Generations')\naxs[0][1].set_xlabel('Generations')\naxs[0][1].set_ylabel('No. 
of Common Transposons')\n\n\nincreasing_rate = []\nfor i in range(1,len(die_out_transposons)):\n increasing_rate.append(die_out_transposons[i][0]/(die_out_transposons[i-1][0]+0.000001))\naxs[1][0].plot(increasing_rate)\naxs[1][0].set_title('Increasing rate v/s Generations')\naxs[1][0].set_xlabel('Generations')\naxs[1][0].set_ylabel('Increasing Rate')", "The above example shows one case when the \"the transposon does spread\", with rate being exponential and the common transposons still being limited\nPart (B)\nTreating the total number of transposons $N(t)$ at any time $t$ to be a branching process, then $N(t+1) = \\sum_{i=1}^{N(t)} W_{t,i}$ Where $W_{t,i}$ is the number of locations of the $i^{th}$ transposon in the offspring.\nNow consider $E[N_t]$\nClaim: $E[N_t] = (1+\\mu)^t$\nProof: \nWith probability of $\\mu$ the transposon undergoes becomes 2 from 1. and hence $W_{t,i}$, the number of locations of the $i^{th}$ transposon in the offspring is a poisson random varaible with mean $1+\\mu$\n$W_k \\sim Poisson(1+\\mu)$\n$$\n\\begin{align}\nE[N_t] = E[\\sum_{i=1}^{N(t-1)} W_{t,i}] &= E[E[\\sum_{i=1}^{n} W_{t,i}|N(t-1)=n]]\\ \n&= E[N(t-1)] \\times (1+\\mu)\\\n&= N(1+\\mu)^t\n\\end{align}\n$$\nThus, the expected number of total transposons is an exponential.\nPart (C)\n$P(X>0) \\leq EX$\nConsider $X_t$ as the total number of trasposon copies at location $x$ at generation $t$ \nFor each new generation, the new arrival at $x$ is a poisson process with mean = $2 \\times \\mu \\times N(t) \\times \\frac{1}{L}$. 
Let $R(t)$ represent the new arrivals at x\n$N(t)$ represents the number of transposon copies of the transposon suriving for $t$ generations!\nThen, $E[N_1]=1+\\mu$ By indution, $E[N_t] = (1+\\mu)^t$\nThus, $R(t) \\sim \\text{Poisson}(\\frac{2\\mu N(t)}{L})$\nNow Using a branching process model for number of transposon copies located at location $x$, the offspring mean number of offspring transposons is = $(1-\\mu)1 + \\mu2 = 1+\\mu$\nLet $Z_{t,k}(u)$= Number of offspring copies of $k^{th}$ transposon at $x$ that occured at time $t$ inserted at time u ($u \\leq t)\nUsing branching process property, $E[Z_{t,k}(t+u)] = (1+\\mu)^u$\nThen \n$$\n\\begin{align}\nEX_t &= \\sum_{u \\leq 0} \\sum_{k=1}^{R(u)} Z_{u,k}(0)\\ \n&= \\sum_{u \\leq 0}E[R(u)]E[Z_{u,1}(0)]\\ \n&= \\sum_{u \\leq 0} (1+\\mu)^t \\frac{2 N \\mu }{L} \\times (1+\\mu)^u \\\n&= \\sum_{u\\geq 0}(1+\\mu)^t \\frac{2 N \\mu }{L(1+\\mu)^u}\\\n&\\approx \\frac{2 \\mu }{L}\\times(1+\\frac{1}{\\mu})\\\n&= \\frac{2}{L}(1+\\mu)^{t+1}\n\\end{align}\n$$\nThus, $P(X>0) \\leq \\frac{2}{L}(1+\\mu)^{t+1}$\nPart (D)\n$\\mu = 10^{-2}$\n$N = 10^7$\nFor an individual $ EX = \\frac{2}{L}(1+\\mu)^{t+1} \\times \\frac{1}{N} = \\frac{2}{NL}(1+\\mu)^{t+1} $\nNow, $\\frac{2}{NL}(1+\\mu)^{t+1}=0.1L$ $\\implies$ $(1+\\mu)^{t+1}=0.1NL^2/2$", "from math import log\nN=10**7\nmu=0.01\nL=3*(10**9)\n\nt = log(0.1*N*L*L/2)/log(1+mu)\n\nprint(t)", "Thus it takes t=5703 generations for transposons to conver 10% of genome(I ignored the length of transposon itself)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ML4DS/ML4all
C4.Classification_SVM/.ipynb_checkpoints/SupportVectorMachines-checkpoint.ipynb
mit
[ "Support Vector Machines\nNotebook version: 1.0 (Oct 26, 2015)\n\nAuthor: Jesús Cid Sueiro (jcid@tsc.uc3m.es)\n\nChanges: v.1.0 - First version", "# To visualize plots in the notebook\n%matplotlib inline\n\n# Imported libraries\n#import csv\n#import random\n#import matplotlib\n#import matplotlib.pyplot as plt\n#import pylab\n\n#import numpy as np\n#from mpl_toolkits.mplot3d import Axes3D\n#from sklearn.preprocessing import PolynomialFeatures\n#from sklearn import linear_model\nfrom sklearn import svm\n\n", "1. Introduction\nSupport vector machines (SVMs) are a set of supervised learning methods used for classification, regression and outliers detection.\nThe advantages of support vector machines are:\n\nEffective in high dimensional spaces.\nStill effective in cases where number of dimensions is greater than the number of samples.\nUses a subset of training points in the decision function (called support vectors), so it is also memory efficient.\nVersatile: different Kernel functions can be specified for the decision function. Common kernels are provided, but it is also possible to specify custom kernels.\n\nThe disadvantages of support vector machines include:\n\nIf the number of features is much greater than the number of samples, the method is likely to give poor performances.\nSVMs do not directly provide probability estimates, these are calculated using an expensive five-fold cross-validation (see Scores and probabilities, below).\nThe support vector machines in scikit-learn support both dense (numpy.ndarray and convertible to that by numpy.asarray) and sparse (any scipy.sparse) sample vectors as input. However, to use an SVM to make predictions for sparse data, it must have been fit on such data. 
For optimal performance, use C-ordered numpy.ndarray (dense) or scipy.sparse.csr_matrix (sparse) with dtype=float64.\n\nSVM implementations in Scikit Learn\nSVC, NuSVC and LinearSVC are classes capable of performing multi-class classification on a dataset.\nSVC and NuSVC are similar methods, but accept slightly different sets of parameters and have different mathematical formulations (see section Mathematical formulation). On the other hand, LinearSVC is another implementation of Support Vector Classification for the case of a linear kernel. Note that LinearSVC does not accept keyword kernel, as this is assumed to be linear. It also lacks some of the members of SVC and NuSVC, like support_.\nAs other classifiers, SVC, NuSVC and LinearSVC take as input two arrays: an array X of size [n_samples, n_features] holding the training samples, and an array y of class labels (strings or integers), size [n_samples]:", "\nX = [[0, 0], [1, 1]]\ny = [0, 1]\nclf = svm.SVC()\nclf.fit(X, y) ", "After being fitted, the model can then be used to predict new values:", "clf.predict([[2., 2.]])", "SVMs decision function depends on some subset of the training data, called the support vectors. Some properties of these support vectors can be found in members support_vectors_, support_ and n_support:", "# get support vectors\nprint clf.support_vectors_\n# get indices of support vectors\nprint clf.support_ \n# get number of support vectors for each class\nprint clf.n_support_ \n", "3.2.1 Example: Iris Dataset.\nAs an illustration, consider the <a href = http://archive.ics.uci.edu/ml/datasets/Iris> Iris dataset </a>, taken from the <a href=http://archive.ics.uci.edu/ml/> UCI Machine Learning repository</a>. This data set contains 3 classes of 50 instances each, where each class refers to a type of iris plant (setosa, versicolor or virginica). Each instance contains 4 measurements of given flowers: sepal length, sepal width, petal length and petal width, all in centimeters. 
\nWe will try to fit the logistic regression model to discriminate between two classes using only two attributes.\nFirst, we load the dataset and split them in training and test subsets.", "# Adapted from a notebook by Jason Brownlee\ndef loadDataset(filename, split):\n xTrain = []\n cTrain = []\n xTest = []\n cTest = []\n\n with open(filename, 'rb') as csvfile:\n lines = csv.reader(csvfile)\n dataset = list(lines)\n for i in range(len(dataset)-1):\n for y in range(4):\n dataset[i][y] = float(dataset[i][y])\n item = dataset[i]\n if random.random() < split:\n xTrain.append(item[0:4])\n cTrain.append(item[4])\n else:\n xTest.append(item[0:4])\n cTest.append(item[4])\n return xTrain, cTrain, xTest, cTest\n\nwith open('iris.data', 'rb') as csvfile:\n lines = csv.reader(csvfile)\n\nxTrain_all, cTrain_all, xTest_all, cTest_all = loadDataset('iris.data', 0.66)\nnTrain_all = len(xTrain_all)\nnTest_all = len(xTest_all)\nprint 'Train: ' + str(nTrain_all)\nprint 'Test: ' + str(nTest_all)", "Now, we select two classes and two attributes.", "# Select attributes\ni = 0 # Try 0,1,2,3\nj = 1 # Try 0,1,2,3 with j!=i\n\n# Select two classes\nc0 = 'Iris-versicolor' \nc1 = 'Iris-virginica'\n\n# Select two coordinates\nind = [i, j]\n\n# Take training test\nX_tr = np.array([[xTrain_all[n][i] for i in ind] for n in range(nTrain_all) \n if cTrain_all[n]==c0 or cTrain_all[n]==c1])\nC_tr = [cTrain_all[n] for n in range(nTrain_all) \n if cTrain_all[n]==c0 or cTrain_all[n]==c1]\nY_tr = np.array([int(c==c1) for c in C_tr])\nn_tr = len(X_tr)\n\n# Take test set\nX_tst = np.array([[xTest_all[n][i] for i in ind] for n in range(nTest_all) \n if cTest_all[n]==c0 or cTest_all[n]==c1])\nC_tst = [cTest_all[n] for n in range(nTest_all) \n if cTest_all[n]==c0 or cTest_all[n]==c1]\nY_tst = np.array([int(c==c1) for c in C_tst])\nn_tst = len(X_tst)", "3.2.2. Data normalization\nNormalization of data is a common pre-processing step in many machine learning algorithms. 
Its goal is to get a dataset where all input coordinates have a similar scale. Learning algorithms usually show less instabilities and convergence problems when data are normalized.\nWe will define a normalization function that returns a training data matrix with zero sample mean and unit sample variance.", "def normalize(X, mx=None, sx=None):\n \n # Compute means and standard deviations\n if mx is None:\n mx = np.mean(X, axis=0)\n if sx is None:\n sx = np.std(X, axis=0)\n\n # Normalize\n X0 = (X-mx)/sx\n\n return X0, mx, sx", "Now, we can normalize training and test data. Observe in the code that the same transformation should be applied to training and test data. This is the reason why normalization with the test data is done using the means and the variances computed with the training set.", "# Normalize data\nXn_tr, mx, sx = normalize(X_tr)\nXn_tst, mx, sx = normalize(X_tst, mx, sx)", "The following figure generates a plot of the normalized training data.", "# Separate components of x into different arrays (just for the plots)\nx0c0 = [Xn_tr[n][0] for n in range(n_tr) if Y_tr[n]==0]\nx1c0 = [Xn_tr[n][1] for n in range(n_tr) if Y_tr[n]==0]\nx0c1 = [Xn_tr[n][0] for n in range(n_tr) if Y_tr[n]==1]\nx1c1 = [Xn_tr[n][1] for n in range(n_tr) if Y_tr[n]==1]\n\n# Scatterplot.\nlabels = {'Iris-setosa': 'Setosa', \n 'Iris-versicolor': 'Versicolor',\n 'Iris-virginica': 'Virginica'}\nplt.plot(x0c0, x1c0,'r.', label=labels[c0])\nplt.plot(x0c1, x1c1,'g+', label=labels[c1])\nplt.xlabel('$x_' + str(ind[0]) + '$')\nplt.ylabel('$x_' + str(ind[1]) + '$')\nplt.legend(loc='best')\nplt.axis('equal')", "In order to apply the gradient descent rule, we need to define two methods: \n - A fit method, that receives the training data and returns the model weights and the value of the negative log-likelihood during all iterations.\n - A predict method, that receives the model weight and a set of inputs, and returns the posterior class probabilities for that input, as well as their 
corresponding class predictions.", "def logregFit(Z_tr, Y_tr, rho, n_it):\n\n # Data dimension\n n_dim = Z_tr.shape[1]\n\n # Initialize variables\n nll_tr = np.zeros(n_it)\n pe_tr = np.zeros(n_it)\n w = np.random.randn(n_dim,1)\n\n # Running the gradient descent algorithm\n for n in range(n_it):\n \n # Compute posterior probabilities for weight w\n p1_tr = logistic(np.dot(Z_tr, w))\n p0_tr = logistic(-np.dot(Z_tr, w))\n\n # Compute negative log-likelihood\n nll_tr[n] = - np.dot(Y_tr.T, np.log(p1_tr)) - np.dot((1-Y_tr).T, np.log(p0_tr))\n\n # Update weights\n w += rho*np.dot(Z_tr.T, Y_tr - p1_tr)\n\n return w, nll_tr\n\ndef logregPredict(Z, w):\n\n # Compute posterior probability of class 1 for weights w.\n p = logistic(np.dot(Z, w))\n \n # Class\n D = [int(round(pn)) for pn in p]\n \n return p, D", "We can test the behavior of the gradient descent method by fitting a logistic regression model with ${\\bf z}({\\bf x}) = (1, {\\bf x}^\\intercal)^\\intercal$.", "# Parameters of the algorithms\nrho = float(1)/50 # Learning step\nn_it = 200 # Number of iterations\n\n# Compute Z's\nZ_tr = np.c_[np.ones(n_tr), Xn_tr] \nZ_tst = np.c_[np.ones(n_tst), Xn_tst]\nn_dim = Z_tr.shape[1]\n\n# Convert target arrays to column vectors\nY_tr2 = Y_tr[np.newaxis].T\nY_tst2 = Y_tst[np.newaxis].T\n\n# Running the gradient descent algorithm\nw, nll_tr = logregFit(Z_tr, Y_tr2, rho, n_it)\n\n# Classify training and test data\np_tr, D_tr = logregPredict(Z_tr, w)\np_tst, D_tst = logregPredict(Z_tst, w)\n \n# Compute error rates\nE_tr = D_tr!=Y_tr\nE_tst = D_tst!=Y_tst\n\n# Error rates\npe_tr = float(sum(E_tr)) / n_tr\npe_tst = float(sum(E_tst)) / n_tst\n\n# NLL plot.\nplt.plot(range(n_it), nll_tr,'b.:', label='Train')\nplt.xlabel('Iteration')\nplt.ylabel('Negative Log-Likelihood')\nplt.legend()\n\nprint \"The optimal weights are:\"\nprint w\nprint \"The final error rates are:\"\nprint \"- Training: \" + str(pe_tr)\nprint \"- Test: \" + str(pe_tst)\nprint \"The NLL after training is \" + 
str(nll_tr[len(nll_tr)-1])", "3.2.3. Free parameters\nUnder certaing conditions, the gradient descent method can be shown to converge assymtotically (i.e. as the number of iterations goes to infinity) to the ML estimate of the logistic model. However, in practice, the final estimate of the weights ${\\bf w}$ depend on several factors:\n\nNumber of iterations\nInitialization\nLearning step\n\nExercise: Visualize the variability of gradient descent caused by initializations. To do so, fix the number of iterations to 200 and the learning step, and execute the gradient descent 100 times, storing the training error rate of each execution. Plot the histogram of the error rate values.\nNote that you can do this exercise with a loop over the 100 executions, including the code in the previous code slide inside the loop, with some proper modifications. To plot a histogram of the values in array p with nbins, you can use plt.hist(p, n)\n3.2.3.1. Learning step\nThe learning step, $\\rho$, is a free parameter of the algorithm. Its choice is critical for the convergence of the algorithm. too large values of $\\rho$ make the algorithm diverge. For too small values, the convergence is too slown and more iterations are required for a good convergence.\nExercise 3: Observe the evolution of the negative log-likelihood with the number of iterations, for different values of $\\rho$. It is easy to check that, for $\\rho$ large enough, the gradient descent method does not converge. Can you estimate (through manual observation) and approximate value of $\\rho$ stating a boundary between convergence and non-convergence?\nExercise 4: In this exercise we explore the influence of the learning step more sistematically. Use the code in the previouse exercises to compute, for every value of $\\rho$, the average error rate over 100 executions. Plot the average error rate vs. $\\rho$. \nNote that you should explore the values of $\\rho$ in a logarithmic scale. 
For instance, you can take $\\rho = 1, 1/10, 1/100, 1/1000, \\ldots$\nIn practice, the selection of $\\rho$ may be a matter of trial an error. Also there is some theoretical evidence that the learning step should decrease along time up to cero, and the sequence $\\rho_n$ should satisfy two conditions:\n- C1: $\\sum_{n=0}^{\\infty} \\rho_n^2 < \\infty$ (decrease slowly)\n- C2: $\\sum_{n=0}^{\\infty} \\rho_n = \\infty$ (do not decrease too much slowly)\nFor instance, we can take $\\rho_n= 1/n$. Another common choice is $\\rho_n = \\alpha/(1+\\beta n)$ where $\\alpha$ and $\\beta$ are also free parameters that can be selected by trial and error with some heuristic method.\n3.2.4. Visualizing the posterior map.\nWe can also visualize the posterior probability map estimated by the logistic regression model for the estimated weights.", "# Create a regtangular grid.\nx_min, x_max = Xn_tr[:, 0].min(), Xn_tr[:, 0].max() \ny_min, y_max = Xn_tr[:, 1].min(), Xn_tr[:, 1].max()\ndx = x_max - x_min\ndy = y_max - y_min\nh = dy /400\nxx, yy = np.meshgrid(np.arange(x_min - 0.1 * dx, x_max + 0.1 * dx, h),\n np.arange(y_min - 0.1 * dx, y_max + 0.1 * dy, h))\nX_grid = np.array([xx.ravel(), yy.ravel()]).T\n\n# Compute Z's\nZ_grid = np.c_[np.ones(X_grid.shape[0]), X_grid] \n\n# Compute the classifier output for all samples in the grid.\npp, dd = logregPredict(Z_grid, w)\n\n# Put the result into a color plot\nplt.plot(x0c0, x1c0,'r.', label=labels[c0])\nplt.plot(x0c1, x1c1,'g+', label=labels[c1])\nplt.xlabel('$x_' + str(ind[0]) + '$')\nplt.ylabel('$x_' + str(ind[1]) + '$')\nplt.legend(loc='best')\nplt.axis('equal')\npp = pp.reshape(xx.shape)\nplt.contourf(xx, yy, pp, cmap=plt.cm.copper)", "3.2.5. 
Polynomial Logistic Regression\nThe error rates of the logitic regression model can be potentially reduce by using polynomial transformations.\nTo compute the polinomial transformation up to a given degree, we can use the PolynomialFeatures method in sklearn.preprocessing.", "# Parameters of the algorithms\nrho = float(1)/50 # Learning step\nn_it = 500 # Number of iterations\ng = 5 # Degree of polynomial\n\n# Compute Z_tr\npoly = PolynomialFeatures(degree=g)\nZ_tr = poly.fit_transform(Xn_tr)\n# Normalize columns (this is useful to make algorithms more stable).)\nZn, mz, sz = normalize(Z_tr[:,1:])\nZ_tr = np.concatenate((np.ones((n_tr,1)), Zn), axis=1)\n\n# Compute Z_tst\nZ_tst = poly.fit_transform(Xn_tst)\nZn, mz, sz = normalize(Z_tst[:,1:], mz, sz)\nZ_tst = np.concatenate((np.ones((n_tst,1)), Zn), axis=1)\n\n# Convert target arrays to column vectors\nY_tr2 = Y_tr[np.newaxis].T\nY_tst2 = Y_tst[np.newaxis].T\n\n# Running the gradient descent algorithm\nw, nll_tr = logregFit(Z_tr, Y_tr2, rho, n_it)\n\n# Classify training and test data\np_tr, D_tr = logregPredict(Z_tr, w)\np_tst, D_tst = logregPredict(Z_tst, w)\n \n# Compute error rates\nE_tr = D_tr!=Y_tr\nE_tst = D_tst!=Y_tst\n\n# Error rates\npe_tr = float(sum(E_tr)) / n_tr\npe_tst = float(sum(E_tst)) / n_tst\n\n# NLL plot.\nplt.plot(range(n_it), nll_tr,'b.:', label='Train')\nplt.xlabel('Iteration')\nplt.ylabel('Negative Log-Likelihood')\nplt.legend()\n\nprint \"The optimal weights are:\"\nprint w\nprint \"The final error rates are:\"\nprint \"- Training: \" + str(pe_tr)\nprint \"- Test: \" + str(pe_tst)\nprint \"The NLL after training is \" + str(nll_tr[len(nll_tr)-1])\n", "Visualizing the posterior map we can se that the polynomial transformations produces nonlinear decision boundaries." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
bobbyangelov/ipython-notebooks
maps-ipython.ipynb
mit
[ "Simple Maps in IPython\nThis notebook demonstrates the basics of mapping data in IPython. All you need is a simple dataset, containing coordinate values.", "%pylab inline\nfrom pylab import *\n\npylab.rcParams['figure.figsize'] = (8.0, 6.4)\n\nfrom mpl_toolkits.basemap import Basemap\nimport matplotlib.pyplot as plt\nimport numpy as np", "And now let's test if the Basemap package is loaded and the graphics displayed correctly.", "map = Basemap(projection='ortho', lat_0=50, lon_0=-100,\n resolution='l', area_thresh=1000.0)\n \nmap.drawcoastlines()\n \nplt.show()", "Now to the cool part!", "import csv\n\n# Open the cities population data file.\nfilename = 'city_longlat.csv'\n\n# Create empty lists for the latitudes, longitudes and population.\nlats, lons, population = [], [], []\n\n# Read through the entire file, skip the first line,\n# and pull out the data we need.\nwith open(filename) as f:\n # Create a csv reader object.\n reader = csv.reader(f)\n \n # Ignore the header row.\n next(reader)\n \n # Store the latitudes, longitudes and populations in the appropriate lists.\n for row in reader:\n lats.append(float(row[1]))\n lons.append(float(row[2]))\n population.append(float(row[3]))\n \n# --- Build Map ---\nfrom mpl_toolkits.basemap import Basemap\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef get_marker_color(population):\n if population < 2000000:\n return ('ro')\n elif population < 7000000:\n return ('yo')\n else:\n return ('go')\n \nmap = Basemap(projection='merc', resolution = 'h', area_thresh = 1000.0,\n lat_0=0, lon_0=-130,\n llcrnrlon=-18.968978, llcrnrlat=33.679432,\n urcrnrlon=41.968945, urcrnrlat=58.940191)\nmap.drawcoastlines()\nmap.drawcountries()\nmap.bluemarble()\nmap.drawmapboundary()\nmap.drawmeridians(np.arange(0, 360, 30))\nmap.drawparallels(np.arange(-90, 90, 30))\n\nfor lons, lats, population in zip(lons, lats, population):\n x,y = map(lats, lons)\n marker_string = get_marker_color(population)\n map.plot(x, y, marker_string, 
markersize=population/150000)\n\ntitle_string = \"Most Populous Cities in Europe\"\n\nfigsize(18, 12)\nplt.title(title_string)\nplt.show()", "The green circle represents the cities with population above 7 million inhabitants, the yellow between 2 and 7 million, and the red represents all others." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
fionapigott/Data-Science-45min-Intros
language-processing-vocab/language_processing_vocab.ipynb
unlicense
[ "Introduction to Language Processing Concepts\nOriginal tutorial by Brain Lehman, with updates by Fiona Pigott\nThe goal of this tutorial is to introduce a few basical vocabularies, ideas, and Python libraries for thinking about topic modeling, in order to make sure that we have a good set of vocabulary to talk more in-depth about processing languge with Python later. We'll spend some time on defining vocabulary for topic modeling and using basic topic modeling tools.\nA big thank-you to the good people at the Stanford NLP group, for their informative and helpful online book: https://nlp.stanford.edu/IR-book/.\nDefinitions.\n\nDocument: a body of text (eg. tweet)\nTokenization: dividing a document into pieces (and maybe throwing away some characters), in English this often (but not necessarily) means words separated by spaces and puctuation.\nText corpus: the set of documents that contains the text for the analysis (eg. many tweets)\nStop words: words that occur so frequently, or have so little topical meaning, that they are excluded (e.g., \"and\")\nVectorize: Turn some documents into vectors\nVector corpus: the set of documents transformed such that each token is a tuple (token_id , doc_freq)", "# first, get some text:\nimport fileinput\ntry:\n import ujson as json\nexcept ImportError:\n import json\ndocuments = []\nfor line in fileinput.FileInput(\"example_tweets.json\"):\n documents.append(json.loads(line)[\"text\"])", "1) Document\nIn the case of the text that we just imported, each entry in the list is a \"document\"--a single body of text, hopefully with some coherent meaning.", "print(\"One document: \\\"{}\\\"\".format(documents[0]))", "2) Tokenization\nWe split each document into smaller pieces (\"tokens\") in a process called tokenization. Tokens can be counted, and most importantly, compared between documents. 
There are potentially many different ways to tokenize text--splitting on spaces, removing punctionation, diving the document into n-character pieces--anything that gives us tokens that we can, hopefully, effectively compare across documents and derive meaning from.\nRelated to tokenization are processes called stemming and lemmatiztion which can help when using tokens to model topics based on the meaning of a word. In the phrases \"they run\" and \"he runs\" (space separated tokens: [\"they\", \"run\"] and [\"he\", \"runs\"]) the words \"run\" and \"runs\" mean basically the same thing, but are two different tokens. Stemming and/or lemmatization help us compare tokens with the same meaning but different spelling/suffixes.\nLemmatization:\nUses a dictionary of words and their possible morphologies to map many different forms of a base word (\"lemma\") to a single lemma, comparable across documents. E.g.: \"run\", \"ran\", \"runs\", and \"running\" might all map to the lemma \"run\"\nStemming:\nUses a set of heuristic rules to try to approximate lemmatization, without knowing the words in advance. For the English language, a simple and effective stemming algorithm might simply be to remove an \"s\" from the ends of words, or an \"ing\" from the end of words. E.g.: \"run\", \"runs\", and \"running\" all map to \"run,\" but \"ran\" (an irregularrly conjugated verb) would not. \nStemming is particularly interesting and applicable in social data, because while some words are decidely not standard English, conventinoal rules of grammar still apply. 
A fan of the popular singer Justin Bieber might call herself a \"belieber,\" while a group of fans call themselves \"beliebers.\" You won't find \"belieber\" in any English lemmatization dictionary, but a good stemming algorithm will still map \"belieber\" and \"beliebers\" to the same token (\"belieber\", or even \"belieb\", if we remover the common suffix \"er\").", "from nltk.stem import porter\nfrom nltk.tokenize import TweetTokenizer\n\n# tokenize the documents\n# find good information on tokenization:\n# https://nlp.stanford.edu/IR-book/html/htmledition/tokenization-1.html\n# find documentation on pre-made tokenizers and options here:\n# http://www.nltk.org/api/nltk.tokenize.html\ntknzr = TweetTokenizer(reduce_len = True)\n\n# stem the documents\n# find good information on stemming and lemmatization:\n# https://nlp.stanford.edu/IR-book/html/htmledition/stemming-and-lemmatization-1.html\n# find documentation on available pre-implemented stemmers here:\n# http://www.nltk.org/api/nltk.stem.html\nstemmer = porter.PorterStemmer()\nfor doc in documents[0:10]:\n tokenized = tknzr.tokenize(doc)\n stemmed = [stemmer.stem(x) for x in tokenized]\n print(\"Original document:\\n{}\\nTokenized result:\\n{}\\nStemmed result:\\n{}\\n\".format(\n doc, tokenized, stemmed))", "3) Text corpus\nThe text corpus is a collection of all of the documents (Tweets) that we're interested in modeling. Topic modeling and/or clustering on a corpus tends to work best if that corpus has some similar themes--this will mean that some tokens overlap, and we can get signal out of when documents share (or do not share) tokens. \nModeling text tends to get much harder the more different, uncommon and unrelated tokens appear in a text, especially when we are working with social data, where tokens don't necessarily appear in a dictionary. 
This difficultly (of having many, many unrelated tokens as dimension in our model) is one example of the curse of dimensionality.", "# number of documents in the corpus\nprint(\"There are {} documents in the corpus.\".format(len(documents)))", "4) Stop words:\nStop words are simply tokens that we've chosen to remove from the corpus, for any reason. In English, removing words like \"and\", \"the\", \"a\", \"at\", and \"it\" are common choices for stop words. Stop words can also be edited per project requirement, in case some words are too common in a particular dataset to be meaningful (another way to do stop word removal is to simply remove any word that appears in more than some fixed percentage of documents).", "from nltk.corpus import stopwords\n\nstopset = set(stopwords.words('english'))\nprint(\"The English stop words list provided by NLTK: \")\nprint(stopset)\n\nstopset.update([\"twitter\"]) # add token\nstopset.remove(\"i\") # remove token\nprint(\"\\nAdd or remove stop words form the set: \")\nprint(stopset)", "5) Vectorize:\nTransform each document into a vector. There are several good choices that you can make about how to do this transformation, and I'll talk about each of them in a second.\nIn order to vectorize documents in a corpus (without any dimensional reduction around the vocabulary), think of each document as a row in a matrix, and each column as a word in the vocabulary of the entire corpus. 
In order to vectorize a corpus, we must read the entire corpus, assign one word to each column, and then turn each document into a row.\nExample: \nDocuments: \"I love cake\", \"I hate chocolate\", \"I love chocolate cake\", \"I love cake, but I hate chocolate cake\" \nStopwords: Say, because the word \"but\" is a conjunction, we want to make it a stop word (not include it in our document vectors)\nVocabulary: \"I\" (column 1), \"love\" (column 2), \"cake\" (column 3), \"hate\" (column 4), \"chocolate\" (column 5)\n\\begin{equation}\n\\begin{matrix}\n\\text{\"I love cake\" } & =\\\n\\text{\"I hate chocolate\" } & =\\\n\\text{\"I love chocolate cake\" } & = \\\n\\text{\"I love cake, but I hate chocolate cake\"} & =\n\\end{matrix}\n\\qquad\n\\begin{bmatrix}\n1 & 1 & 1 & 0 & 0\\\n1 & 0 & 0 & 1 & 1\\\n1 & 1 & 1 & 0 & 1\\\n2 & 1 & 2 & 1 & 1\n\\end{bmatrix}\n\\end{equation}\nVectorization like this don't take into account word order (we call this property \"bag of words\"), and in the above example I am simply counting the frequency of each term in each document.", "# we're going to use the vectorizer functions that scikit learn provides\n\n# define the tokenizer that we want to use\n# must be a callable function that takes a document and returns a list of tokens\ntknzr = TweetTokenizer(reduce_len = True)\nstemmer = porter.PorterStemmer()\ndef myTokenizer(doc):\n return [stemmer.stem(x) for x in tknzr.tokenize(doc)]\n\n# choose the stopword set that we want to use\nstopset = set(stopwords.words('english'))\nstopset.update([\"http\",\"https\",\"twitter\",\"amp\"])\n\n# vectorize\n# we're using the scikit learn CountVectorizer function, which is very handy\n# documentation here: \n# http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nvectorizer = CountVectorizer(tokenizer = myTokenizer, stop_words = stopset)\nvectorized_documents = 
vectorizer.fit_transform(documents)\n\nvectorized_documents\n\nimport matplotlib.pyplot as plt\n%matplotlib inline\n_ = plt.hist(vectorized_documents.todense().sum(axis = 1))\n_ = plt.title(\"Number of tokens per document\")\n_ = plt.xlabel(\"Number of tokens\")\n_ = plt.ylabel(\"Number of documents with x tokens\")\n\nfrom numpy import logspace, ceil, histogram, array\n# get the token frequency\ntoken_freq = sorted(vectorized_documents.todense().astype(bool).sum(axis = 0).tolist()[0], reverse = False)\n# make a histogram with log scales\nbins = array([ceil(x) for x in logspace(0, 3, 5)])\nwidths = (bins[1:] - bins[:-1])\nhist = histogram(token_freq, bins=bins)\nhist_norm = hist[0]/widths\n# plot (notice that most tokens only appear in one document)\nplt.bar(bins[:-1], hist_norm, widths)\nplt.xscale('log')\nplt.yscale('log')\n_ = plt.title(\"Number of documents in which each token appears\")\n_ = plt.xlabel(\"Number of documents\")\n_ = plt.ylabel(\"Number of tokens\")", "Bag of words\nTaking all the words from a document, and sticking them in a bag. Order does not matter, which could cause a problem. \"Alice loves cake\" might have a different meaning than \"Cake loves Alice.\"\nFrequency\nCounting the number of times a word appears in a document.\nTf-Idf (term frequency inverse document frequency):\nA statistic that is intended to reflect how important a word is to a document in a collection or corpus. The Tf-Idf value increases proportionally to the number of times a word appears in the document and is inversely proportional to the frequency of the word in the corpus--this helps control words that are generally more common than others. \nThere are several different possibilities for computing the tf-idf statistic--choosing whether to normalize the vectors, choosing whether to use counts or the logarithm of counts, etc. 
I'm going to show how scikit-learn computed the tf-idf statistic by default, with more information available in the documentation of the sckit-learn TfidfVectorizer.\n$tf(t)$ : Term Frequency, count of the number of times each term appears in the document.\n$idf(d,t)$ : Inverse document frequency.\n$df(d,t)$ : Document frequency, the count of the number of documents in which the term appears. \n$$\ntfidf(t) = tf(t) * \\log\\big(\\frac{1 + n}{1 + df(d, t)}\\big) + 1\n$$\nWe also then take the Euclidean ($l2$) norm of each document vector, so that long documents (documents with many non-stopword tokens) have the same norm as shorter documents.", "# documentation on this sckit-learn function here:\n# http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfTransformer.html\ntfidf_vectorizer = TfidfVectorizer(tokenizer = myTokenizer, stop_words = stopset)\ntfidf_vectorized_documents = tfidf_vectorizer.fit_transform(documents)\n\ntfidf_vectorized_documents\n\n# you can look at two vectors for the same document, from 2 different vectorizers:\ntfidf_vectorized_documents[0].todense().tolist()[0]\n\nvectorized_documents[0].todense().tolist()[0]", "That's all for now!" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
georgetown-analytics/envirohealth
CapstoneSEER/SEER Data Analysis Phase 3- Data Exploration.ipynb
mit
[ "SEER Data Analysis\nPhase 3: Data Exploration", "%matplotlib inline\n\nimport os\nimport time\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom MasterSeer import MasterSeer\nfrom sklearn.feature_selection import SelectPercentile, f_classif, SelectFromModel\nfrom sklearn.linear_model import LinearRegression\nfrom lifelines.plotting import plot_lifetimes\nfrom lifelines import KaplanMeierFitter\nfrom numpy.random import uniform, exponential\nfrom pandas.tools.plotting import scatter_matrix, radviz, parallel_coordinates", "To begin exploring the data we took a sample of the SEER data, defined the features and dependent variable, printed the top few lines to ensure a successful data ingest, and ran descriptive statistics", "FEATURES = [\n \"Birth Year\",\n \"Age at Diagnosis\",\n \"Race\",\n \"Origin\",\n \"laterality\",\n \"Radiation\",\n \"Histrec\",\n \"ER Status\",\n \"PR Status\",\n \"Behanal\",\n \"Stage\",\n \"Numprimes\",\n \"Survival Time\",\n \"Bucket\"\n]\n\nLABEL_MAP = {\n 0: \"< 60 Months\",\n 1: \"60 < months > 120\",\n 2: \"> 120 months\",\n}\n\n# Read the data into a DataFrame\ndf = pd.read_csv(\"clean1.csv\", sep=',' , header=0, names=FEATURES)\n\n\n# Convert class labels into text\nfor k,v in LABEL_MAP.items():\n df.ix[df.Bucket == k, 'Bucket'] = v\n\nprint(df.head(n=5))\n\ndf.describe()", "Next we checked our data type and determine the frequency of each class", "print (df.groupby('Bucket')['Bucket'].count())", "We used a histogram to see the distribution of survival time in months", "fig = plt.figure()\nax = fig.add_subplot(111)\nax.hist(df['Survival Time'], bins = 10, range = (df['Survival Time'].min(),df['Survival Time'].max()))\nplt.title('Survival Time Distribution')\nplt.xlabel('Survival Time')\nplt.ylabel('Months')\nplt.show()", "Next we played around with a few visualization to get a better understanding of the data", "scatter_matrix(df, alpha=0.2, figsize=(12, 12), 
diagonal='kde')\nplt.show()\n\nplt.figure(figsize=(12,12))\nparallel_coordinates(df, 'Bucket')\nplt.show()", "The plot below shows a lot of overlap between the 3 classes which alludes to the fact that classification models may not perform great. However, the plot also shows a more clear classification along the birth year and age at diagnosis features.", "plt.figure(figsize=(12,12))\nradviz(df, 'Bucket')\nplt.show()", "Next we moved to creating survival charts using a larger sample size. We created a class with a \"plot_survival\" function. For the graph we picked variables that the scientific literature finds significant-- Stage, ER status, PR status, age, and radiation treatment. The second plot compares the frequency of survival for censored and non-censored patients.", "class ExploreSeer(MasterSeer):\n\n def __init__(self, path=r'./data/', testMode=False, verbose=True, sample_size=5000):\n\n # user supplied parameters\n self.testMode = testMode # import one file, 500 records and return\n self.verbose = verbose # prints status messages\n self.sample_size = sample_size # number of rows to pull for testing\n\n if type(path) != str:\n raise TypeError('path must be a string')\n\n if path[-1] != '/':\n path += '/' # if path does not end with a backslash, add one\n\n self.path = path\n\n # open connection to the database\n super().__init__(path, False, verbose=verbose)\n self.db_conn, self.db_cur = super().init_database(False)\n\n\n def __del__(self):\n super().__del__()\n\n \n def plot_survival(self):\n\n df = super().load_data(col = ['YR_BRTH','AGE_DX','LATERAL','RADIATN','HISTREC','ERSTATUS',\n 'PRSTATUS','BEHANAL','HST_STGA','NUMPRIMS', 'SRV_TIME_MON', \n 'SRV_TIME_MON_PA', 'DTH_CLASS', 'O_DTH_CLASS', 'STAT_REC'], \n cond = 'SRV_TIME_MON < 1000 AND HST_STGA < 8 AND DTH_CLASS < 9 AND ERSTATUS < 4 AND PRSTATUS < 4', \n sample_size = 100000)\n\n kmf = KaplanMeierFitter()\n\n try:\n df.RADIATN = df.RADIATN.replace(7, 0)\n df = df[df.RADIATN < 7] \n except Exception as 
err:\n pass\n\n # 0-negative, 1-borderline,, 2-positive\n df = df[df.ERSTATUS != 4]\n df = df[df.ERSTATUS != 9]\n df.ERSTATUS = df.ERSTATUS.replace(2, 0)\n df.ERSTATUS = df.ERSTATUS.replace(1, 2)\n df.ERSTATUS = df.ERSTATUS.replace(3, 1)\n\n # 0-negative, 1-borderline,, 2-positive\n df = df[df.PRSTATUS != 4]\n df = df[df.PRSTATUS != 9]\n df.PRSTATUS = df.PRSTATUS.replace(2, 0)\n df.PRSTATUS = df.PRSTATUS.replace(1, 2)\n df.PRSTATUS = df.PRSTATUS.replace(3, 1)\n\n rad = df.RADIATN > 0\n er = df.ERSTATUS > 0\n pr = df.PRSTATUS > 0\n\n st0 = df.HST_STGA == 0\n st1 = df.HST_STGA == 1\n st2 = df.HST_STGA == 2\n st4 = df.HST_STGA == 4\n\n age = df.AGE_DX < 50\n\n\n df['SRV_TIME_YR'] = df['SRV_TIME_MON'] / 12\n T = df['SRV_TIME_YR']\n #C = (np.logical_or(df.DTH_CLASS == 1, df.O_DTH_CLASS == 1))\n C = df.STAT_REC == 4\n\n \n f, ax = plt.subplots(5, sharex=True, sharey=True)\n ax[0].set_title(\"Lifespans of cancer patients\");\n\n # radiation\n kmf.fit(T[rad], event_observed=C[rad], label=\"Radiation\")\n kmf.plot(ax=ax[0]) #, ci_force_lines=True)\n kmf.fit(T[~rad], event_observed=C[~rad], label=\"No Radiation\")\n kmf.plot(ax=ax[0]) #, ci_force_lines=True)\n\n # ER Status\n kmf.fit(T[er], event_observed=C[er], label=\"ER Positive\")\n kmf.plot(ax=ax[1]) #, ci_force_lines=True)\n kmf.fit(T[~er], event_observed=C[~er], label=\"ER Negative\")\n kmf.plot(ax=ax[1]) #, ci_force_lines=True)\n\n # PR Status\n kmf.fit(T[pr], event_observed=C[pr], label=\"PR Positive\")\n kmf.plot(ax=ax[2]) #, ci_force_lines=True)\n kmf.fit(T[~pr], event_observed=C[~pr], label=\"PR Negative\")\n kmf.plot(ax=ax[2]) #, ci_force_lines=True)\n\n # stage\n kmf.fit(T[st0], event_observed=C[st0], label=\"Stage 0\")\n kmf.plot(ax=ax[3]) #, ci_force_lines=True)\n kmf.fit(T[st1], event_observed=C[st1], label=\"Stage 1\")\n kmf.plot(ax=ax[3]) #, ci_force_lines=True)\n kmf.fit(T[st2], event_observed=C[st2], label=\"Stage 2\")\n kmf.plot(ax=ax[3]) #, ci_force_lines=True)\n kmf.fit(T[st4], event_observed=C[st4], 
label=\"Stage 4\")\n kmf.plot(ax=ax[3]) #, ci_force_lines=True)\n\n # age\n kmf.fit(T[age], event_observed=C[age], label=\"Age < 50\")\n kmf.plot(ax=ax[4]) #, ci_force_lines=True)\n kmf.fit(T[~age], event_observed=C[~age], label=\"Age >= 50\")\n kmf.plot(ax=ax[4]) #, ci_force_lines=True)\n\n ax[0].legend(loc=3,prop={'size':10})\n ax[1].legend(loc=3,prop={'size':10})\n ax[2].legend(loc=3,prop={'size':10})\n ax[3].legend(loc=3,prop={'size':10})\n ax[4].legend(loc=3,prop={'size':10})\n\n ax[len(ax)-1].set_xlabel('Survival in years')\n\n f.text(0.04, 0.5, 'Survival %', va='center', rotation='vertical')\n plt.tight_layout()\n\n plt.ylim(0,1);\n plt.show()\n\n f, ax = plt.subplots(2, sharex=True, sharey=True)\n\n df.hist('SRV_TIME_YR', by=df.STAT_REC != 4, ax=(ax[0], ax[1]))\n ax[0].set_title('Histogram of Non Censored Patients')\n ax[0].set_ylabel('Number of Patients')\n\n ax[1].set_ylabel('Number of Patients')\n ax[1].set_title('Histogram of Censored Patients')\n ax[1].set_xlabel('Survival in Years')\n plt.show()\n\n return\n\n # second plot of survival\n\n fig, ax = plt.subplots(figsize=(8, 6))\n\n cen = df[df.STAT_REC != 4].SRV_TIME_MON\n nc = df[df.STAT_REC == 4].SRV_TIME_MON\n cen = cen.sort_values()\n nc = nc.sort_values()\n\n ax.hlines([x for x in range(len(nc))] , 0, nc , color = 'b', label='Uncensored');\n ax.hlines([x for x in range(len(nc), len(nc)+len(cen))], 0, cen, color = 'r', label='Censored');\n\n ax.set_xlim(left=0);\n ax.set_xlabel('Months');\n ax.set_ylim(-0.25, len(df) + 0.25);\n ax.legend(loc='best');\n plt.show()\n\n return\n\n\n\nseer = ExploreSeer(sample_size=10000)\nseer.plot_survival()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
morganics/bayesianpy
examples/notebook/diabetes_non_linear_regression.ipynb
apache-2.0
[ "Non Linear regression using the diabetes dataset\nThis example follows on from the previous linear regression example, to demonstrate how additional latent states are synonymous with the number of degrees of freedom in traditional non-linear regression (e.g. non-linear least squares).\nI'm not going to spend much time explaining the code. The only difference to the linear regression is the additional 'Cluster' variable specified in the MixtureNaiveBayes template. I can start off with 2 latent states.", "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import datasets\nfrom sklearn.metrics import r2_score\n\nimport sys\nsys.path.append(\"../../../bayesianpy\")\n\nimport bayesianpy\n\nimport pandas as pd\nimport logging\nfrom sklearn.model_selection import train_test_split\n\n# Load the diabetes dataset\ndiabetes = datasets.load_diabetes()\n\n# Use only one feature\ndiabetes_X = diabetes.data[:, np.newaxis, 2]\ndf = pd.DataFrame({'A': [x[0] for x in diabetes_X], 'target': diabetes.target})\ntrain, test = train_test_split(df, test_size=0.4)\n\nlogger = logging.getLogger()\nbayesianpy.jni.attach(logger)\nf = bayesianpy.utils.get_path_to_parent_dir('')\n\nwith bayesianpy.data.DataSet(df, f, logger) as dataset:\n tpl = bayesianpy.template.MixtureNaiveBayes(logger, continuous=df, latent_states=2)\n network = tpl.create(bayesianpy.network.NetworkFactory(logger))\n\n plt.figure()\n layout = bayesianpy.visual.NetworkLayout(network)\n graph = layout.build_graph()\n pos = layout.fruchterman_reingold_layout(graph)\n layout.visualise(graph, pos)\n\n model = bayesianpy.model.NetworkModel(network, logger)\n\n model.train(dataset.subset(train.index.tolist()))\n \n results = model.batch_query(dataset.subset(test.index.tolist()),\n [bayesianpy.model.QueryMeanVariance('target',output_dtype=df['target'].dtype)])\n\n results.sort_values(by='A', ascending=True, inplace=True)\n plt.figure(figsize=(10, 10))\n plt.scatter(df['A'].tolist(), 
df['target'].tolist(), label='Actual')\n plt.plot(results['A'], results['target_mean'], 'ro-', label='Predicted')\n\n plt.fill_between(results.A, \n results.target_mean-results.target_variance.apply(np.sqrt),\n results.target_mean+results.target_variance.apply(np.sqrt),\n color='darkgrey', alpha=0.4,\n label='Variance'\n )\n plt.xlabel(\"A\")\n plt.ylabel(\"Predicted Target\")\n plt.legend()\n plt.show()\n \n print(\"R2 score: {}\".format(r2_score(results.target.tolist(), results.target_mean.tolist())))", "With 5 latent states:", "with bayesianpy.data.DataSet(df, f, logger) as dataset:\n tpl = bayesianpy.template.MixtureNaiveBayes(logger, continuous=df, latent_states=5)\n network = tpl.create(bayesianpy.network.NetworkFactory(logger))\n\n model = bayesianpy.model.NetworkModel(network, logger)\n\n model.train(dataset.subset(train.index.tolist()))\n \n results = model.batch_query(dataset.subset(test.index.tolist()),\n [bayesianpy.model.QueryMeanVariance('target',output_dtype=df['target'].dtype)])\n\n results.sort_values(by='A', ascending=True, inplace=True)\n plt.figure(figsize=(10, 10))\n plt.scatter(df['A'].tolist(), df['target'].tolist(), label='Actual')\n plt.plot(results['A'], results['target_mean'], 'ro-', label='Predicted')\n\n plt.fill_between(results.A, \n results.target_mean-results.target_variance.apply(np.sqrt),\n results.target_mean+results.target_variance.apply(np.sqrt),\n color='darkgrey', alpha=0.4,\n label='Variance'\n )\n plt.xlabel(\"A\")\n plt.ylabel(\"Predicted Target\")\n plt.legend()\n plt.show()\n\nprint(\"R2 score: {}\".format(r2_score(results.target.tolist(), results.target_mean.tolist())))", "Finally 10 latent states:", "with bayesianpy.data.DataSet(df, f, logger) as dataset:\n tpl = bayesianpy.template.MixtureNaiveBayes(logger, continuous=df, latent_states=10)\n network = tpl.create(bayesianpy.network.NetworkFactory(logger))\n\n model = bayesianpy.model.NetworkModel(network, logger)\n\n model.train(dataset.subset(train.index.tolist()))\n 
\n results = model.batch_query(dataset.subset(test.index.tolist()),\n [bayesianpy.model.QueryMeanVariance('target',output_dtype=df['target'].dtype)])\n\n results.sort_values(by='A', ascending=True, inplace=True)\n plt.figure(figsize=(10, 10))\n plt.scatter(df['A'].tolist(), df['target'].tolist(), label='Actual')\n plt.plot(results['A'], results['target_mean'], 'ro-', label='Predicted')\n\n plt.fill_between(results.A, \n results.target_mean-results.target_variance.apply(np.sqrt),\n results.target_mean+results.target_variance.apply(np.sqrt),\n color='darkgrey', alpha=0.4,\n label='Variance'\n )\n plt.xlabel(\"A\")\n plt.ylabel(\"Predicted Target\")\n plt.legend()\n plt.show()\n\nprint(\"R2 score: {}\".format(r2_score(results.target.tolist(), results.target_mean.tolist())))", "Obviously, the R2 score doesn't take variance in to account, but it looks like we've reached peak R2 at around 5 latent states (incidentally, a similar iteration can be used to select the optimal number of latent states).\nOur base R2 was around 0.34, so it seems like a linear regression model fits the data better than a non-linear regressor." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
irockafe/revo_healthcare
notebooks/MTBLS315/exploratory/auc_vs_rt_window_plots.ipynb
mit
[ "<h2> Using 4ppm file</h2>\nEnough retcor groups, not as many peak insertion problems", "import time\n\nimport pandas as pd\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import NullFormatter\n\nimport numpy as np\n\nfrom sklearn import preprocessing\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.cross_validation import StratifiedShuffleSplit\nfrom sklearn.cross_validation import cross_val_score\n#from sklearn.model_selection import StratifiedShuffleSplit\n#from sklearn.model_selection import cross_val_score\n\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.utils import shuffle\n\nfrom scipy import interp\nimport scipy.stats as stats\n\nimport pickle\n\n# My libraries\nimport data.preprocessing as preproc\nimport util.rt_window_prediction as rtwin\n\n%matplotlib inline\n%load_ext autoreload\n%autoreload 2", "<h2> Get the xcms feature table </h2>", "### Subdivide the data into a feature table\nlocal_path = '/home/irockafe/Dropbox (MIT)/Alm_Lab/projects/'\ndata_path = local_path + '/revo_healthcare/data/processed/MTBLS315/'\\\n'uhplc_pos/xcms_result_4.csv'\n\n## Import the data and remove extraneous columns\ndf = pd.read_csv(data_path, index_col=0)\n# convert column names to remove X added to them\nnew_idx = [i.replace('X', '') for i in df.columns]\ndf.columns = new_idx\nprint df.columns\n# replace 0 values with nans, so it's easier to replace them later\ndf = df.replace(to_replace=0.0 , value=np.nan, )\n\n# Make a new index of mz:rt\nmz = df.loc[:,\"mz\"].astype('str')\nrt = df.loc[:,\"rt\"].astype('str')\nidx = mz+':'+rt\ndf.index = idx\ndf\n# separate samples from xcms/camera things to make feature table\nnot_samples = ['mz', 'mzmin', 'mzmax', 'rt', 'rtmin', 'rtmax', \n 'npeaks', 'uhplc_pos', \n ]\nsamples_list = df.columns.difference(not_samples)\nmz_rt_df = df[not_samples]\n\n# convert to samples x features\nX_df_raw = df[samples_list].T\n\nprint 
\"original shape: %s \\n# nans: %f\\n\" % (X_df_raw.shape, X_df_raw.isnull().sum().sum())", "<h2> Get mapping between samples names, class names, and file names </h2>", "# Get mapping between sample name and assay names\npath_sample_name_map = (local_path + 'revo_healthcare/data/raw/' +\n 'MTBLS315/metadata/a_UPLC_POS_nmfi_and_bsi_diagnosis.txt')\n\n# Sample name to Assay name\n# Index is the sample name\n# value we want is the Assay name\nsample_df = pd.read_csv(path_sample_name_map, \n sep='\\t', index_col=0)\nsample_df = sample_df['MS Assay Name']\nsample_df.shape\n\n# sample name to sample class\npath_sample_class_map = (local_path + 'revo_healthcare/data/raw/' +\n 'MTBLS315/metadata/s_NMFI and BSI diagnosis.txt')\nclass_df = pd.read_csv(path_sample_class_map,\n sep='\\t')\nclass_df.set_index('Sample Name', inplace=True)\nclass_df = class_df['Factor Value[patient group]']\n\n# Combine sample > assay and sample > class into one\nclass_map_df = pd.concat([sample_df, class_df], axis=1)\nclass_map_df.rename(columns={'Factor Value[patient group]': 'class'}, inplace=True)\n\n# convert all non-malarial classes into a single classes \n# (collapse non-malarial febril illness and bacteremia together)\nbinary_class_map = class_map_df.replace(to_replace=['non-malarial febrile illness', 'bacterial bloodstream infection' ], \n value='non-malarial fever')\n\nprint \"binary class map:\\n\", binary_class_map.head()\n\n# Get case and control samples in their own dataframes\ncase_str = 'malaria'\ncontrol_str = 'non-malarial fever'\n# get that assay names based on class\ncase_labels = binary_class_map[binary_class_map['class'] == case_str]['MS Assay Name']\ncontrol_labels = binary_class_map[binary_class_map['class'] == control_str]['MS Assay Name']\n# select the assay names from X_df_raw based on class\ncase = X_df_raw.loc[case_labels]\ncontrol = X_df_raw.loc[control_labels]\n\nprint 'case shape: ', case.shape\nprint 'control shape: ', control.shape\n\n# convert classes to 
numbers\nle = preprocessing.LabelEncoder()\nle.fit(binary_class_map['class'])\ny = le.transform(binary_class_map['class'])\ny\n\n# Get ", "<h2> Preprocess feature table </h2>\nRemove systematic intensity biases between samples. Fill nan values", "# fill nan values with 1/2 the minimum from each sample\nfill_val = X_df_raw.min(axis=1) / 2.0\n# must transpose, b/c fillna only operates along columns\nX_df_filled = X_df_raw.T.fillna(value=fill_val, ).T\n\nX_pqn_df_raw = preproc.correct_dilution_factor(X_df_raw, plot=True)\nX_pqn_df_filled = preproc.correct_dilution_factor(X_df_filled, plot=True)\n\nX_pqn_df_filled_log = np.log10(X_pqn_df_filled)", "<h2> Check out the intensity distributions, MW p-vals (warning that\nthe mw-pvals assume asymptotic normal, so are probably a bit off. Don't rely on them\nfor important stuff. But as a spot-check, should be okay)", "# Do mann-whitney on case vs control\ndef mw_pval_dist(case, control):\n '''\n case - dataframe containing case\n control - dataframe with control samples\n All should have same features (columns)\n '''\n # get parametric pvals\n mann_whitney_vals = pd.DataFrame(np.full([case.shape[1],2], np.nan),\n index=case.columns, columns= ['u', 'pval'])\n for idx, case_vals in case.iteritems():\n control_vals = control[idx]\n u, pval = stats.mannwhitneyu(case_vals, control_vals)\n mann_whitney_vals.loc[idx, 'u'] = u\n mann_whitney_vals.loc[idx, 'pval'] = pval\n \n # plot mw pval distribution\n mann_whitney_vals.hist('pval')\n plt.title('mann-whitney pval between case and control')\n plt.show()\n \n # plot distribution of mean intensities\n case_mean = case.mean(axis=0)\n ctrl_mean = control.mean(axis=0)\n sns.distplot(np.log10(case_mean), label='case')\n sns.distplot(np.log10(ctrl_mean), label='control')\n plt.xlabel('log_10 intensity')\n plt.title('Mean intensity of case vs. 
control')\n plt.legend()\n plt.show()\n u, pval = stats.mannwhitneyu(case_mean, ctrl_mean)\n print 'pval (MannW) of intensities between case and control: ', pval\n \nprint('Raw intensities\\n\\n')\nmw_pval_dist(X_df_raw.loc[case_labels], X_df_raw.loc[control_labels])\n\nprint('*'*50+'\\nNaN filled with 1/2 min')\nmw_pval_dist(X_df_filled.loc[case_labels], X_df_filled.loc[control_labels])\n\nprint('*'*50+'\\n Raw pqn_normalized')\nmw_pval_dist(X_pqn_df_raw.loc[case_labels], X_pqn_df_raw.loc[control_labels])\n\nprint('*'*50+'\\n NaN filled with 1/2 min, pqn normalized')\nmw_pval_dist(X_pqn_df_filled.loc[case_labels], X_pqn_df_filled.loc[control_labels])\n", "<h2> Use the raw values - they appear to be closer to the case vals than the dilution-factor normalized values...?\n\n<h2> Run rt-window classifiers and capture the auc distributions\nto plot </h2>", "# Add back mz, rt, etc. columns to feature table and reshape it to be\n# (feats x samples)\nX_df_filled_mzrt = pd.concat([df[not_samples].T, X_pqn_df_filled], \n axis=0).T\n\n# run a sliding windonw\n# Make sliding window\nmin_val = 0\nmax_val = df['rt'].max()\nwidth = max_val / 5\nstep = width / 2\nsliding_window = rtwin.make_sliding_window(min_val, \n max_val, width, step)\n\n# run classifier & plot on sliding window\nn_iter = 50\ntest_size = 0.3\nrf_trees = 1000\nall_aucs = np.full([len(sliding_window), n_iter], np.nan)\npath = ('/revo_healthcare/presentations/isaac_bats/'+\n 'rt_window_plots/MTBLS315/')\noutput_path = local_path + path\n\n# Run rt-sliding-window classifier\nrtwin.sliding_rt_window_aucs(X_df_filled_mzrt, y, sliding_window, not_samples,\n rf_trees=rf_trees, n_iter=n_iter, test_size=test_size,\n output_path=output_path)\n\nauc_vals = pickle.load(open(output_path+'/auc_vals.pkl', 'rb'))\n\nfig_path = output_path + 'auc_vs_rt.pdf'\nrtwin.plot_auc_vs_rt(auc_vals, sliding_window, df, fig_path)\n\n# Run on whole window\n# Add back mz, rt, etc. 
columns to feature table and reshape it to be\n# (feats x samples)\nX_df_filled_mzrt = pd.concat([df[not_samples].T, X_pqn_df_filled], \n axis=0).T\n\n# run a sliding windonw\n# Make sliding window\nmin_val = 0\nmax_val = df['rt'].max()\nwidth = max_val \nstep = width \nsliding_window = rtwin.make_sliding_window(min_val, \n max_val, width, step)\n\n# run classifier & plot on sliding window\nn_iter = 50\ntest_size = 0.3\nrf_trees = 1000\nall_aucs = np.full([len(sliding_window), n_iter], np.nan)\npath = ('/revo_healthcare/presentations/isaac_bats/'+\n 'rt_window_plots/MTBLS315/')\noutput_path = local_path + path\n\n# Run rt-sliding-window classifier\nrtwin.sliding_rt_window_aucs(X_df_filled_mzrt, y, sliding_window, not_samples,\n rf_trees=rf_trees, n_iter=n_iter, test_size=test_size,\n output_path=output_path)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
talumbau/taxdata
docs/notebooks/Stage I.ipynb
mit
[ "from pandas import DataFrame as df\nimport pandas as pd", "Import Census projection on population:\n\nProjection from 2014\nHistorical estimates from 2010 to 2014\nHistorical estimates from 2000 to 2010", "#projection 2014+\npop_projection = df.from_csv(\"NP2014_D1.csv\", index_col='year')\npop_projection = pop_projection[(pop_projection.sex == 0) & (pop_projection.race == 0) & (pop_projection.origin == 0)]\npop_projection = pop_projection.drop(['sex', 'race', 'origin'], axis=1)\npop_projection = pop_projection.drop(pop_projection.index[11:], axis=0)\npop_projection = pop_projection.drop(pop_projection.index[:1], axis=0)\n\n#estimates 2010-2014\nhistorical1 = pd.read_csv(\"NC-EST2014-AGESEX-RES.csv\")\nhistorical1 = historical1[historical1.SEX == 0]\nhistorical1 = historical1.drop(['SEX', 'CENSUS2010POP', 'ESTIMATESBASE2010'],axis = 1)\n\npop_dep1 = historical1[historical1.AGE<=19].sum()\npop_dep1 = pop_dep1.drop(['AGE'],axis = 0)\n\npop_snr1 = historical1[(historical1.AGE>=65)&(historical1.AGE<999)].sum()\npop_snr1 = pop_snr1.drop(['AGE'], axis = 0)\n\ntotal_pop1 = historical1[historical1.AGE==999]\ntotal_pop1 = total_pop1.drop(['AGE'], axis = 1)\n\n#estimates 2008-2009\nhistorical2 = pd.read_csv(\"US-EST00INT-ALLDATA.csv\")\nhistorical2 = historical2[(historical2.MONTH==7)&(historical2.YEAR>=2008)&(historical2.YEAR<2010)]\nhistorical2 = historical2.drop(historical2.columns[4:],axis = 1)\nhistorical2 = historical2.drop(historical2.columns[0],axis = 1)\n\npop_dep2 = [historical2.TOT_POP[(historical2.YEAR ==2008) & (historical2.AGE <=19)].sum(),historical2.TOT_POP[(historical2.YEAR ==2009) & (historical2.AGE <=19)].sum()]\npop_snr2 = [historical2.TOT_POP[(historical2.YEAR ==2008) & (historical2.AGE >=65) & (historical2.AGE < 999)].sum(), historical2.TOT_POP[(historical2.YEAR ==2009) & (historical2.AGE >=65) & (historical2.AGE < 999)].sum()]\ntotal_pop2 = [historical2.TOT_POP[(historical2.YEAR ==2008) & (historical2.AGE == 999)].sum(), 
historical2.TOT_POP[(historical2.YEAR ==2009) & (historical2.AGE == 999)].sum() ]\n\nPOP_DEP = pd.concat([df(pop_dep2),df(pop_dep1),df(pop_projection[pop_projection.columns[1:21]].sum(axis = 1))])\nPOP_SNR = pd.concat([df(pop_snr2),df(pop_snr1),df(pop_projection[pop_projection.columns[66:]].sum(axis = 1))])\nTOTAL_POP = pd.concat([df(total_pop2), df(total_pop1.values.transpose()),df(pop_projection.total_pop.values)])\n\nStage_II_targets = df(TOTAL_POP)\nStage_II_targets.columns = ['TOTAL_POP']\n\nStage_II_targets['POP_DEP'] = POP_DEP.values\nStage_II_targets['POP_SNR'] = POP_SNR.values\n\nindex = list(range(2008,2025))\nStage_II_targets.index = index\n\nAPOPN = Stage_II_targets.TOTAL_POP/Stage_II_targets.TOTAL_POP[2008]\nStage_I_factors = df(APOPN, index = index)\nStage_I_factors.columns = ['APOPN']\n\nStage_I_factors['APOPDEP'] = df(Stage_II_targets.POP_DEP/Stage_II_targets.POP_DEP[2008],index = index)\nStage_I_factors['APOPSNR'] = df(Stage_II_targets.POP_SNR/Stage_II_targets.POP_SNR[2008],index = index)\n\npop_growth_rates = df(Stage_II_targets.TOTAL_POP.pct_change()+1)\npop_growth_rates['POPDEP'] = Stage_II_targets.POP_DEP.pct_change()+1\npop_growth_rates['POPSNR'] = Stage_II_targets.POP_SNR.pct_change()+1\npop_growth_rates = pop_growth_rates.drop(pop_growth_rates.index[0],axis = 0)", "Import CBO baseline", "cbo_baseline = (df.from_csv(\"CBO_baseline.csv\", index_col=0)).transpose()\ncbo_baseline.index = index\n\nStage_I_factors['AGDPN'] = df(cbo_baseline.GDP/cbo_baseline.GDP[2008], index = index)\nStage_I_factors['ATXPY'] = df(cbo_baseline.TPY/cbo_baseline.TPY[2008], index = index)\nStage_I_factors['ASCHF'] = df(cbo_baseline.SCHF/cbo_baseline.SCHF[2008], index = index)\nStage_I_factors['ABOOK'] = df(cbo_baseline.BOOK/cbo_baseline.BOOK[2008], index = index)\nStage_I_factors['ACPIU'] = df(cbo_baseline.CPIU/cbo_baseline.CPIU[2008], index = index)\nStage_I_factors['ACPIM'] = df(cbo_baseline.CPIM/cbo_baseline.CPIM[2008], index = index)\n\ncbo_growth_rates = 
cbo_baseline.pct_change()+1\ncbo_growth_rates = cbo_growth_rates.drop(cbo_growth_rates.index[0], axis=0)", "Import IRS number of returns projection", "irs_returns = (df.from_csv(\"IRS_return_projection.csv\", index_col=0)).transpose() \n\nreturn_growth_rate = irs_returns.pct_change()+1\nreturn_growth_rate.Returns['2023'] = return_growth_rate.Returns['2022']\nreturn_growth_rate.Returns['2024'] = return_growth_rate.Returns['2022']\nreturn_growth_rate.Returns.index = index", "Import SOI estimates (2008 - 2012)\nTax-calculator is using 08 PUF.", "soi_estimates = (df.from_csv(\"SOI_estimates.csv\", index_col=0)).transpose()\nhistorical_index = list(range(2008,2013))\nsoi_estimates.index = historical_index\n\nreturn_projection = soi_estimates\nfor i in range(2012,2024):\n Single = return_projection.Single[i]*return_growth_rate.Returns[i+1]\n Joint = return_projection.Joint[i]*return_growth_rate.Returns[i+1]\n HH = return_projection.HH[i]*return_growth_rate.Returns[i+1]\n SS_return = return_projection.SS_return[i]*pop_growth_rates.POPSNR[i+1]\n Dep_return = return_projection.Dep_return[i]*pop_growth_rates.POPDEP[i+1]\n INTS = return_projection.INTS[i]*cbo_growth_rates.INTS[i+1]\n DIVS = return_projection.DIVS[i]*cbo_growth_rates.DIVS[i+1]\n SCHCI = return_projection.SCHCI[i]*cbo_growth_rates.SCHC[i+1]\n SCHCL = return_projection.SCHCL[i]*cbo_growth_rates.SCHC[i+1]\n CGNS = return_projection.CGNS[i]*cbo_growth_rates.CGNS[i+1]\n Pension = return_projection.Pension[i]*cbo_growth_rates.TPY[i+1]\n SCHEI = return_projection.SCHEI[i]*cbo_growth_rates.BOOK[i+1]\n SCHEL = return_projection.SCHEL[i]*cbo_growth_rates.BOOK[i+1]\n SS = return_projection.SS[i]*cbo_growth_rates.SOCSEC[i+1]\n UCOMP = return_projection.UCOMP[i]*cbo_growth_rates.UCOMP[i+1]\n Wage_1 = return_projection.WAGE_1[i]*cbo_growth_rates.Wages[i+1]\n Wage_2 = return_projection.WAGE_2[i]*cbo_growth_rates.Wages[i+1]\n Wage_3 = return_projection.WAGE_3[i]*cbo_growth_rates.Wages[i+1]\n Wage_4 = 
return_projection.WAGE_4[i]*cbo_growth_rates.Wages[i+1]\n Wage_5 = return_projection.WAGE_5[i]*cbo_growth_rates.Wages[i+1]\n Wage_6 = return_projection.WAGE_6[i]*cbo_growth_rates.Wages[i+1]\n Wage_7 = return_projection.WAGE_7[i]*cbo_growth_rates.Wages[i+1]\n Wage_8 = return_projection.WAGE_8[i]*cbo_growth_rates.Wages[i+1]\n Wage_9 = return_projection.WAGE_9[i]*cbo_growth_rates.Wages[i+1]\n Wage_10 = return_projection.WAGE_10[i]*cbo_growth_rates.Wages[i+1]\n Wage_11 = return_projection.WAGE_11[i]*cbo_growth_rates.Wages[i+1]\n Wage_12 = return_projection.WAGE_12[i]*cbo_growth_rates.Wages[i+1]\n \n current_year = df([Single, Joint, HH,\n SS_return,Dep_return,INTS,DIVS,SCHCI,SCHCL,\n CGNS,Pension, SCHEI, SCHEL,SS,UCOMP,Wage_1,\n Wage_2,Wage_3,Wage_4,Wage_5,Wage_6,Wage_7,\n Wage_8, Wage_9, Wage_10, Wage_11, Wage_12]).transpose()\n current_year.columns = return_projection.columns\n current_year.index = [i+1]\n return_projection = return_projection.append(current_year)\n\nStage_II_targets = pd.concat([Stage_II_targets,return_projection], axis = 1)\n\ntotal_return = df(Stage_II_targets[Stage_II_targets.columns[3:6]].sum(axis = 1), columns=['ARETS'])\nStage_I_factors['ARETS'] = total_return/total_return.ARETS[2008]\n\ntotal_wage = df(Stage_II_targets[Stage_II_targets.columns[18:30]].sum(axis = 1), columns=['AWAGE'])\nStage_I_factors['AWAGE'] = total_wage/total_wage.AWAGE[2008]\n\nStage_I_factors['ASCHCI'] = Stage_II_targets.SCHCI/Stage_II_targets.SCHCI[2008]\nStage_I_factors['ASCHCL'] = Stage_II_targets.SCHCL/Stage_II_targets.SCHCL[2008]\nStage_I_factors['ASCHEI'] = Stage_II_targets.SCHEI/Stage_II_targets.SCHEI[2008]\nStage_I_factors['ASCHEL'] = Stage_II_targets.SCHEL/Stage_II_targets.SCHEL[2008]\n\nStage_I_factors['AINTS'] = Stage_II_targets.INTS/Stage_II_targets.INTS[2008]\nStage_I_factors['ADIVS'] = Stage_II_targets.DIVS/Stage_II_targets.DIVS[2008]\nStage_I_factors['ACGNS'] = Stage_II_targets.CGNS/Stage_II_targets.CGNS[2008]\n\nStage_I_factors['ASOCSEC'] = 
Stage_II_targets.SS/Stage_II_targets.SS[2008]\nStage_I_factors['AUCOMP'] = Stage_II_targets.UCOMP/Stage_II_targets.UCOMP[2008]\n\n#pd.options.display.float_format = '{:,.4f}'.format\nStage_I_factors.to_csv(path_or_buf = \"Stage_I_factors.csv\", float_format ='%.4f')\nStage_I_factors = Stage_I_factors.transpose()\nStage_I_factors.to_csv(path_or_buf = \"../Stage II/Stage_I_factors.csv\", float_format ='%.4f')\n\nStage_II_targets = Stage_II_targets.transpose()\nStage_II_targets.to_csv(path_or_buf = \"../Stage II/Stage_II_targets.csv\", float_format = '%.4f')" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
adico-somoto/deep-learning
sentiment-rnn/Sentiment_RNN.ipynb
mit
[ "Sentiment Analysis with an RNN\nIn this notebook, you'll implement a recurrent neural network that performs sentiment analysis. Using an RNN rather than a feedfoward network is more accurate since we can include information about the sequence of words. Here we'll use a dataset of movie reviews, accompanied by labels.\nThe architecture for this network is shown below.\n<img src=\"assets/network_diagram.png\" width=400px>\nHere, we'll pass in words to an embedding layer. We need an embedding layer because we have tens of thousands of words, so we'll need a more efficient representation for our input data than one-hot encoded vectors. You should have seen this before from the word2vec lesson. You can actually train up an embedding with word2vec and use it here. But it's good enough to just have an embedding layer and let the network learn the embedding table on it's own.\nFrom the embedding layer, the new representations will be passed to LSTM cells. These will add recurrent connections to the network so we can include information about the sequence of words in the data. Finally, the LSTM cells will go to a sigmoid output layer here. We're using the sigmoid because we're trying to predict if this text has positive or negative sentiment. The output layer will just be a single unit then, with a sigmoid activation function.\nWe don't care about the sigmoid outputs except for the very last one, we can ignore the rest. We'll calculate the cost from the output of the last step and the training label.", "import numpy as np\nimport tensorflow as tf\n\nwith open('../sentiment-network/reviews.txt', 'r') as f:\n reviews = f.read()\nwith open('../sentiment-network/labels.txt', 'r') as f:\n labels = f.read()\n\nreviews[:2000]", "Data preprocessing\nThe first step when building a neural network model is getting your data into the proper form to feed into the network. Since we're using embedding layers, we'll need to encode each word with an integer. 
We'll also want to clean it up a bit.\nYou can see an example of the reviews data above. We'll want to get rid of those periods. Also, you might notice that the reviews are delimited with newlines \\n. To deal with those, I'm going to split the text into each review using \\n as the delimiter. Then I can combined all the reviews back together into one big string.\nFirst, let's remove all punctuation. Then get all the text without the newlines and split it into individual words.", "from string import punctuation\nall_text = ''.join([c for c in reviews if c not in punctuation])\nreviews = all_text.split('\\n')\n\nall_text = ' '.join(reviews)\nwords = all_text.split()\n\nall_text[:2000]\n\nwords[:100]", "Encoding the words\nThe embedding lookup requires that we pass in integers to our network. The easiest way to do this is to create dictionaries that map the words in the vocabulary to integers. Then we can convert each of our reviews into integers so they can be passed into the network.\n\nExercise: Now you're going to encode the words with integers. Build a dictionary that maps words to integers. Later we're going to pad our input vectors with zeros, so make sure the integers start at 1, not 0.\nAlso, convert the reviews to integers and store the reviews in a new list called reviews_ints.", "# Create your dictionary that maps vocab words to integers here\nvocab_to_int = {word:i+1 for i,word in enumerate(set(words))}\n\n# Convert the reviews to integers, same shape as reviews list, but with integers\nreviews_ints = []\nfor each in reviews:\n reviews_ints.append([vocab_to_int[word] for word in each.split()]) ", "Encoding the labels\nOur labels are \"positive\" or \"negative\". 
To use these labels in our network, we need to convert them to 0 and 1.\n\nExercise: Convert labels from positive and negative to 1 and 0, respectively.", "# Convert labels to 1s and 0s for 'positive' and 'negative'\nlabels = labels.split('\\n')\nlabels = np.array([1 if each=='positive' else 0 for each in labels])", "If you built labels correctly, you should see the next output.", "from collections import Counter\nreview_lens = Counter([len(x) for x in reviews_ints])\nprint(\"Zero-length reviews: {}\".format(review_lens[0]))\nprint(\"Maximum review length: {}\".format(max(review_lens)))", "Okay, a couple issues here. We seem to have one review with zero length. And, the maximum review length is way too many steps for our RNN. Let's truncate to 200 steps. For reviews shorter than 200, we'll pad with 0s. For reviews longer than 200, we can truncate them to the first 200 characters.\n\nExercise: First, remove the review with zero length from the reviews_ints list.", "# Filter out that review with 0 length\nnon_zero_idx = [ii for ii, review in enumerate(reviews_ints) if len(review)!=0 ]\nlen(non_zero_idx)\n\nreviews_ints = [reviews_ints[ii] for ii in non_zero_idx]\nlabels = [labels[ii] for ii in non_zero_idx]", "Exercise: Now, create an array features that contains the data we'll pass to the network. The data should come from review_ints, since we want to feed integers to the network. Each row should be 200 elements long. For reviews shorter than 200 words, left pad with 0s. That is, if the review is ['best', 'movie', 'ever'], [117, 18, 128] as integers, the row will look like [0, 0, 0, ..., 0, 117, 18, 128]. For reviews longer than 200, use on the first 200 words as the feature vector.\n\nThis isn't trivial and there are a bunch of ways to do this. 
But, if you're going to be building your own deep learning networks, you're going to have to get used to preparing your data.", "seq_len = 200\nfeatures = np.zeros([len(reviews_ints),seq_len], dtype=int)\n\nfor i,row in enumerate(reviews_ints):\n features[i, -len(row):] = np.array(row)[:seq_len]", "If you build features correctly, it should look like that cell output below.", "features[:10,:100]", "Training, Validation, Test\nWith our data in nice shape, we'll split it into training, validation, and test sets.\n\nExercise: Create the training, validation, and test sets here. You'll need to create sets for the features and the labels, train_x and train_y for example. Define a split fraction, split_frac as the fraction of data to keep in the training set. Usually this is set to 0.8 or 0.9. The rest of the data will be split in half to create the validation and testing data.", "split_frac = 0.8\n\ndata_len = len(features)\ntrain_len = int( data_len * split_frac )\ntest_len = data_len - train_len\n\ntrain_x, val_x = features[:train_len], features[train_len:]\ntrain_y, val_y = labels[:train_len], labels[train_len:]\n\nval_x_len= int(len(val_x) * 0.5)\n\nval_x, test_x = val_x[:val_x_len], val_x[:val_x_len]\nval_y, test_y = val_y[:val_x_len], val_y[:val_x_len]\n\nprint(\"\\t\\t\\tFeature Shapes:\")\nprint(\"Train set: \\t\\t{}\".format(train_x.shape), \n \"\\nValidation set: \\t{}\".format(val_x.shape),\n \"\\nTest set: \\t\\t{}\".format(test_x.shape))", "With train, validation, and text fractions of 0.8, 0.1, 0.1, the final shapes should look like:\nFeature Shapes:\nTrain set: (20000, 200) \nValidation set: (2500, 200) \nTest set: (2500, 200)\nBuild the graph\nHere, we'll build the graph. First up, defining the hyperparameters.\n\nlstm_size: Number of units in the hidden layers in the LSTM cells. Usually larger is better performance wise. Common values are 128, 256, 512, etc.\nlstm_layers: Number of LSTM layers in the network. 
I'd start with 1, then add more if I'm underfitting.\nbatch_size: The number of reviews to feed the network in one training pass. Typically this should be set as high as you can go without running out of memory.\nlearning_rate: Learning rate", "lstm_size = 256\nlstm_layers = 1\nbatch_size = 500\nlearning_rate = 0.001", "For the network itself, we'll be passing in our 200 element long review vectors. Each batch will be batch_size vectors. We'll also be using dropout on the LSTM layer, so we'll make a placeholder for the keep probability.\n\nExercise: Create the inputs_, labels_, and drop out keep_prob placeholders using tf.placeholder. labels_ needs to be two-dimensional to work with some functions later. Since keep_prob is a scalar (a 0-dimensional tensor), you shouldn't provide a size to tf.placeholder.", "n_words = len(vocab_to_int)\n\n# Create the graph object\ngraph = tf.Graph()\n# Add nodes to the graph\nwith graph.as_default():\n inputs_ = tf.placeholder( tf.int32, [None,None], name='inputs')\n labels_ = tf.placeholder( tf.int32, [None,None], name='labels')\n keep_prob = tf.placeholder(tf.float32, name='keep_prob')", "Embedding\nNow we'll add an embedding layer. We need to do this because there are 74000 words in our vocabulary. It is massively inefficient to one-hot encode our classes here. You should remember dealing with this problem from the word2vec lesson. Instead of one-hot encoding, we can have an embedding layer and use that layer as a lookup table. You could train an embedding layer using word2vec, then load it here. But, it's fine to just make a new layer and let the network learn the weights.\n\nExercise: Create the embedding lookup matrix as a tf.Variable. Use that embedding matrix to get the embedded vectors to pass to the LSTM cell with tf.nn.embedding_lookup. This function takes the embedding matrix and an input tensor, such as the review vectors. Then, it'll return another tensor with the embedded vectors. 
So, if the embedding layer has 200 units, the function will return a tensor with size [batch_size, 200].", "# Size of the embedding vectors (number of units in the embedding layer)\nembed_size = 300 \n\nwith graph.as_default():\n embedding = tf.Variable(tf.random_uniform([n_words, embed_size], -1, 1))\n embed = tf.nn.embedding_lookup(embedding, inputs_)", "LSTM cell\n<img src=\"assets/network_diagram.png\" width=400px>\nNext, we'll create our LSTM cells to use in the recurrent network (TensorFlow documentation). Here we are just defining what the cells look like. This isn't actually building the graph, just defining the type of cells we want in our graph.\nTo create a basic LSTM cell for the graph, you'll want to use tf.contrib.rnn.BasicLSTMCell. Looking at the function documentation:\ntf.contrib.rnn.BasicLSTMCell(num_units, forget_bias=1.0, input_size=None, state_is_tuple=True, activation=&lt;function tanh at 0x109f1ef28&gt;)\nyou can see it takes a parameter called num_units, the number of units in the cell, called lstm_size in this code. So then, you can write something like \nlstm = tf.contrib.rnn.BasicLSTMCell(num_units)\nto create an LSTM cell with num_units. Next, you can add dropout to the cell with tf.contrib.rnn.DropoutWrapper. This just wraps the cell in another cell, but with dropout added to the inputs and/or outputs. It's a really convenient way to make your network better with almost no effort! So you'd do something like\ndrop = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)\nMost of the time, your network will have better performance with more layers. That's sort of the magic of deep learning, adding more layers allows the network to learn really complex relationships. Again, there is a simple way to create multiple layers of LSTM cells with tf.contrib.rnn.MultiRNNCell:\ncell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers)\nHere, [drop] * lstm_layers creates a list of cells (drop) that is lstm_layers long. 
The MultiRNNCell wrapper builds this into multiple layers of RNN cells, one for each cell in the list.\nSo the final cell you're using in the network is actually multiple (or just one) LSTM cells with dropout. But it all works the same from an achitectural viewpoint, just a more complicated graph in the cell.\n\nExercise: Below, use tf.contrib.rnn.BasicLSTMCell to create an LSTM cell. Then, add drop out to it with tf.contrib.rnn.DropoutWrapper. Finally, create multiple LSTM layers with tf.contrib.rnn.MultiRNNCell.\n\nHere is a tutorial on building RNNs that will help you out.", "with graph.as_default():\n # Your basic LSTM cell\n lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)\n \n # Add dropout to the cell\n drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)\n \n # Stack up multiple LSTM layers, for deep learning\n cell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers )\n \n # Getting an initial state of all zeros\n initial_state = cell.zero_state(batch_size, tf.float32)", "RNN forward pass\n<img src=\"assets/network_diagram.png\" width=400px>\nNow we need to actually run the data through the RNN nodes. You can use tf.nn.dynamic_rnn to do this. You'd pass in the RNN cell you created (our multiple layered LSTM cell for instance), and the inputs to the network.\noutputs, final_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=initial_state)\nAbove I created an initial state, initial_state, to pass to the RNN. This is the cell state that is passed between the hidden layers in successive time steps. tf.nn.dynamic_rnn takes care of most of the work for us. We pass in our cell and the input to the cell, then it does the unrolling and everything else for us. It returns outputs for each time step and the final_state of the hidden layer.\n\nExercise: Use tf.nn.dynamic_rnn to add the forward pass through the RNN. 
Remember that we're actually passing in vectors from the embedding layer, embed.", "with graph.as_default():\n outputs, final_state = tf.nn.dynamic_rnn(cell, embed, initial_state=initial_state)", "Output\nWe only care about the final output, we'll be using that as our sentiment prediction. So we need to grab the last output with outputs[:, -1], the calculate the cost from that and labels_.", "with graph.as_default():\n predictions = tf.contrib.layers.fully_connected(outputs[:, -1], 1, activation_fn=tf.sigmoid)\n cost = tf.losses.mean_squared_error(labels_, predictions)\n \n optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)", "Validation accuracy\nHere we can add a few nodes to calculate the accuracy which we'll use in the validation pass.", "with graph.as_default():\n correct_pred = tf.equal(tf.cast(tf.round(predictions), tf.int32), labels_)\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))", "Batching\nThis is a simple function for returning batches from our data. First it removes data such that we only have full batches. Then it iterates through the x and y arrays and returns slices out of those arrays with size [batch_size].", "def get_batches(x, y, batch_size=100):\n \n n_batches = len(x)//batch_size\n x, y = x[:n_batches*batch_size], y[:n_batches*batch_size]\n for ii in range(0, len(x), batch_size):\n yield x[ii:ii+batch_size], y[ii:ii+batch_size]", "Training\nBelow is the typical training code. If you want to do this yourself, feel free to delete all this code and implement it yourself. 
Before you run this, make sure the checkpoints directory exists.", "epochs = 10\n\nwith graph.as_default():\n saver = tf.train.Saver()\n\nwith tf.Session(graph=graph) as sess:\n sess.run(tf.global_variables_initializer())\n iteration = 1\n for e in range(epochs):\n state = sess.run(initial_state)\n \n for ii, (x, y) in enumerate(get_batches(train_x, train_y, batch_size), 1):\n feed = {inputs_: x,\n labels_: y[:, None],\n keep_prob: 0.5,\n initial_state: state}\n loss, state, _ = sess.run([cost, final_state, optimizer], feed_dict=feed)\n \n if iteration%5==0:\n print(\"Epoch: {}/{}\".format(e, epochs),\n \"Iteration: {}\".format(iteration),\n \"Train loss: {:.3f}\".format(loss))\n\n if iteration%25==0:\n val_acc = []\n val_state = sess.run(cell.zero_state(batch_size, tf.float32))\n for x, y in get_batches(val_x, val_y, batch_size):\n feed = {inputs_: x,\n labels_: y[:, None],\n keep_prob: 1,\n initial_state: val_state}\n batch_acc, val_state = sess.run([accuracy, final_state], feed_dict=feed)\n val_acc.append(batch_acc)\n print(\"Val acc: {:.3f}\".format(np.mean(val_acc)))\n iteration +=1\n saver.save(sess, \"checkpoints/sentiment.ckpt\")", "Testing", "test_acc = []\nwith tf.Session(graph=graph) as sess:\n saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))\n test_state = sess.run(cell.zero_state(batch_size, tf.float32))\n for ii, (x, y) in enumerate(get_batches(test_x, test_y, batch_size), 1):\n feed = {inputs_: x,\n labels_: y[:, None],\n keep_prob: 1,\n initial_state: test_state}\n batch_acc, test_state = sess.run([accuracy, final_state], feed_dict=feed)\n test_acc.append(batch_acc)\n print(\"Test accuracy: {:.3f}\".format(np.mean(test_acc)))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
brennick/project-1-first-neural-network
DLND Your first neural network.ipynb
mit
[ "Your first neural network\nIn this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more.", "%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt", "Load and prepare the data\nA critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon!", "data_path = 'Bike-Sharing-Dataset/hour.csv'\n\nrides = pd.read_csv(data_path)\n\nrides.head()", "Checking out the data\nThis dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the cnt column. You can see the first few rows of the data above.\nBelow is a plot showing the number of bike riders over the first 10 days or so in the data set. (Some days don't have exactly 24 entries in the data set, so it's not exactly 10 days.) You can see the hourly rentals here. This data is pretty complicated! The weekends have lower over all ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model.", "rides[:24*10].plot(x='dteday', y='cnt')", "Dummy variables\nHere we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. 
This is simple to do with Pandas thanks to get_dummies().", "dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']\nfor each in dummy_fields:\n dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)\n rides = pd.concat([rides, dummies], axis=1)\n\nfields_to_drop = ['instant', 'dteday', 'season', 'weathersit', \n 'weekday', 'atemp', 'mnth', 'workingday', 'hr']\ndata = rides.drop(fields_to_drop, axis=1)\ndata.head()", "Scaling target variables\nTo make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1.\nThe scaling factors are saved so we can go backwards when we use the network for predictions.", "quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']\n# Store scalings in a dictionary so we can convert back later\nscaled_features = {}\nfor each in quant_features:\n mean, std = data[each].mean(), data[each].std()\n scaled_features[each] = [mean, std]\n data.loc[:, each] = (data[each] - mean)/std", "Splitting the data into training, testing, and validation sets\nWe'll save the data for the last approximately 21 days to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders.", "# Save data for approximately the last 21 days \ntest_data = data[-21*24:]\n\n# Now remove the test data from the data set \ndata = data[:-21*24]\n\n# Separate the data into features and targets\ntarget_fields = ['cnt', 'casual', 'registered']\nfeatures, targets = data.drop(target_fields, axis=1), data[target_fields]\ntest_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]", "We'll split the data into two sets, one for training and one for validating as the network is being trained. 
Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).", "# Hold out the last 60 days or so of the remaining data as a validation set\ntrain_features, train_targets = features[:-60*24], targets[:-60*24]\nval_features, val_targets = features[-60*24:], targets[-60*24:]", "Time to build the network\nBelow you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes.\n<img src=\"assets/neural_network.png\" width=300px>\nThe network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called forward propagation.\nWe use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called backpropagation.\n\nHint: You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$.\n\nBelow, you have these tasks:\n1. Implement the sigmoid function to use as the activation function. 
Set self.activation_function in __init__ to your sigmoid function.\n2. Implement the forward pass in the train method.\n3. Implement the backpropagation algorithm in the train method, including calculating the output error.\n4. Implement the forward pass in the run method.", "class NeuralNetwork(object):\n def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):\n # Set number of nodes in input, hidden and output layers.\n self.input_nodes = input_nodes\n self.hidden_nodes = hidden_nodes\n self.output_nodes = output_nodes\n\n # Initialize weights\n self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5, \n (self.input_nodes, self.hidden_nodes))\n\n self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5, \n (self.hidden_nodes, self.output_nodes))\n self.lr = learning_rate\n \n #### TODO: Set self.activation_function to your implemented sigmoid function ####\n #\n # Note: in Python, you can define a function with a lambda expression,\n # as shown below.\n #self.activation_function = lambda x : 0 # Replace 0 with your sigmoid calculation.\n \n ### If the lambda code above is not something you're familiar with,\n # You can uncomment out the following three lines and put your \n # implementation there instead.\n #\n def sigmoid(x):\n return 1. / (1. + np.exp(-x)) # Replace 0 with your sigmoid calculation here\n self.activation_function = sigmoid\n \n \n def train(self, features, targets):\n ''' Train the network on batch of features and targets. 
\n \n Arguments\n ---------\n \n features: 2D array, each row is one data record, each column is a feature\n targets: 1D array of target values\n \n '''\n n_records = features.shape[0]\n delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)\n delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)\n for X, y in zip(features, targets):\n #### Implement the forward pass here ####\n ### Forward pass ###\n # TODO: Hidden layer - Replace these values with your calculations.\n hidden_inputs = np.dot(X, self.weights_input_to_hidden) # signals into hidden layer\n hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer\n\n # TODO: Output layer - Replace these values with your calculations.\n final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer\n final_outputs = final_inputs # signals from final output layer\n \n #### Implement the backward pass here ####\n ### Backward pass ###\n\n # TODO: Output error - Replace this value with your calculations.\n error = y - final_outputs # Output layer error is the difference between desired target and actual output.\n \n output_error_term = error * 1.0\n # TODO: Calculate the hidden layer's contribution to the error\n hidden_error = np.dot(output_error_term, self.weights_hidden_to_output.T)\n \n # TODO: Backpropagated error terms - Replace these values with your calculations.\n hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs)\n\n # Weight step (input to hidden)\n delta_weights_i_h += hidden_error_term * X[:, None]\n # Weight step (hidden to output)\n delta_weights_h_o += output_error_term * hidden_outputs[:, None]\n\n # TODO: Update the weights - Replace these values with your calculations.\n self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records # update hidden-to-output weights with gradient descent step\n self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records # update 
input-to-hidden weights with gradient descent step\n \n def run(self, features):\n ''' Run a forward pass through the network with input features \n \n Arguments\n ---------\n features: 1D array of feature values\n '''\n \n #### Implement the forward pass here ####\n # TODO: Hidden layer - replace these values with the appropriate calculations.\n hidden_inputs = np.dot(features, self.weights_input_to_hidden) # signals into hidden layer\n hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer\n \n # TODO: Output layer - Replace these values with the appropriate calculations.\n final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer\n final_outputs = final_inputs # signals from final output layer \n \n return final_outputs\n\ndef MSE(y, Y):\n return np.mean((y-Y)**2)", "Unit tests\nRun these unit tests to check the correctness of your network implementation. This will help you be sure your network was implemented correctly befor you starting trying to train it. 
These tests must all be successful to pass the project.", "import unittest\n\ninputs = np.array([[0.5, -0.2, 0.1]])\ntargets = np.array([[0.4]])\ntest_w_i_h = np.array([[0.1, -0.2],\n [0.4, 0.5],\n [-0.3, 0.2]])\ntest_w_h_o = np.array([[0.3],\n [-0.1]])\n\nclass TestMethods(unittest.TestCase):\n \n ##########\n # Unit tests for data loading\n ##########\n \n def test_data_path(self):\n # Test that file path to dataset has been unaltered\n self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')\n \n def test_data_loaded(self):\n # Test that data frame loaded\n self.assertTrue(isinstance(rides, pd.DataFrame))\n \n ##########\n # Unit tests for network functionality\n ##########\n\n def test_activation(self):\n network = NeuralNetwork(3, 2, 1, 0.5)\n # Test that the activation function is a sigmoid\n self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))\n\n def test_train(self):\n # Test that weights are updated correctly on training\n network = NeuralNetwork(3, 2, 1, 0.5)\n network.weights_input_to_hidden = test_w_i_h.copy()\n network.weights_hidden_to_output = test_w_h_o.copy()\n \n network.train(inputs, targets)\n self.assertTrue(np.allclose(network.weights_hidden_to_output, \n np.array([[ 0.37275328], \n [-0.03172939]])))\n self.assertTrue(np.allclose(network.weights_input_to_hidden,\n np.array([[ 0.10562014, -0.20185996], \n [0.39775194, 0.50074398], \n [-0.29887597, 0.19962801]])))\n\n def test_run(self):\n # Test correctness of run method\n network = NeuralNetwork(3, 2, 1, 0.5)\n network.weights_input_to_hidden = test_w_i_h.copy()\n network.weights_hidden_to_output = test_w_h_o.copy()\n\n self.assertTrue(np.allclose(network.run(inputs), 0.09998924))\n\nsuite = unittest.TestLoader().loadTestsFromModule(TestMethods())\nunittest.TextTestRunner().run(suite)", "Training the network\nHere you'll set the hyperparameters for the network. 
The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops.\nYou'll also be using a method know as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later.\nChoose the number of iterations\nThis is the number of batches of samples from the training data we'll use to train the network. The more iterations you use, the better the model will fit the data. However, if you use too many iterations, then the model with not generalize well to other data, this is called overfitting. You want to find a number here where the network has a low training loss, and the validation loss is at a minimum. As you start overfitting, you'll see the training loss continue to decrease while the validation loss starts to increase.\nChoose the learning rate\nThis scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge.\nChoose the number of hidden nodes\nThe more hidden nodes you have, the more accurate predictions the model will make. Try a few different numbers and see how it affects the performance. 
You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose.", "# import sys\n\n### Set the hyperparameters here ###\niterations = 15000\nlearning_rate = 0.1\nhidden_nodes = 6\noutput_nodes = 1\n\nN_i = train_features.shape[1]\nnetwork = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)\n\nlosses = {'train':[], 'validation':[]}\nfor ii in range(iterations):\n # Go through a random batch of 128 records from the training data set\n batch = np.random.choice(train_features.index, size=128)\n X, y = train_features.ix[batch].values, train_targets.ix[batch]['cnt']\n \n network.train(X, y)\n \n # Printing out the training progress\n train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)\n val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)\n sys.stdout.write(\"\\rProgress: {:2.1f}\".format(100 * ii/float(iterations)) \\\n + \"% ... Training loss: \" + str(train_loss)[:5] \\\n + \" ... Validation loss: \" + str(val_loss)[:5])\n sys.stdout.flush()\n \n losses['train'].append(train_loss)\n losses['validation'].append(val_loss)\n\nplt.plot(losses['train'], label='Training loss')\nplt.plot(losses['validation'], label='Validation loss')\nplt.legend()\n_ = plt.ylim()", "Check out your predictions\nHere, use the test data to view how well your network is modeling the data. 
If something is completely wrong here, make sure each step in your network is implemented correctly.", "fig, ax = plt.subplots(figsize=(8,4))\n\nmean, std = scaled_features['cnt']\npredictions = network.run(test_features).T*std + mean\nax.plot(predictions[0], label='Prediction')\nax.plot((test_targets['cnt']*std + mean).values, label='Data')\nax.set_xlim(right=len(predictions))\nax.legend()\n\ndates = pd.to_datetime(rides.ix[test_data.index]['dteday'])\ndates = dates.apply(lambda d: d.strftime('%b %d'))\nax.set_xticks(np.arange(len(dates))[12::24])\n_ = ax.set_xticklabels(dates[12::24], rotation=45)", "OPTIONAL: Thinking about your results(this question will not be evaluated in the rubric).\nAnswer these questions about your results. How well does the model predict the data? Where does it fail? Why does it fail where it does?\n\nNote: You can edit the text in this cell by double clicking on it. When you want to render the text, press control + enter\n\nYour answer below" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
changhoonhahn/centralMS
centralms/notebooks/notes_catalog.ipynb
mit
[ "notebook accompanying catalog.py\n\nIllustrates how to generate new subhalo accretion history catalogs", "import numpy as np \nimport catalog as Cat\n\nimport matplotlib.pyplot as plt\nfrom ChangTools.plotting import prettycolors", "generate new subhalo and central subhalo catalogs", "sig = 0.0\nsmf = 'li-march'\nnsnap0 = 20\n\nsubhist = Cat.SubhaloHistory(sigma_smhm=0., smf_source='li-march', nsnap_ancestor=20)\n\nsubhist.Build()\n\nsubhist._CheckHistory()", "generate central subhalo accretion history catalogs", "censub = Cat.PureCentralHistory(sigma_smhm=sig, smf_source=smf, nsnap_ancestor=nsnap0)\n\ncensub.Build()\n\ncensub.Downsample()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
turbomanage/training-data-analyst
courses/machine_learning/deepdive/02_generalization/labs/create_datasets.ipynb
apache-2.0
[ "<h1> Explore and create ML datasets </h1>\n\nIn this notebook, we will explore data corresponding to taxi rides in New York City to build a Machine Learning model in support of a fare-estimation tool. The idea is to suggest a likely fare to taxi riders so that they are not surprised, and so that they can protest if the charge is much higher than expected.\n<div id=\"toc\"></div>\n\nLet's start off with the Python imports that we need.", "from google.cloud import bigquery\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\nimport shutil", "<h3> Extract sample data from BigQuery </h3>\n\nThe dataset that we will use is <a href=\"https://bigquery.cloud.google.com/table/nyc-tlc:yellow.trips\">a BigQuery public dataset</a>. Click on the link, and look at the column names. Switch to the Details tab to verify that the number of records is one billion, and then switch to the Preview tab to look at a few rows.\nWrite a SQL query to pick up the following fields\n<pre>\n pickup_datetime,\n pickup_longitude, pickup_latitude, \n dropoff_longitude, dropoff_latitude,\n passenger_count,\n trip_distance,\n tolls_amount,\n fare_amount,\n total_amount\n</pre>\nfrom the dataset and explore a random subsample of the data. Sample size should be about 10,000 records. Make sure to pick a repeatable subset of the data so that if someone reruns this notebook, they will get the same results.\n<p>\n<b>Hint (highlight to see)</b>\n<pre style=\"color: white\">\nSet the query string to be:\nSELECT above_fields FROM\n `nyc-tlc.yellow.trips`\nWHERE\n ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 100000)) = 1\nThen, use the BQ library:\ntrips = bigquery.Client().query(query).execute().result().to_dataframe()\n</pre>", "# TODO: write a BigQuery query for the above fields\n# Store it into a Pandas dataframe named \"trips\" that contains about 10,000 records.\n", "<h3> Exploring data </h3>\n\nLet's explore this dataset and clean it up as necessary. 
We'll use the Python Seaborn package to visualize graphs and Pandas to do the slicing and filtering.", "ax = sns.regplot(x = \"trip_distance\", y = \"fare_amount\", ci = None, truncate = True, data = trips)", "Hmm ... do you see something wrong with the data that needs addressing?\nIt appears that we have a lot of invalid data that is being coded as zero distance and some fare amounts that are definitely illegitimate. Let's remove them from our analysis. We can do this by modifying the BigQuery query to keep only trips longer than zero miles and fare amounts that are at least the minimum cab fare ($2.50).\nWhat's up with the streaks at \\$45 and \\$50? Those are fixed-amount rides from JFK and La Guardia airports into anywhere in Manhattan, i.e. to be expected. Let's list the data to make sure the values look reasonable.\nLet's examine whether the toll amount is captured in the total amount.", "tollrides = trips[trips['tolls_amount'] > 0]\ntollrides[tollrides['pickup_datetime'] == '2014-05-20 23:09:00']", "Looking a few samples above, it should be clear that the total amount reflects fare amount, toll and tip somewhat arbitrarily -- this is because when customers pay cash, the tip is not known. So, we'll use the sum of fare_amount + tolls_amount as what needs to be predicted. Tips are discretionary and do not have to be included in our fare estimation tool.\nLet's also look at the distribution of values within the columns.", "trips.describe()", "Hmm ... 
The min, max of longitude look strange.\nFinally, let's actually look at the start and end of a few of the trips.", "def showrides(df, numlines):\n import matplotlib.pyplot as plt\n lats = []\n lons = []\n goodrows = df[df['pickup_longitude'] < -70]\n for iter, row in goodrows[:numlines].iterrows():\n lons.append(row['pickup_longitude'])\n lons.append(row['dropoff_longitude'])\n lons.append(None)\n lats.append(row['pickup_latitude'])\n lats.append(row['dropoff_latitude'])\n lats.append(None)\n\n sns.set_style(\"darkgrid\")\n plt.plot(lons, lats)\n\nshowrides(trips, 10)\n\nshowrides(tollrides, 10)", "As you'd expect, rides that involve a toll are longer than the typical ride.\n<h3> Quality control and other preprocessing </h3>\n\nWe need to do some clean-up of the data:\n<ol>\n<li>New York city longitudes are around -74 and latitudes are around 41.</li>\n<li>We shouldn't have zero passengers.</li>\n<li>Clean up the total_amount column to reflect only fare_amount and tolls_amount, and then remove those two columns.</li>\n<li>Before the ride starts, we'll know the pickup and dropoff locations, but not the trip distance (that depends on the route taken), so remove it from the ML dataset</li>\n<li>Discard the timestamp</li>\n</ol>\n\nLet's change the BigQuery query appropriately. 
In production, we'll have to carry out the same preprocessing on the real-time input data.", "def sample_between(a, b):\n basequery = \"\"\"\nSELECT\n (tolls_amount + fare_amount) AS fare_amount,\n pickup_longitude AS pickuplon,\n pickup_latitude AS pickuplat,\n dropoff_longitude AS dropofflon,\n dropoff_latitude AS dropofflat,\n passenger_count*1.0 AS passengers\nFROM\n `nyc-tlc.yellow.trips`\nWHERE\n trip_distance > 0\n AND fare_amount >= 2.5\n AND pickup_longitude > -78\n AND pickup_longitude < -70\n AND dropoff_longitude > -78\n AND dropoff_longitude < -70\n AND pickup_latitude > 37\n AND pickup_latitude < 45\n AND dropoff_latitude > 37\n AND dropoff_latitude < 45\n AND passenger_count > 0\n \"\"\"\n sampler = \"AND ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), EVERY_N)) = 1\"\n sampler2 = \"AND {0} >= {1}\\n AND {0} < {2}\".format(\n \"ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), EVERY_N * 100))\",\n \"(EVERY_N * {})\".format(a), \"(EVERY_N * {})\".format(b)\n )\n return \"{}\\n{}\\n{}\".format(basequery, sampler, sampler2)\n\ndef create_query(phase, EVERY_N):\n \"\"\"Phase: train (70%) valid (15%) or test (15%)\"\"\"\n query = \"\"\n if phase == 'train':\n # Training\n query = sample_between(0, 70)\n elif phase == 'valid':\n # Validation\n query = sample_between(70, 85)\n else:\n # Test\n query = sample_between(85, 100)\n return query.replace(\"EVERY_N\", str(EVERY_N))\n\n\n# TODO: try out train, test and valid here\nprint (create_query('train', 100000))\n\ndef to_csv(df, filename):\n outdf = df.copy(deep = False)\n outdf.loc[:, 'key'] = np.arange(0, len(outdf)) # rownumber as key\n # Reorder columns so that target is first column\n cols = outdf.columns.tolist()\n cols.remove('fare_amount')\n cols.insert(0, 'fare_amount')\n print (cols) # new order of columns\n outdf = outdf[cols]\n outdf.to_csv(filename, header = False, index_label = False, index = False)\n print (\"Wrote {} to {}\".format(len(outdf), filename))\n\nfor phase in 
['train', 'valid', 'test']:\n query = create_query(phase, 100000)\n df = bigquery.Client().query(query).to_dataframe()\n to_csv(df, 'taxi-{}.csv'.format(phase))", "<h3> Verify that datasets exist </h3>", "!ls -l *.csv", "We have 3 .csv files corresponding to train, valid, test. The ratio of file-sizes correspond to our split of the data.", "!head taxi-train.csv", "Looks good! We now have our ML datasets and are ready to train ML models, validate them and evaluate them.\n<h3> Benchmark </h3>\n\nBefore we start building complex ML models, it is a good idea to come up with a very simple model and use that as a benchmark.\nMy model is going to be to simply divide the mean fare_amount by the mean trip_distance to come up with a rate and use that to predict. Let's compute the RMSE of such a model.", "from google.cloud import bigquery\nimport pandas as pd\nimport numpy as np\nimport shutil\n\ndef distance_between(lat1, lon1, lat2, lon2):\n # Haversine formula to compute distance \"as the crow flies\". 
Taxis can't fly of course.\n dist = np.degrees(np.arccos(np.sin(np.radians(lat1)) * np.sin(np.radians(lat2)) + np.cos(np.radians(lat1)) * np.cos(np.radians(lat2)) * np.cos(np.radians(lon2 - lon1)))) * 60 * 1.515 * 1.609344\n return dist\n\ndef estimate_distance(df):\n return distance_between(df['pickuplat'], df['pickuplon'], df['dropofflat'], df['dropofflon'])\n\ndef compute_rmse(actual, predicted):\n return np.sqrt(np.mean((actual - predicted)**2))\n\ndef print_rmse(df, rate, name):\n print (\"{1} RMSE = {0}\".format(compute_rmse(df['fare_amount'], rate * estimate_distance(df)), name))\n\nFEATURES = ['pickuplon','pickuplat','dropofflon','dropofflat','passengers']\nTARGET = 'fare_amount'\ncolumns = list([TARGET])\ncolumns.extend(FEATURES) # in CSV, target is the first column, after the features\ncolumns.append('key')\ndf_train = pd.read_csv('taxi-train.csv', header = None, names = columns)\ndf_valid = pd.read_csv('taxi-valid.csv', header = None, names = columns)\ndf_test = pd.read_csv('taxi-test.csv', header = None, names = columns)\nrate = df_train['fare_amount'].mean() / estimate_distance(df_train).mean()\nprint (\"Rate = ${0}/km\".format(rate))\nprint_rmse(df_train, rate, 'Train')\nprint_rmse(df_valid, rate, 'Valid') \nprint_rmse(df_test, rate, 'Test') ", "The simple distance-based rule gives us a RMSE of <b>$9.35</b> on the validation dataset. We have to beat this, of course, but you will find that simple rules of thumb like this can be surprisingly difficult to beat. You don't want to set a goal on the test dataset because you want to change the architecture of the network etc. to get the best validation error. Then, you can evaluate ONCE on the test data.\nChallenge Exercise\nLet's say that you want to predict whether a Stackoverflow question will be acceptably answered. 
Using this public dataset of questions, create a machine learning dataset that you can use for classification.\n<p>\nWhat is a reasonable benchmark for this problem?\nWhat features might be useful?\n<p>\nIf you got the above easily, try this harder problem: you want to predict whether a question will be acceptably answered within 2 days. How would you create the dataset?\n<p>\nHint (highlight to see):\n<p style='color:white' linkstyle='color:white'> \nYou will need to do a SQL join with the table of [answers]( https://bigquery.cloud.google.com/table/bigquery-public-data:stackoverflow.posts_answers) to determine whether the answer was within 2 days.\n</p>\n\nCopyright 2018 Google Inc.\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at\nhttp://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ProfessorKazarinoff/staticsite
content/code/error_bars/bar_chart_with_matplotlib.ipynb
gpl-3.0
[ "Building bar charts is a useful skill for engineers.\nImport matplotlib and numpy", "import matplotlib.pyplot as plt\nimport numpy as np\n\n#if using a jupyter notebook\n%matplotlib inline ", "Coefficient of thermal expansion of three metals (units: / &#176;C)\nSample | Aluminum | Copper | Steel \n ----- | ------------- | ------------- | ------------- \n 1 | 6.4e-5 | 4.5e-5 | 3.3e-5\n 2 | 3.01e-5 | 1.97e-5 | 1.21e-5\n 3 | 2.36e-5 | 1.6e-5 | 0.9e-5\n 4 | 3.0e-5 | 1.97e-5 | 1.2e-5\n 5 | 7.0e-5 | 4.0e-5 | 1.3e-5\n 6 | 4.5e-5 | 2.4e-5 | 1.6e-5\n 7 | 3.8e-5 | 1.9e-5 | 1.4e-5 \n 8 | 4.2e-5 | 2.41e-5 | 1.58e-5\n 9 | 2.62e-5 | 1.85e-5 | 1.32e-5\n 10 | 3.6e-5 | 3.3e-5 | 2.1e-5 \nWe'll put this data into three different numpy arrays, one array for each metal. Notice the syntax np.array([ ... ]) has a parenthesis ( followed by a square bracket [. We are passing a Python list, [ denoted with square brackets ] into a the numpy array function (argument enclosed in parenthesis).", "# Enter in the raw data\naluminum = np.array([6.4e-5 , 3.01e-5 , 2.36e-5, 3.0e-5, 7.0e-5, 4.5e-5, 3.8e-5, 4.2e-5, 2.62e-5, 3.6e-5])\ncopper = np.array([4.5e-5 , 1.97e-5 , 1.6e-5, 1.97e-5, 4.0e-5, 2.4e-5, 1.9e-5, 2.41e-5 , 1.85e-5, 3.3e-5 ])\nsteel = np.array([3.3e-5 , 1.2e-5 , 0.9e-5, 1.2e-5, 1.3e-5, 1.6e-5, 1.4e-5, 1.58e-5, 1.32e-5 , 2.1e-5])\n\n# Calculate the average\nAluminum_mean = np.mean(aluminum)\nCopper_mean = np.mean(copper)\nSteel_mean = np.mean(steel)\nCTEs =np.array([Aluminum_mean, Copper_mean, Steel_mean])\nx_pos =['Al','Cu','Steel']", "Now it's time to build the plot. We are going to build a bar chart with three different bars, one bar for each material: Aluminum, Copper and Steel. \nFirst we will create a figure object called fig and an axis object in that figure called ax using matplotlib's plt.subplots() function. Everything in our plot will be added to the ax (axis) object. Next we put a bar chart on our ax (axis) with the ax.bar() method. 
Note the arguments that go into this method: (x_pos, CTEs, yerr=error). x_pos is the array with the count of the number of bars. CTEs is our array which contains the means or heights of the bars. yerr=error sets the heights of the error bars and the standard deviations. The subsequent arguments (align='center', alpha=0.5, ecolor='black', capsize=10) style the plot.\nWe'll put a label on the y-axis with the title \"Coefficient of thermal expansion (&#176;C<sup>-1</sup>)\" using ax.set_ylabel. We use ax.set_xticks() to feed in our number array to set the bars as numbers 1, 2, 3. Then we add labels to these numbered bars with ax.set_xticklabels(). ax.set_title() and ax.yaxis.grid(True) adds a title and horizontal grid lines.\nFinally, we'll save the figure to a file called bar_plot_with_error_bars.png using matplotlib's plt.savefig() function. The plt.tight_layout() line ensures that the labels for our bars and axis don't get cut off and are visible.", "# Build the plot\nfig, ax = plt.subplots()\nax.bar(x_pos, CTEs, align='center', alpha=0.5)\nax.set_ylabel('Coefficient of Thermal Expansion ($\degree C^{-1}$)')\n\nax.set_xticklabels(materials)\nax.set_title('Coefficent of Thermal Expansion (CTE) of Three Metals')\nax.yaxis.grid(True)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
sorig/shogun
doc/ipython-notebooks/ica/bss_audio.ipynb
bsd-3-clause
[ "Blind Source Separation with the Shogun Machine Learning Toolbox\nBy Kevin Hughes\nThis notebook illustrates <a href=\"http://en.wikipedia.org/wiki/Blind_signal_separation\">Blind Source Seperation</a>(BSS) on audio signals using <a href=\"http://en.wikipedia.org/wiki/Independent_component_analysis\">Independent Component Analysis</a> (ICA) in Shogun. We generate a mixed signal and try to seperate it out using Shogun's implementation of ICA & BSS called <a href=\"http://www.shogun-toolbox.org/doc/en/3.0.0/classshogun_1_1CJade.html\">JADE</a>.\nMy favorite example of this problem is known as the cocktail party problem where a number of people are talking simultaneously and we want to separate each persons speech so we can listen to it separately. Now the caveat with this type of approach is that we need as many mixtures as we have source signals or in terms of the cocktail party problem we need as many microphones as people talking in the room.\nLet's get started, this example is going to be in python and the first thing we are going to need to do is load some audio files. To make things a bit easier further on in this example I'm going to wrap the basic scipy wav file reader and add some additional functionality. First I added a case to handle converting stereo wav files back into mono wav files and secondly this loader takes a desired sample rate and resamples the input to match. 
This is important because when we mix the two audio signals they need to have the same sample rate.", "import numpy as np\nimport os\nSHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')\nfrom scipy.io import wavfile\nfrom scipy.signal import resample\n\ndef load_wav(filename,samplerate=44100):\n \n # load file\n rate, data = wavfile.read(filename)\n\n # convert stereo to mono\n if len(data.shape) > 1:\n data = data[:,0]/2 + data[:,1]/2\n\n # re-interpolate samplerate \n ratio = float(samplerate) / float(rate)\n data = resample(data, int(len(data) * ratio))\n \n return samplerate, data.astype(np.int16)", "Next we're going to need a way to play the audio files we're working with (otherwise this wouldn't be very exciting at all would it?). In the next bit of code I've defined a wavPlayer class that takes the signal and the sample rate and then creates a nice HTML5 webplayer right inline with the notebook.", "from IPython.display import Audio\nfrom IPython.display import display\ndef wavPlayer(data, rate):\n display(Audio(data, rate=rate))", "Now that we can load and play wav files we actually need some wav files! I found the sounds from Starcraft to be a great source of wav files because they're short, interesting and remind me of my childhood. You can download Starcraft wav files here: http://wavs.unclebubby.com/computer/starcraft/ among other places on the web or from your Starcraft install directory (come on I know its still there).\nAnother good source of data (although lets be honest less cool) is ICA central and various other more academic data sets: http://perso.telecom-paristech.fr/~cardoso/icacentral/base_multi.html. Note that for lots of these data sets the data will be mixed already so you'll be able to skip the next few steps.\nOkay lets load up an audio file. I chose the Terran Battlecruiser saying \"Good Day Commander\". 
In addition to creating a wavPlayer I also plotted the data using Matplotlib (and tried my best to have the graph length match the HTML player length). Have a listen!", "# change to the shogun-data directory\nimport os\nos.chdir(os.path.join(SHOGUN_DATA_DIR, 'ica'))\n\n%matplotlib inline\nimport pylab as pl\n\n# load\nfs1,s1 = load_wav('tbawht02.wav') # Terran Battlecruiser - \"Good day, commander.\"\n\n# plot\npl.figure(figsize=(6.75,2))\npl.plot(s1)\npl.title('Signal 1')\npl.show()\n\n# player\nwavPlayer(s1, fs1)", "Now let's load a second audio clip:", "# load\nfs2,s2 = load_wav('TMaRdy00.wav') # Terran Marine - \"You want a piece of me, boy?\"\n\n# plot\npl.figure(figsize=(6.75,2))\npl.plot(s2)\npl.title('Signal 2')\npl.show()\n\n# player\nwavPlayer(s2, fs2)", "and a third audio clip:", "# load\nfs3,s3 = load_wav('PZeRdy00.wav') # Protoss Zealot - \"My life for Aiur!\"\n\n# plot\npl.figure(figsize=(6.75,2))\npl.plot(s3)\npl.title('Signal 3')\npl.show()\n\n# player\nwavPlayer(s3, fs3)", "Now we've got our audio files loaded up into our example program. The next thing we need to do is mix them together!\nFirst another nuance - what if the audio clips aren't the same length? 
The solution I came up with for this was to simply resize them all to the length of the longest signal, the extra length will just be filled with zeros so it won't affect the sound.\nThe signals are mixed by creating a mixing matrix $A$ and taking the dot product of $A$ with the signals $S$.\nAfterwards I plot the mixed signals and create the wavPlayers, have a listen!", "# Adjust for different clip lengths\nfs = fs1\nlength = max([len(s1), len(s2), len(s3)])\ns1 = np.resize(s1, (length,1))\ns2 = np.resize(s2, (length,1))\ns3 = np.resize(s3, (length,1))\n\nS = (np.c_[s1, s2, s3]).T\n\n# Mixing Matrix\n#A = np.random.uniform(size=(3,3))\n#A = A / A.sum(axis=0)\nA = np.array([[1, 0.5, 0.5],\n [0.5, 1, 0.5], \n [0.5, 0.5, 1]]) \nprint('Mixing Matrix:')\nprint(A.round(2))\n\n# Mix Signals\nX = np.dot(A,S)\n\n# Mixed Signal i\nfor i in range(X.shape[0]):\n pl.figure(figsize=(6.75,2))\n pl.plot((X[i]).astype(np.int16))\n pl.title('Mixed Signal %d' % (i+1))\n pl.show()\n wavPlayer((X[i]).astype(np.int16), fs)", "Now before we can work on separating these signals we need to get the data ready for Shogun, thankfully this is pretty easy!", "from shogun import features\n\n# Convert to features for shogun\nmixed_signals = features((X).astype(np.float64))", "Now let's unmix those signals!\nIn this example I'm going to use an Independent Component Analysis (ICA) algorithm called JADE. JADE is one of the ICA algorithms available in Shogun and it works by performing Approximate Joint Diagonalization (AJD) on a 4th order cumulant tensor. I'm not going to go into a lot of detail on how JADE works behind the scenes but here is the reference for the original paper:\nCardoso, J. F., & Souloumiac, A. (1993). Blind beamforming for non-Gaussian signals. In IEE Proceedings F (Radar and Signal Processing) (Vol. 140, No. 6, pp. 362-370). 
IET Digital Library.\nShogun also has several other ICA algorithms including the Second Order Blind Identification (SOBI) algorithm, FFSep, JediSep, UWedgeSep and FastICA. All of the algorithms inherit from the ICAConverter base class and share some common methods for setting an intial guess for the mixing matrix, retrieving the final mixing matrix and getting/setting the number of iterations to run and the desired convergence tolerance. Some of the algorithms have additional getters for intermediate calculations, for example Jade has a method for returning the 4th order cumulant tensor while the \"Sep\" algorithms have a getter for the time lagged covariance matrices. Check out the source code on GitHub (https://github.com/shogun-toolbox/shogun) or the Shogun docs (http://www.shogun-toolbox.org/doc/en/latest/annotated.html) for more details!", "from shogun import Jade\n\n# Separating with JADE\njade = Jade()\nsignals = jade.apply(mixed_signals)\n\nS_ = signals.get_real_matrix('feature_matrix')\n\nA_ = jade.get_real_matrix('mixing_matrix')\nA_ = A_ / A_.sum(axis=0)\nprint('Estimated Mixing Matrix:')\nprint(A_)", "Thats all there is to it! Check out how nicely those signals have been separated and have a listen!", "# Show separation results\n\n# Separated Signal i\ngain = 4000\nfor i in range(S_.shape[0]):\n pl.figure(figsize=(6.75,2))\n pl.plot((gain*S_[i]).astype(np.int16))\n pl.title('Separated Signal %d' % (i+1))\n pl.show()\n wavPlayer((gain*S_[i]).astype(np.int16), fs)", "BSS isn't only useful for working with Audio, it is also useful for image processing and pre-processing other forms of high dimensional data. Have a google for ICA and machine learning if you want to learn more!" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
akokai/chemviewing
notebooks/chemview-test.ipynb
unlicense
[ "import requests\nimport json\nimport numpy as np\nimport pandas as pd\nfrom pandas import DataFrame", "US EPA ChemView web services\nThe documentation lists several ways of accessing data in ChemView.", "URIBASE = 'http://java.epa.gov/chemview/'", "Getting 'chemicals' data from ChemView\nAs a start... this downloads data for all chemicals. Let's see what we get.", "uri = URIBASE + 'chemicals'\nr = requests.get(uri, headers = {'Accept': 'application/json, */*'})\nj = json.loads(r.text)\n\nprint(len(j))\n\ndf = DataFrame(j)\ndf.tail()\n\n# Save this dataset so that I don't have to re-request it again later. \ndf.to_pickle('../data/chemicals.pickle')\n\ndf = pd.read_pickle('../data/chemicals.pickle')", "Data wrangling", "# want to interpret 'None' as NaN\ndef scrub_None(x):\n s = str(x).strip()\n if s == 'None' or s == '':\n return np.nan\n else:\n return s\n\nfor c in list(df.columns)[:-1]:\n df[c] = df[c].apply(scrub_None)\n\ndf.tail()", "How many unique CASRNs, PMN numbers?", "# CASRNS\nlen(df.casNo.value_counts())\n\n# PMN numbers\nlen(df.pmnNo.value_counts())", "What's in 'synonyms'?", "DataFrame(df.loc[4,'synonyms'])", "How many 'synonyms' for each entry?", "df.synonyms.apply(len).describe()", "Do the data objects in synonyms all have the same attributes?", "def getfields(x):\n k = set()\n for d in x:\n j = set(d.keys())\n k = k | j\n return ','.join(sorted(k))\n\ndf.synonyms.apply(getfields).head()\n\nlen(df.synonyms.apply(getfields).value_counts())", "All of the synonyms fields contain a variable number of objects with a uniform set of fields.\nTell me more about those items with PMN numbers...", "pmns = df.loc[df.pmnNo.notnull()]\npmns.head()", "Are there any that have CASRN too? ... No.", "len(pmns.casNo.dropna())" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
eyaltrabelsi/my-notebooks
Lectures/data_quality/Great Expectations.ipynb
mit
[ "Why Data Quality\n\nData driven oraganizations/products\nWrong desicion / Bad product\nIts all about trust\n\nData Quality Effect\n\nIs the data correct ?\nIf so to what time ?\n\n\n\n\ngreat to find violations as early as possible\n\n\nproductivity and integrity are the same thing data stuff\n\n\n\n\n\nTrying to maintain data systems that are untested, undocumented and unstable is nearly impossible\nData Quality characteristics\n\nAccuracy: for whatever data described, it needs to be accurate.\nRelevancy: the data should meet the requirements for the intended use.\nCompleteness: the data should not have missing values or miss data records.\nTimeliness: the data should be up to date.\nConsistency:the data should have the data format as expected and can be cross reference-able with the same results.\n\nData Quality tools\n\nGreat Expectations\nDeequ\nTensorflow Data Validation\n\nGreat Expectations Native", "import pandas as pd\nimport great_expectations as ge\nimport great_expectations.jupyter_ux\nimport json\n\nfrom datetime import datetime\nfrom sklearn.model_selection import train_test_split\n\ndf = ge.read_csv(\"data/titanic.csv\")\ntrain, test = train_test_split(df, test_size=0.3)", "expectations are assertions about data", "train.expect_column_values_to_be_between(\"Age\", 0,80)\ntrain.expect_column_values_to_be_in_set('Survived', [1, 0])\ntrain.expect_column_mean_to_be_between(\"Age\", 20,40)\ntrain.expect_column_values_to_match_regex('Name', '[A-Z][a-z]+(?: \\([A-Z][a-z]+\\))?, ', mostly=.95)\ntrain.expect_column_values_to_be_in_set(\"Sex\", [\"male\", \"female\"])\n\nresults = train.validate() \nif train.validate()[\"SUCCESS\"]:\n ...\nresults\n\nmy_expectations = train.get_expectation_suite()\ntest.validate(expectation_suite=my_expectations)", "Great Expectations Validation in Your pipeline", "# ! 
great_expectations init\n\ncontext = ge.data_context.DataContext()\ncontext.list_expectation_suite_names()\n\nexpectation_suite_name = \" \"\nbatch_kwargs = {'path': \"https://github.com/plotly/datasets/raw/master/titanic.csv\",\n 'datasource': \"titanic\"}\nbatch = context.get_batch(batch_kwargs, my_expectations)\nbatch.head()\n\nrun_id = {\"run_name\": \"First validation\", \"run_time\": datetime.now()}\nresults = context.run_validation_operator(\"action_list_operator\", \n assets_to_validate=[batch], \n run_id=run_id)\nresults", "Tests as data documentation\nyour docs are your tests and your tests are your docs", "context.build_data_docs()\ncontext.open_data_docs()", "Additional Resources\n\nGreat expectations 101\nGreat expectations 201\nGreat expectations 301" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ryan-leung/PHYS4650_Python_Tutorial
notebooks/02-Python-Data-Structures.ipynb
bsd-3-clause
[ "Python Data Structures\nData structure in computing\nData structures are how computer programs store information. Theses information can be processed, analyzed \nand visualized easily from the programme. Scientific data can be large and complex and may require data structures appropriate for scientific programming. In Astronomy, the fits file is one of the most widely used data-storing medium, it can store a lot of information including the coordinates, the precious time, a very large cataelog table, multi-dimension data cube, etc.. These data, when it is opened by the programme, shall be recognised and easily managed by the programme.\nIn Python, there are pre-defined advanced data structure depending on the kind of data you wish to store. \nYou will have to choose data structures that best meet your requirements for the problem you are trying to solve. In this section, I will go through specifically examine three Python data structures: datetime, lists, tuples, sets, and dictionaries.\n<a href=\"https://colab.research.google.com/github/ryan-leung/PHYS4650_Python_Tutorial/blob/master/notebooks/02-Python-Data-Structures.ipynb\"><img align=\"right\" src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open in Colab\" title=\"Open and Execute in Google Colaboratory\">\n</a>\nBuilt-In Types\nPython's simple types are summarized in the following table:\n| Type | Example | Description |\n|-------------|----------------|--------------------------------------------------------------|\n| int | x = 1 | integers (i.e., whole numbers) |\n| float | x = 1.0 | floating-point numbers (i.e., real numbers) |\n| complex | x = 1 + 2j | Complex numbers (i.e., numbers with real and imaginary part) |\n| bool | x = True | Boolean: True/False values |\n| str | x = 'abc' | String: characters or text |\n| NoneType| x = None | Special object indicating nulls |\nWe'll take a quick look at each of these in turn.", "a = 1 # integer\nb = 1.1 #floating point numbers\nc = 
True; d = False # Boolean (logical expression)\ne = \"Hello\" # Strings", "Arithmetic Operations\nPython implements seven basic binary arithmetic operators, two of which can double as unary operators.\nThey are summarized in the following table:\n| Operator | Name | Description |\n|--------------|----------------|--------------------------------------------------------|\n| a + b | Addition | Sum of a and b |\n| a - b | Subtraction | Difference of a and b |\n| a * b | Multiplication | Product of a and b |\n| a / b | True division | Quotient of a and b |\n| a // b | Floor division | Quotient of a and b, removing fractional parts |\n| a % b | Modulus | Integer remainder after division of a by b |\n| a ** b | Exponentiation | a raised to the power of b |\n| -a | Negation | The negative of a |\n| +a | Unary plus | a unchanged (rarely used) |\nThese operators can be used and combined in intuitive ways, using standard parentheses to group operations.\nFor example:", "# addition, subtraction, multiplication\n(4 + 8) * (6.5 - 3)", "Strings in Python 2 and 3\n```Python\nPython 2\nprint type(\"Hello World!\")\n<type 'str'> \nthis is a byte string\nprint type(u\"Hello World!\")\n<type 'unicode'>\nthis is a Unicode string\n```\n```Python\nPython 3\nprint(type(\"Hello World!\"))\n<class 'str'>\nthis is a Unicode string\nprint(type(b\"Hello World!\"))\n<class 'bytes'>\nthis is a byte string\n```\nBuilt-In Data Structures\n| Type Name | Example | Add Element | Get Element | Set Element | Description |\n|-----------|---------------------------|--------------------------------------------|-------------|-------------|---------------------------------------|\n| list | [1, 2, 3] | x.append(1) | x[0] | x[0]=2 | Ordered collection |\n| tuple | (1, 2, 3) | no altering | x[0] | no altering | Immutable ordered collection |\n| dict | {'a':1, 'b':2, 'c':3} | x['new_key'] = 4 or x.update({'new_key'=4} | x['a'] | x['a']=2 | Unordered (key,value) mapping |\n| set | {1, 2, 3} | x.add(4) | no 
indexing | no indexing | Unordered collection of unique values |\nlist\nA Python list is a sequence of values (elements) that are usually the same kind of item. They are in order and mutable. Mutable means they can be changed after they are created, of course, this implies you can exchange the order of the elements inside it. This is a Python list of prime numbers smaller than 50:", "x = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]", "Definition\nIt is defined with parentheses : [xx,xx,xx]. \nGet Element\nThe elements are called using a square bracket with an index starting from zero : x[y], 0..N. \nSlice (sub-array)\nYou can slice the array using colon, in this case a[start:end] means items start up to end-1.", "print(x)\nprint(x[0])", "A single colon a[:] means a copy of the whole array.\na[start:] return tuple of items start through the rest of the array.\na[:end]return tuple of items from the beginning through end-1.", "print(x[1:2])\nprint(x[:])\nprint(x[:2])\nprint(x[1:])", "more interestingly, they have negative index\na[-1] means last item in the array\na[-2:] means last two items in the array\na[:-2] means everything except the last two items", "print(x[-1])\nprint(x[-2])\nprint(x[-2:])\nprint(x[:-2])", "You may reversed a list with xxx[::-1].", "print(x[::-1])", "Concatenate\nYou may add up two list or we say concatenate, and multiply to duplicate the items.", "print(x + [0,1])\nprint([0,1] + x)\nprint([0,1] * 5)", "Sorting\nYou may sort a list with sorted(x). Noted that it returns a new list.", "print(x[::-1])\ny = sorted(x[::-1])\nprint(y)", "Add element (append); Remove element (pop); Insert element (insert)\nThese functions are modified in-place, i.e. 
the original list will be changed", "print(x)\nx.append('A')\nprint(x)\n\nprint(x)\nx.insert(5,'B') # insert 'B' between x[4] and x[5], results in x[5] = 'B'\nprint(x)\n\nprint(x); \nx.pop(5); # Removed the x[5] item and return it\nprint(x); \nx.pop(-1); # Removed the last item and return it\nprint(x)", "Tuple\nA Python tuple is similar to a list. The elements are in order but fixed once they are created. In other words, they are immutable. The tuple can store different types of elements. \nDefinition\nIt is defined with parentheses : (xx,xx,xx). \nGet Element\nThe elements are called using a square bracket with an index starting from zero : x[y], 0..N. \nSlice (sub-array)\nYou can slice the array using colon, in this case a[start:end] means items start up to end-1.", "corr = (22.28552, 114.15769)\nprint(corr)\n\ncorr[0] = 10", "Dictionary\nDictionary is more flexible than list and its index is a string, it is defined with curly bracket: \ndata = {'k1' : y1 , 'k2' : y2 , 'k3' : y3 }\nk1, k2, k3 are called keys while y1,y2 and y3 are elements.\nCreating an empty dictionary\nIt is defined with a pair of curly bracket or the dict() function: data = {} or data = dict()\nCreating a dictionary with initial values\n\nIt could be defined with a curly bracket with index:element pairs : data = {'k1' : y1 , 'k2' : y2 , 'k3' : y3 }. \nIt could also be defined with the dict() function : data = dict(k1=y1, k2=y2, k3=y3).\nIt could also be defined with tuples : data = {k: v for k, v in (('k1', y1),('k2',y2),('k3',y3))}.\n\nGet Element\nThe elements are called using a square bracket with an index string : data[key]. 
\nInserting/Updating a single value / multiple values\n\n\ndata['k1']=1 # Updates if 'k1' exists, else adds the element with index 'k1'\n\n\ndata.update({'k1':1})\n\n\ndata.update(dict(k1=1))\n\n\ndata.update(k1=1)\n\n\nMultiple values : data.update({'k3':3,'k4':4}) # Updates 'k3' and adds 'k4'\n\n\nMerged dictionary without modifying originals\n\ndata3 = {}\ndata3.update(data) # Modifies data3, not data\ndata3.update(data2) # Modifies data3, not data2\n\nDelete an item\n\ndel data[key] # Removes specific element in a dictionary\ndata.pop(key) # Removes the key & returns the value\ndata.clear() # Clears entire dictionary\n\nCheck if a key is existed\n\nkey in data # Return a boolean\n\nIterate through pairs\n\nfor key in data: # Iterates just through the keys, ignoring the values\nfor key, value in d.items(): # Iterates through the pairs", "# Creating an empty dictionary\nlocation = {}\nprint(location)\n\n# Defined with a curly bracket\nlocation = {\n 'Berlin': (52.5170365, 13.3888599),\n 'London': (51.5073219, -0.1276474),\n 'Sydney': (-33.8548157, 151.2164539),\n 'Tokyo': (34.2255804, 139.294774527387),\n 'Paris': (48.8566101, 2.3514992),\n 'Moscow': (46.7323875, -117.0001651)\n }\nprint(location)\n\n# Update\nlocation.update({'Hong Kong': (22.2793278, 114.1628131)})\nprint(location)\n\n# Call element\nlocation['Tokyo']\n\n# Delete element\ndel location['Hong Kong']\nlocation\n\nfor key, value in location.items():\n print(key, value)", "Extra reading:", "### More on slicing in list and tuple\nstart=2\nend=5\nstep=2\n\nprint(\"Original:\", x)\nprint(\"items start through end-1 :\", x[start:end]) # items start through end-1\nprint(\"items start through the rest of the array :\", x[start:]) # items start through the rest of the array\nprint(\"items from the beginning through end-1 :\", x[:end]) # items from the beginning through end-1\nprint(\"whole array :\", x[:]) # whole array\nprint(\"last item in the array :\", x[-1]) # last item in the array\nprint(\"last two 
items in the array :\", x[-2:]) # last two items in the array\nprint(\"everything except the last two items :\", x[:-2]) # everything except the last two items\n\nprint(\"start through not past end, by step\", x[start:end:step]) # start through no01-Python-Syntaxt past end, by step", "Bitwise Operations\nIn addition to the standard numerical operations, Python includes operators to perform bitwise logical operations on integers.\nThese are much less commonly used than the standard arithmetic operations, but it's useful to know that they exist.\nThe six bitwise operators are summarized in the following table:\n| Operator | Name | Description |\n|--------------|-----------------|---------------------------------------------|\n| a &amp; b | Bitwise AND | Bits defined in both a and b |\n| <code>a &#124; b</code>| Bitwise OR | Bits defined in a or b or both |\n| a ^ b | Bitwise XOR | Bits defined in a or b but not both |\n| a &lt;&lt; b | Bit shift left | Shift bits of a left by b units |\n| a &gt;&gt; b | Bit shift right | Shift bits of a right by b units |\n| ~a | Bitwise NOT | Bitwise negation of a |\nSummary\nThese operations shows Python are so easy to use compared to lower-level languages such as C.\nIn C, we need to manually constructing a loop over the list and checking for equality of each value.\nIn Python, you just type what you want to know, easy to type but hard to debug, just like English grammar." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
Z2PackDev/TBmodels
examples/kwant_interface/kwant_interface_demo.ipynb
apache-2.0
[ "Interface from TBmodels to kwant\nPrerequisites:\n* TBmodels\n* kwant", "import kwant\nimport tbmodels\n\nimport numpy as np\nimport scipy.linalg as la\nimport matplotlib.pyplot as plt\n%matplotlib inline\n%config InlineBackend.figure_format = 'retina'", "Bulk Hamiltonian with wraparound\nIn this first example, we compare the bulk Hamiltonian from TBmodels with that of the model in kwant, using wraparound.", "model = tbmodels.Model.from_wannier_files(hr_file='data/wannier90_hr.dat')", "First we need to create the lattice from the tight-binding model and define the translation symmetries.", "lattice = model.to_kwant_lattice()\n\nsym = kwant.TranslationalSymmetry(\n lattice.vec((1, 0, 0)),\n lattice.vec((0, 1, 0)),\n lattice.vec((0, 0, 1))\n)", "Now we define a Builder with these symmetries", "kwant_sys = kwant.Builder(sym)", "We give the system an \"infinite\" shape. This needs to be done before adding the hoppings, because on-site energies and hoppings are added only to existing sites.", "kwant_sys[lattice.shape(lambda p: True, (0, 0, 0))] = 0", "Now we can add the hoppings. This modifies the model in-place.", "model.add_hoppings_kwant(kwant_sys)", "Finally, use wraparound to finalize the bulk system:", "kwant_model = kwant.wraparound.wraparound(kwant_sys).finalized()", "To see that the two models are the same, we plot the bands along some line. Note that the periodicity of the k-vector is $1$ in TBmodels, but $2\\pi$ in kwant. 
The k-vector needs to be scaled accordingly.", "k_list = [(kx, 0, 0) for kx in np.linspace(0, 1, 100)]\nx = range(100)\n\neigs_tbmodels = [model.eigenval(k) for k in k_list]\neigs_kwant = [la.eigvalsh(\n kwant_model.hamiltonian_submatrix(\n params={key: val for key, val in zip(['k_x', 'k_y', 'k_z'], 2 * np.pi * np.array(k))}\n )\n) for k in k_list]", "Numerical and visual test for equivalence:", "np.isclose(eigs_tbmodels, eigs_kwant).all()\n\nfig, ax = plt.subplots()\nfor band in np.array(eigs_tbmodels).T:\n ax.plot(x, band, 'k')\nfor band in np.array(eigs_kwant).T:\n ax.plot(x, band, 'b')", "Finite wire with leads\nIn the second example, we build a finite wire and attach two leads on either side.\nSince the finite wire doesn't have translation symmetry, we can just create a bare Builder.", "wire = kwant.Builder()", "Now we define a shape for the wire - for simplicity we use a square.", "def shape(p):\n x, y, z = p\n return -20 < x < 20 and -5 < y < 5 and -5 < z < 5", "Again, we explicitly create the lattice sites before populating the hoppings.", "wire[lattice.shape(shape, (0, 0, 0))] = 0\nmodel.add_hoppings_kwant(wire)\n\nkwant.plot(wire);", "Now we create and attach two leads on either side. The lead must be long enough s.t. the most long-range hopping stays within the lead.", "sym_lead = kwant.TranslationalSymmetry(lattice.vec((-5, 0, 0)))\nlead = kwant.Builder(sym_lead)\ndef lead_shape(p):\n x, y, z = p\n return -5 <= x <= 0 and -5 < y < 5 and -5 < z < 5\nlead[lattice.shape(lead_shape, (0, 0, 0))] = 0\nmodel.add_hoppings_kwant(lead)\n\nwire.attach_lead(lead);\nwire.attach_lead(lead.reversed());\n\nkwant.plot(wire);", "Here's how you can find out what's the longest-range hopping in a given direction:", "for i, dir in enumerate(['x', 'y', 'z']):\n print(dir + ':', max([abs(R[i]) for R in model.hop.keys()]))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
atlury/deep-opencl
DL0110EN/6.1.3.Activation max pooling .ipynb
lgpl-3.0
[ "<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n <a href=\"http://cocl.us/pytorch_link_top\"><img src = \"http://cocl.us/Pytorch_top\" width = 950, align = \"center\"></a>\n\n<img src = \"https://ibm.box.com/shared/static/ugcqz6ohbvff804xp84y4kqnvvk3bq1g.png\" width = 200, align = \"center\">\n\n\n<h1 align=center><font size = 5>Activation Functions and Max Pooling</h1 >\n\n\n# Table of Contents\nIn this lab, you will learn two important components in building a convolutional neural network. The first is applying an activation function, which is analogous to building a regular network. You will also learn about max pooling. Max pooling reduces the number of parameters and makes the network less susceptible to changes in the image. \n\n\n<li><a href=\"#ref0\">Activation Functions</a></li>\n\n<li><a href=\"#ref1\">Max Pooling</a></li>\n\n\n<br>\n<p></p>\nEstimated Time Needed: <strong>25 min</strong>\n</div>\n\n<hr>\n\nImport the following libraries:", "import torch \nimport torch.nn as nn\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import ndimage, misc\nimport torch.nn.functional as F", "<a id=\"ref0\"></a>\n<h2 align=center>Activation Functions </h2>\n\nJust like a neural network, you apply an activation function to the activation map as shown in the following image:\n<img src = \"https://ibm.box.com/shared/static/g3x3p1jaf2lv249gdvnjtnzez3p64nou.png\" width = 1000, align = \"center\">\nCreate a kernel and image as usual. 
Set the bias to zero:", "conv = nn.Conv2d(in_channels=1, out_channels=1,kernel_size=3)\nGx=torch.tensor([[1.0,0,-1.0],[2.0,0,-2.0],[1.0,0,-1.0]])\nconv.state_dict()['weight'][0][0]=Gx\nconv.state_dict()['bias'][0]=0.0\nconv.state_dict()\n\nimage=torch.zeros(1,1,5,5)\nimage[0,0,:,2]=1\nimage", "The following image shows the image and kernel: \n<img src = \"https://ibm.box.com/shared/static/e0xc2oqtolg4p6nfsumcbpix1q5yq2kr.png\" width = 500, align = \"center\">\nApply convolution to the image:", "Z=conv(image)\nZ", "Apply the activation function to the activation map. This will apply the activation function to each element in the activation map.", "A=F.relu(Z)\nA", "The process is summarized in the the following figure. The Relu function is applied to each element. All the elements less than zero are mapped to zero. The remaining components do not change.\n<img src = \"https://ibm.box.com/shared/static/b07y9oepudg45ur8383x11xv36ox6any.gif\" width = 1000, align = \"center\">\n<a id=\"ref1\"></a>\n<h2 align=center>Max Pooling </h2>\n\nConsider the following image:", "image1=torch.zeros(1,1,4,4)\nimage1[0,0,0,:]=torch.tensor([1.0,2.0,3.0,-4.0])\nimage1[0,0,1,:]=torch.tensor([0.0,2.0,-3.0,0.0])\nimage1[0,0,2,:]=torch.tensor([0.0,2.0,3.0,1.0])\n\nimage1", "Max pooling simply takes the maximum value in each region. Consider the following image. For the first region, max pooling simply takes the largest element in a yellow region. \n<img src = \"https://ibm.box.com/shared/static/gso58h37ov42cl6bx5wkvll11kx80jku.png\" width = 500, align = \"center\">\nThe region shifts, and the process is repeated. 
The process is similar to convolution and is demonstrated in the following figure:\n<img src = \"https://ibm.box.com/shared/static/f9hrpfavdpdbuuix9nc4xudytyq51hcu.gif\" width = 500, align = \"center\">\nCreate a maxpooling object in 2d as follows and perform max pooling as follows:", "max3=torch.nn.MaxPool2d(2,stride=1)\nmax3(image1)\n\nmax1=torch.nn.MaxPool2d(2)\nmax1(image1)", "If the stride is set to None (its defaults setting), the process will simply take the maximum in a prescribed area and shift over accordingly as shown in the following figure:\n<img src = \"https://ibm.box.com/shared/static/cenhef82q5kxzvzdqmjyuvbxo6j3c2ej.gif\" width = 500, align = \"center\">\nHere's the code in Pytorch:", "max1=torch.nn.MaxPool2d(2)\nmax1(image1)", "About the Authors:\nJoseph Santarcangelo has a PhD in Electrical Engineering. His research focused on using machine learning, signal processing, and computer vision to determine how videos impact human cognition. \nOther contributors: Michelle Carey, Mavis Zhou \n<hr>\n\nCopyright &copy; 2018 cognitiveclass.ai. This notebook and its source code are released under the terms of the MIT License." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
rorimac/Tools-of-a-Math-Student
Chapter2.ipynb
mit
[ "Using python\nPython is an interpreted language. This means that there is a \"python program\" that reads your input and excecutes it. If you open your ipython interpreter, you'll see the following message (or something very similar):\nPython 2.7.6 (default, Mar 22 2014, 22:59:56) \nType \"copyright\", \"credits\" or \"license\" for more information.\n\nIPython 3.1.0 -- An enhanced Interactive Python.\n? -&gt; Introduction and overview of IPython's features.\n%quickref -&gt; Quick reference.\nhelp -&gt; Python's own help system.\nobject? -&gt; Details about 'object', use 'object??' for extra details.\n\nIn [1]:\n\nwhere In [1]: is a command prompt. Whenever I write", "1 + 1", "I mean that the command is written in the ipython command prompt (or in the ipython notebook if you prefer to use that). There are some lines above the command prompt. These are information on the current python and ipython version and tips on how to get help. But first, let us look at some of the basic operations, addition, subtraction, multiplication, division and exponentiation. In python, these would have the symbols +, -, *, / and **. If we would like to calculate\n\\begin{align}\n\\frac{3.6\\cdot 5 + (3 - 2)^{3}}{2}\n\\end{align}\nwe would write", "(3.6*5 + (3 - 2)**3)/2", "These are the most basic operation and there are several more. We wont bother about them in this text though.\nVariables\nPython, as most programming languages relies on the fact that you can create something called a variable in it. This is similar to what in mathematics is called a variable and a constant. For example, we can define a variable with the name $a$ in python and let it have the value $5$ by typing", "a = 5", "Python now knows that there is such a thing as $a$ which you can use to do further operations. For example, instead of writing", "5 + 3", "We can write", "a + 3", "and we get the same result. 
This might seem as it is not that useful, but we can use it the same way as we use constants and variables in mathematics to shorten what we have to write. For example, we may want to calculate averages of averages.", "b = (2.5 + 6.3)/2\nc = (5.3 + 8.7)/2\n\n(b + c)/2", "Without variables this would get messy and very, very soon extremely difficult. \nObjects\nJust as there are different kind of objects in mathematics such as integers, reals and matrices, there are different kind of objects in python. The basic ones are integers, floats, lists, tuples and strings which will be introduced here.\nIntegers\nIntegers behave very much like the integers in mathematics. Any operation between two integers will result in an integer, for example $5 + 2$ will result in $7$, which is an integer. Notice though that division might not always lead to a new integer, for example $\\frac{5}{2}$ is not an integer. In python operations between integers always results in a new integer. Because of this, division between integers in python will drop the remainder.", "a = 12\na / 5", "Often, this is not what you wanted. This leads us to the next object.\nFloats\nA float, or a floating point number, works very much like a real number in mathematics. The set of floats is closed under all the operations we have introduced just as the reals are. To declare a float we simply have to add a dot to the number we wish to have, like this", "a = 12.", "and now, $a$ is a float instead of am integer. If we do the same operation as before, but with floats we get", "a / 5.", "Now it seems as if we only should use floats and not use integers at all because of this property. But as we will se soon the integers will play a central role in loops.\nLists and tuples\nJust as integers and floats are similar, so are lists and tuples. We will begin with lists. A list is an ordered collection of objects. The objects can be of any type, it can even contain itself! (But that is usually not very useful). 
A list is initiated with matching square brackets [ and ].", "a = [1, 3.5, [3, 5, []], 'this is a string']\na", "Because a list is ordered, the objects in the list can be accessed by stating at which place they are. To access an object in the list we use the square brackets again. In python (and most otehr programming languages) counts from $0$, which means that the first object in the list has index $0$, the second has index $1$ and so on. So to access an object in a list we simply type", "a[2] # Accessing the third element in the list. (This is acomment and is ignored by the interpreter.)\n\na[2][1] # We can access an objects in a list which is in a list\n\na[1] = 13.2 # We can change values of the objects in the list\n\na[0] + a[1] + a[2][1]", "You can also access parts of a list like this:", "a[0:2] # Return a list containing the first element up to but not including the third (index 2) element\n\na[0:2] + a[0:2] # You can put two lists together with addition.", "The lengt hof a list is not fixed in python and objects can be added and removed. To do this we will use append and del.", "a = [] # Creating an empty list\na\n\na.append('bleen')\na.append([2,4.1,'grue'])\na.append(4.3)\na\n\ndel a[-1] # We can also index from the end of the list. -1 indicates the last element\na", "Tuples are initialized similarly as lists and they can contain most objects the list can contain. The main difference is that the tuple does not support item assignment. What this means is that when the tuple is created its objects can not change later. Tuples are initiated with matching parentheses ( and ).", "a = (2, 'e', (3.4, 6.8))\na\n\na[0]\n\na[-1][-1]\n\na[1] = 0", "Because tuples does not support item assignment, you cannot use append or del with it. Tuples ar good to use if you want to make sure that certain values stay unchanged in a program, for example a group of physical constants.\nStrings\nStrings are lines of text or symbols and are initiated with doubble or single quotes. 
If you wish for the string to span several lines you can use triple double quotes", "a = 'here is a string'\na\n\na = \"Here's another\" # Notice that we included a single quote in the string.\na\n\na = \"\"\"\nThis string\nspans\nseveral lines.\n\"\"\"\na # \\n means new line. They can be manually included with \\n.\n\nprint a # To see \\n as an actual new line we need to use print a.", "Here, you saw the first occurrence of the print statement. It's functionality is much greater than prettifying string output as it can print text to the command or terminal window. One omportant functionality of the string is the format function. This function lets us create a string without knowing what it will contain beforehand.", "a = [1,2,3,4,5]\n\nstr = \"The sum of {} is {}\".format(a, sum(a))\nstr", "It uses curly brackets { and } as placeholders for the objects in the format part. There are many other things you can do with strings, to find out use the question mark, ?, in the interpreter after the variable you want more information about. Notice that this does not work in the regular python interpreter, you have to use ipython. You can also use the help function to get help about functions and variables. It works both in the regular interpreter and in ipython.", "a?\n\nhelp(sum)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
scotthuang1989/Python-3-Module-of-the-Week
text/text_wrap.ipynb
apache-2.0
[ "The textwrap module can be used to format text for output in situations where pretty-printing is desired. It offers programmatic functionality similar to the paragraph wrapping or filling features found in many text editors and word processors.", "import textwrap", "Structure of this module\nThere are 2 way of invoking this module\n\ntextwrap.TextWrapper(**kwargs)\nthis module also provide some convinient function \n\nLet's start with convinient functiones.", "sample_text = '''\n The textwrap module can be used to format text for output in\n situations where pretty-printing is desired. It offers\n programmatic functionality similar to the paragraph wrapping\n or filling features found in many text editors.\n '''", "Convinient functiones\ntextwrap.wrap(text, width=70, **kwargs)\nWraps the single paragraph in text (a string) so every line is at most width characters long. Returns a list of output lines, without final newlines", "wrap_result = textwrap.wrap(sample_text,width=30)\n\nwrap_result", "textwrap.fill(text, width=70, **kwargs)\nWraps the single paragraph in text, and returns a single string containing the wrapped paragraph", "fill_result = textwrap.fill(sample_text,width=30)\n\nfill_result", "textwrap.dedent(text)\nRemove any common leading whitespace from every line in text.", "# before dedent\nsample_text\n\ndedent_result = textwrap.dedent(sample_text)\n\n# after dedent\ndedent_result", "you probaly notice that the result of textwrap.fill have some unwanted space. in this case we can work around this by dedent it first then fill.", "dedent_result_wrap = textwrap.fill(dedent_result,width=30)\n\ndedent_result_wrap", "textwrap.indent(text, prefix, predicate=None)\nAdd prefix to the beginning of selected lines in text.", "indent_result = textwrap.indent(sample_text,prefix=\"=A=\")\n\nindent_result\n\nprint(indent_result)", "To control which lines receive the new prefix, pass a callable as the predicate argument to indent(). 
The callable will be invoked for each line of text in turn and the prefix will be added for lines where the return value is true.", "def should_indent(line):\n print('Indent {!r}?'.format(line))\n return len(line.strip()) % 2 == 0\n\ndedented_text = textwrap.dedent(sample_text)\nwrapped = textwrap.fill(dedented_text, width=50)\nfinal = textwrap.indent(wrapped, 'EVEN ',\n predicate=should_indent)\n\nprint('\\nQuoted block:\\n')\nprint(final)", "This example adds the prefix EVEN to lines that contain an even number of characters.\ntextwrap.shorten(text, width, **kwargs)\n\nCollapse and truncate the given text to fit in the given width.\nFirst the whitespace in text is collapsed (all whitespace is replaced by single spaces). If the result fits in the width, it is returned. Otherwise, enough words are dropped from the end so that the remaining words plus the placeholder fit within width", "shorten_result = textwrap.shorten(sample_text,width=90)\n\nshorten_result\n\n# use different placeholder\nshorten_result_1 = textwrap.shorten(sample_text,width=90,placeholder='....')\n\nshorten_result_1", "textwrap.TextWrapper(**kwargs)\nIf you want more convinient or efficient solution. you should use TextWrapper directly.\nyou may notice that \ntextwrap.wrap(text, width=70, **kwargs)\ntextwrap.fill(text, width=70, **kwargs)\ntextwrap.shorten(text, width=70, **kwargs)\nhave optional kwargs arguments. These optional arguments correspond to the instance attributes of TextWrapper.\nfor complete list, refer to official doc\nExample:\nit is possible to set the width of the output, the indent of the first line can be controlled independently of subsequent lines.", "dedented_text = textwrap.dedent(sample_text).strip()\nprint(textwrap.fill(dedented_text,\n initial_indent='',\n subsequent_indent=' ' * 4,\n width=50,\n ))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
skkandrach/foundations-homework
Homework 11 Soma.ipynb
mit
[ "import pandas as pd \n\n!head -n 1000 violations.csv > small-violations.csv", "1. I want to make sure my Plate ID is a string. Can't lose the leading zeroes!", "plate_info = {'Plate ID': 'str'}\ndf = pd.read_csv(\"small-violations.csv\", dtype=plate_info)\n\ndf\n\ndf.head()\n\ndf.head(10)\n\ndf.tail()", "2. I don't think anyone's car was built in 0AD. Discard the '0's as NaN.", "plate_info = {'Plate ID': 'str'}\ndf = pd.read_csv(\"small-violations.csv\", dtype=plate_info, na_values={'Vehicle Year': '0', 'Date First Observed': '0'})\ndf.head()", "3. I want the dates to be dates! Read the read_csv documentation to find out how to make pandas automatically parse dates.", "import dateutil\ndef date_to_date(date):\n date = str(date)\n parsed_date = dateutil.parser.parse(date)\n return parsed_date\n\ndf.columns\n\ndf['New Issue Date']= df['Issue Date'].apply(date_to_date)\n\nimport datetime\ndef convert_to_time(time):\n try:\n str_time = str(time)\n return datetime.datetime.strptime(str_time, \"%Y%m%d\")\n except:\n return None\n\nother_df = df[df['Vehicle Expiration Date'] != 0]\nother_df.head()\n\nother_df['New Vehicle Expiration Date']= other_df['Vehicle Expiration Date'].apply(convert_to_time)\n\nother_df.head()", "4. \"Date first observed\" is a pretty weird column, but it seems like it has a date hiding inside. Using a function with .apply, transform the string (e.g. \"20140324\") into a Python date. Make the 0's show up as NaN.", "other_df.columns\n\nother_df['Date First Observed'].dtypes\n\nother_df['Date First Observed'].tail()\n\nimport dateutil\n\nother_df['Date First Observed']\n\nother_df['Violation Time'].head()\n\nother_df['Violation Time'].tail()\n\ndef int_to_date(integer):\n if not pd.isnull(integer):\n date = str(int(integer))\n parsed_date = dateutil.parser.parse(date)\n return parsed_date.strftime(\"%Y-%-m-%d\")\n\nother_df['Date First Observed'].apply(int_to_date)", "5. \"Violation time\" is... not a time. 
Make it a time.", "def violation_time_to_time(time):\n try:\n hour = time[0:2]\n minutes = time[2:4]\n am_pm= time[4]\n regular_time= hour + \":\" + minutes + \" \" + am_pm + 'm'\n violation_time_fixed = dateutil.parser.parse(regular_time)\n return violation_time_fixed.strftime(\"%H:%M%p\")\n except:\n return None \n\nother_df['Violation Time'].apply(violation_time_to_time)", "6. There sure are a lot of colors of cars, too bad so many of them are the same. Make \"BLK\" and \"BLACK\", \"WT\" and \"WHITE\", and any other combinations that you notice.", "other_df['Vehicle Color'].value_counts()\n\ndef color_rename(color):\n if (color == 'BLACK') or (color == 'BLK') or (color == 'BK'):\n return 'BLACK'\n elif (color == 'WHITE') or (color == 'WHT') or (color == 'WH') or (color == 'W'):\n return 'WHITE'\n\nother_df['Vehicle Color'].apply(color_rename)", "7. Join the data with the Parking Violations Code dataset from the NYC Open Data site.", "parking_violations_df = pd.read_csv(\"DOF_Parking_Violation_Codes.csv\", encoding=\"mac_roman\", error_bad_lines=False)\nparking_violations_df.head()\n\nparking_violations_df['CODE'].describe()\n\nother_df['Violation Code'].describe()\n\ndef convert_to_str(n):\n return str(n)\n\nparking_violations_df['Code'] = parking_violations_df['CODE'].apply(convert_to_str)\n\nother_df['Violation code'] = other_df['Violation Code'].apply(convert_to_str)\n\nparking_violations_df.head()\n\nupdated_parking_violations_df = parking_violations_df.rename(columns={'Manhattan  96th St. & below': 'Manhattan 96th & below', 'All Other Areas': 'All other areas'})\nupdated_parking_violations_df.head()\n\nother_df.head()\n\ndiff_violations_df = pd.merge(other_df, updated_parking_violations_df, left_on='Violation code', right_on='Code')\ndiff_violations_df.head()", "8. 
How much money did NYC make off of parking violations?", "diff_violations_df['Manhattan 96th & below'].describe()\n\ndiff_violations_df['All other areas'].describe()\n\ndiff_violations_df['Manhattan 96th & below'].apply(convert_to_str).head()\n\ndiff_violations_df['All other areas'].apply(convert_to_str).head()\n\ndiff_violations_df = new_violations_df[new_violations_df['Manhattan 96th & below'] != 'vary']\ndiff_violations_df.head()\n\nimport re\ndef strip_and_convert_to_int(string):\n match = re.findall(r\"^\\$?\\d*\", string)\n if match:\n new_string = string.replace(\"$\", \"\").split()\n new_int = int(new_string[0])\n return new_int\n else:\n return None\n \n\ndiff_violations_df['Manhattan 96th and below'] = diff_violations_df['Manhattan 96th & below'].apply(strip_and_convert_to_int)\n\ndiff_violations_df.head()\n\n\ndiff_violations_df['All Other Areas'] = diff_violations_df['All other areas'].apply(strip_and_convert_to_int)\ndiff_violations_df.tail()\n\ndiff_violations_df['All Other Areas'].value_counts().head()\n\nmanhattan_violations = diff_violations_df.groupby('Violation code')['All Other Areas'].sum()\nmanhattan_violations.sum()\n\nviolations_not_man = diff_violations_df.groupby('Violation code')['Manhattan 96th and below'].sum()\nviolations_not_man.sum()\n\nviolations_revenue = violations_not_man.sum() + manhattan_violations.sum()\n\nviolations_revenue", "9. What's the most lucrative kind of parking violation? The most frequent?", "manhattan_violations.sort_values(ascending=False)\n\nviolations_not_man.sort_values(ascending=False)\n\nnew_violations_df['Violation code'].value_counts()", "10. New Jersey has bad drivers, but does it have bad parkers, too? 
How much money does NYC make off of all non-New York vehicles?", "out_of_staters_df = diff_violations_df[diff_violations_df['Registration State'] != 'NY']\nout_of_staters_df.head()\n\nout_of_staters_other = out_of_staters_df.groupby('Violation code')['All Other Areas'].sum()\nout_of_staters_other.sum()\n\nout_of_staters_manhattan= out_of_staters_df.groupby('Violation code')['Manhattan 96th and below'].sum()\nout_of_staters_manhattan.sum()\n\ntotal_out_of_staters_violations = out_of_staters_other.sum()+ out_of_staters_manhattan.sum()\ntotal_out_of_staters_violations\n", "11. Make a chart of the top few.", "%matplotlib inline\n\nout_of_staters_other.sort_values(ascending=False).plot(kind='bar', x='Violation code')\n\nout_of_staters_manhattan.sort_values(ascending=False).plot(kind='bar', x='Violation code')", "12. What time of day do people usually get their tickets? You can break the day up into several blocks - for example 12am-6am, 6am-12pm, 12pm-6pm, 6pm-12am.\n13. What's the average ticket cost in NYC?", "average_tix_price = total_out_of_staters_violations / diff_violations_df['Violation code'].value_counts().sum()\naverage_tix_price", "14. Make a graph of the number of tickets per day.", "diff_violations_df['Issue Date'].value_counts().head(10).plot(kind='barh')", "15. Make a graph of the amount of revenue collected per day.", "daily_revenue = total_out_of_staters_violations / new_violations_df['New Issue Date'].value_counts()\ndaily_revenue.sort_values(ascending=False).head(20).plot(kind='bar')", "16. Manually construct a dataframe out of https://dmv.ny.gov/statistic/2015licinforce-web.pdf (only NYC boroughts - bronx, queens, manhattan, staten island, brooklyn), having columns for borough name, abbreviation, and number of licensed drivers.", "nyc_licenses = pd.read_excel(\"NYC.xlsx\")\nnyc_licenses", "17. What's the parking-ticket-$-per-licensed-driver in each borough of NYC? 
Do this with pandas and the dataframe you just made, not with your head!", "diff_violations_df.columns\n\ndiff_violations_df['Violation County'].value_counts()\n\nbronx_violations = diff_violations_df[diff_violations_df['Violation County'] == 'BX']\nbronx_licenses = nyc_licenses['Total'][nyc_licenses['Abbreviation'] == 'BX']\nbronx_tix = bronx_violations.groupby('Violation code')['All Other Areas'].sum()\ndriver_bronx_tix = bronx_licenses / bronx_tix.sum()\ndriver_bronx_tix\n\n\nqueens_violations = diff_violations_df[diff_violations_df['Violation County'] == 'Q']\nqueens_licenses = nyc_licenses['Total'][nyc_licenses['Abbreviation'] == 'Q']\nqueens_tix = queens_violations.groupby('Violation code')['All Other Areas'].sum()\ndriver_queens_tix = queens_licenses / queens_tix.sum()\ndriver_queens_tix\n\nny_violations = diff_violations_df[diff_violations_df['Violation County'] == 'NY']\nny_licenses = nyc_licenses['Total'][nyc_licenses['Abbreviation'] == 'NY']\nny_tix = ny_violations.groupby('Violation code')['All Other Areas'].sum()\ndriver_ny_tix = ny_licenses / ny_tix.sum()\ndriver_ny_tix\n\nbrooklyn_violations = diff_violations_df[diff_violations_df['Violation County'] == 'R']\nbrooklyn_licenses = nyc_licenses['Total'][nyc_licenses['Abbreviation'] == 'R']\nbrooklyn_tix = brooklyn_violations.groupby('Violation code')['All Other Areas'].sum()\ndriver_brooklyn_tix = brooklyn_licenses / brooklyn_tix.sum()\ndriver_brooklyn_tix\n\nstaten_is_violations = diff_violations_df[diff_violations_df['Violation County'] == 'K']\nstaten_is_licenses = nyc_licenses['Total'][nyc_licenses['Abbreviation'] == 'K']\nstaten_is_tix = violations_kings.groupby('Violation code')['All Other Areas'].sum()\ndriver_staten_is_tix = staten_is_licenses / staten_is_tix.sum()\ndriver_staten_is_tix" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
JasonSanchez/w261
exams/w261mt/Midterm MRjob code.ipynb
mit
[ "DATASCI W261: Machine Learning at Scale\nVersion 1: One MapReduce Stage (join data at the first reducer)\nData Generation\nData Information:\n+ Sizes: 1000 points\n+ True model: y = 1.0 * x - 4\n+ Noise:Normal Distributed mean = 0, var = 5", "%matplotlib inline\nimport numpy as np\nimport pylab \nsize = 1000\nx = np.random.uniform(-40, 40, size)\ny = x * 1.0 - 4 + np.random.normal(0,5,size)\ndata = zip(range(size),y,x)\n#data = np.concatenate((y, x), axis=1)\nnp.savetxt('LinearRegression.csv',data,'%i,%f,%f')\n\ndata[:10]", "Data Visualiazation", "pylab.plot(x, y,'*')\npylab.show()", "MrJob class code\nThe solution of linear model $$ \\textbf{Y} = \\textbf{X}\\theta $$ is:\n$$ \\hat{\\theta} = (\\textbf{X}^T\\textbf{X})^{-1}\\textbf{X}^T\\textbf{y} $$\nIf $\\textbf{X}^T\\textbf{X}$ is denoted by $A$, and $\\textbf{X}^T\\textbf{y}$ is denoted by $b$, then\n$$ \\hat{\\theta} = A^{-1}b $$\nThere are two MrJob classes to calculate intermediate results:\n+ linearRegressionXSquare.py calculates $A = \\textbf{X}^T\\textbf{X}$\n+ linearRegressionXy.py calculates $b = \\textbf{X}^T\\textbf{y}$", "%%writefile linearRegressionXSquare.py\n#Version 1: One MapReduce Stage (join data at the first reducer)\nfrom mrjob.job import MRJob\n\nclass MRMatrixX2(MRJob):\n #Emit all the data need to caculate cell i,j in result matrix\n def mapper(self, _, line):\n v = line.split(',')\n # add 1s to calculate intercept\n v.append('1.0')\n for i in range(len(v)-2):\n for j in range(len(v)-2):\n yield (j,i),(int(v[0]),float(v[i+2]))\n yield (i,j),(int(v[0]),float(v[i+2]))\n \n # Sum up the product for cell i,j\n def reducer(self, key, values):\n idxdict = {}\n s = 0.0\n preidx = -1\n preval = 0\n f = []\n for idx, value in values:\n if str(idx) in idxdict:\n s = s + value * idxdict[str(idx)]\n else:\n idxdict[str(idx)] = value\n yield key,s\n\nif __name__ == '__main__':\n MRMatrixX2.run()\n\n%%writefile linearRegressionXy.py\nfrom mrjob.job import MRJob\n\nclass MRMatrixXY(MRJob):\n def 
mapper(self, _, line):\n v = line.split(',')\n # product of y*xi\n for i in range(len(v)-2):\n yield i, float(v[1])*float(v[i+2])\n # To calculate Intercept\n yield i+1, float(v[1])\n \n # Sum up the products\n def reducer(self, key, values):\n yield key,sum(values)\n\nif __name__ == '__main__':\n MRMatrixXY.run()", "Driver:\nDriver run tow MrJob class to get $\\textbf{X}^T\\textbf{X}$ and $\\textbf{X}^T\\textbf{y}$. And it calculate $(\\textbf{X}^T\\textbf{X})^{-1}$ by numpy.linalg.solve.", "from numpy import linalg,array,empty\nfrom linearRegressionXSquare import MRMatrixX2\nfrom linearRegressionXy import MRMatrixXY\nmr_job1 = MRMatrixX2(args=['LinearRegression.csv'])\nmr_job2 = MRMatrixXY(args=['LinearRegression.csv'])\n\nX_Square = []\nX_Y = []\n# Calculate XT*X Covariance Matrix\nprint \"Matrix XT*X:\"\nwith mr_job1.make_runner() as runner: \n # Run MrJob MatrixMultiplication Job\n runner.run()\n # Extract the output I.E. ship data to driver be careful if data you ship is too big\n for line in runner.stream_output():\n key,value = mr_job1.parse_output_line(line)\n X_Square.append((key,value))\n print key, value\nprint \" \" \n# Calculate XT*Y\nprint \"Vector XT*Y:\"\nwith mr_job2.make_runner() as runner: \n runner.run()\n for line in runner.stream_output():\n key,value = mr_job2.parse_output_line(line)\n X_Y.append((key,value))\n print key, value\nprint \" \" \n\n#Local Processing the output from two MrJob\nn = len(X_Y)\nif(n*n!=len(X_Square)):\n print 'Error!'\nelse:\n XX = empty(shape=[n,n])\n for v in X_Square:\n XX[v[0][0],v[0][1]] = v[1]\n XY = empty(shape=[n,1])\n for v in X_Y:\n XY[v[0],0] = v[1]\n\nprint XX\nprint\nprint XY\n \ntheta = linalg.solve(XX,XY)\nprint \"Coefficients:\",theta[0,0],',',theta[1,0]", "Gradient descent - doesn't work", "%%writefile MrJobBatchGDUpdate_LinearRegression.py\nfrom mrjob.job import MRJob\n\n# This MrJob calculates the gradient of the entire training set \n# Mapper: calculate partial gradient for each example \n# 
\nclass MrJobBatchGDUpdate_LinearRegression(MRJob):\n # run before the mapper processes any input\n def read_weightsfile(self):\n # Read weights file\n with open('weights.txt', 'r') as f:\n self.weights = [float(v) for v in f.readline().split(',')]\n # Initialze gradient for this iteration\n self.partial_Gradient = [0]*len(self.weights)\n self.partial_count = 0\n \n # Calculate partial gradient for each example \n def partial_gradient(self, _, line):\n D = (map(float,line.split(',')))\n # y_hat is the predicted value given current weights\n y_hat = self.weights[0]+self.weights[1]*D[1]\n # Update parial gradient vector with gradient form current example\n self.partial_Gradient = [self.partial_Gradient[0]+ D[0]-y_hat, self.partial_Gradient[1]+(D[0]-y_hat)*D[1]]\n self.partial_count = self.partial_count + 1\n #yield None, (D[0]-y_hat,(D[0]-y_hat)*D[1],1)\n \n # Finally emit in-memory partial gradient and partial count\n def partial_gradient_emit(self):\n yield None, (self.partial_Gradient,self.partial_count)\n \n # Accumulate partial gradient from mapper and emit total gradient \n # Output: key = None, Value = gradient vector\n def gradient_accumulater(self, _, partial_Gradient_Record): \n total_gradient = [0]*2\n total_count = 0\n for partial_Gradient,partial_count in partial_Gradient_Record:\n total_count = total_count + partial_count\n total_gradient[0] = total_gradient[0] + partial_Gradient[0]\n total_gradient[1] = total_gradient[1] + partial_Gradient[1]\n yield None, [v/total_count for v in total_gradient]\n \n def steps(self):\n return [self.mr(mapper_init=self.read_weightsfile,\n mapper=self.partial_gradient,\n mapper_final=self.partial_gradient_emit,\n reducer=self.gradient_accumulater)] \n \nif __name__ == '__main__':\n MrJobBatchGDUpdate_LinearRegression.run()\n\nfrom numpy import random, array\nfrom MrJobBatchGDUpdate_LinearRegression import MrJobBatchGDUpdate_LinearRegression\n\nlearning_rate = 0.05\nstop_criteria = 0.000005\n\n# Generate random values as 
inital weights\nweights = array([random.uniform(-3,3),random.uniform(-3,3)])\n\n# Write the weights to the files\nwith open('weights.txt', 'w+') as f:\n f.writelines(','.join(str(j) for j in weights))\n\n\n# Update centroids iteratively\ni = 0\nwhile(1):\n # create a mrjob instance for batch gradient descent update over all data\n mr_job = MrJobBatchGDUpdate_LinearRegression(args=['--file', 'weights.txt', 'LinearRegression.csv'])\n \n print \"iteration =\"+str(i)+\" weights =\",weights\n # Save weights from previous iteration\n weights_old = weights\n with mr_job.make_runner() as runner: \n runner.run()\n # stream_output: get access of the output \n for line in runner.stream_output():\n # value is the gradient value\n key,value = mr_job.parse_output_line(line)\n # Update weights\n weights = weights - learning_rate*array(value)\n i = i + 1\n if i>100: break\n # Write the updated weights to file \n with open('weights.txt', 'w+') as f:\n f.writelines(','.join(str(j) for j in weights))\n # Stop if weights get converged\n if(sum((weights_old-weights)**2)<stop_criteria):\n break\n \nprint \"Final weights\\n\"\nprint weights", "Kmeans" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]