repo_name
stringlengths
6
77
path
stringlengths
8
215
license
stringclasses
15 values
content
stringlengths
335
154k
sot/aca_stats
fit_acq_prob_model-2018-03-sota.ipynb
bsd-3-clause
from __future__ import division import numpy as np import matplotlib.pyplot as plt from astropy.table import Table from astropy.time import Time import tables from scipy import stats import tables3_api %matplotlib inline """ Explanation: Fit the flight acquisition probability model in 2018-03 Fit values here were computed 2018-Mar-08 This version introduces a dependence on the search box size. Search box sizes of 160 or 180 arcsec (required for at least 3 star slots) were used in normal operations starting in the MAR2017 products. This followed two PMSTA anomalies. End of explanation """ SOTA2017_FIT_NO_1P5 = [3.8981465963928441, 5.5208216663935739, 2.0187091292966395, # offsets -2.2103221221745111, 0.37783433000968347, 0.10035462978065751, # scales 0.0038541777023636111] # brighter than 8.5 mag SOTA2017_FIT_ONLY_1P5 = [4.4598955940740002, 7.3654868182850661, 4.380944461070051, -1.4766615762918867, 0.53879889036008366, -0.36463411364645115, 0.0020022525242344045] """ Explanation: Final 2017 fit values End of explanation """ SOTA2015_FIT_ALL = [3.9438714542029976, 5.4601129927961134, 1.6582423213669775, -2.0646518576907495, 0.36414269305801689, -0.0075143036207362852, 0.003740065500207244] SOTA2015_FIT_NO_1P5 = [4.092016310373646, 6.5415918325159641, 1.8191919043258409, -2.2301709573082413, 0.30337711472920426, 0.10116735012955963, 0.0043395964215468185] SOTA2015_FIT_ONLY_1P5 = [4.786710417762472, 4.839392687262392, 1.8646719319052267, -1.4926740399312248, 0.76412972998935347, -0.20229644263097146, 0.0016270748026844457] """ Explanation: Final 2015 fit values End of explanation """ with tables.open_file('/proj/sot/ska/data/acq_stats/acq_stats.h5', 'r') as h5: cols = h5.root.data.cols names = {'tstart': 'guide_tstart', 'obsid': 'obsid', 'obc_id': 'acqid', 'halfwidth': 'halfw', 'warm_pix': 'n100_warm_frac', 'mag': 'mag_aca', 'known_bad': 'known_bad', 'color': 'color1', 'img_func': 'img_func', 'ion_rad': 'ion_rad', 'sat_pix': 'sat_pix', 'agasc_id': 'agasc_id', 't_ccd': 
'ccd_temp', 'slot': 'slot'} acqs = Table([getattr(cols, h5_name)[:] for h5_name in names.values()], names=list(names.keys())) year_q0 = 1999.0 + 31. / 365.25 # Jan 31 approximately acqs['year'] = Time(acqs['tstart'], format='cxcsec').decimalyear.astype('f4') acqs['quarter'] = (np.trunc((acqs['year'] - year_q0) * 4)).astype('f4') acqs['color_1p5'] = np.where(acqs['color'] == 1.5, 1, 0) # Filter for year and mag (previously used data through 2007:001) ok = (acqs['year'] > 2014) & (acqs['mag'] > 6.0) & (acqs['mag'] < 10.6) # Filter known bad obsids print('Filtering known bad obsids, start len = {}'.format(np.count_nonzero(ok))) bad_obsids = [ # Venus 2411,2414,6395,7306,7307,7308,7309,7311,7312,7313,7314,7315,7317,7318,7406,583, 7310,9741,9742,9743,9744,9745,9746,9747,9749,9752,9753,9748,7316,15292,16499, 16500,16501,16503,16504,16505,16506,16502, ] for badid in bad_obsids: ok = ok & (acqs['obsid'] != badid) print('Filtering known bad obsids, end len = {}'.format(np.count_nonzero(ok))) data_all = acqs[ok] data_all.sort('year') data_all['mag10'] = data_all['mag'] - 10.0 # Adjust probability (in probit space) for box size. See: # https://github.com/sot/skanb/blob/master/pea-test-set/fit_box_size_acq_prob.ipynb b1 = 0.96 b2 = -0.30 box0 = (data_all['halfwidth'] - 120) / 120 # normalized version of box, equal to 0.0 at nominal default data_all['box_delta'] = b1 * box0 + b2 * box0**2 # Create 'fail' column, rewriting history as if the OBC always # ignore the MS flag in ID'ing acq stars. 
Set ms_disabled = False # to not do this obc_id = data_all['obc_id'] obc_id_no_ms = (data_all['img_func'] == 'star') & ~data_all['sat_pix'] & ~data_all['ion_rad'] data_all['fail'] = np.where(obc_id | obc_id_no_ms, 0.0, 1.0) data_all = data_all.group_by('quarter') data_mean = data_all.groups.aggregate(np.mean) def p_fail(pars, m10, wp, box_delta=0.0): """ Acquisition probability model :param pars: 7 parameters (3 x offset, 3 x scale, p_fail for bright stars) :param m10: mag - 10 :param wp: warm pixel fraction :param box: search box half width (arcsec) """ scl0, scl1, scl2 = pars[0:3] off0, off1, off2 = pars[3:6] p_bright_fail = pars[6] # Make sure inputs have same dimensions m10, wp, box_delta = np.broadcast_arrays(m10, wp, box_delta) scale = scl0 + scl1 * m10 + scl2 * m10**2 offset = off0 + off1 * m10 + off2 * m10**2 p_fail = offset + scale * wp + box_delta p_fail = stats.norm.cdf(p_fail) # probit transform p_fail[m10 < -1.5] = p_bright_fail # For stars brighter than 8.5 mag use a constant return p_fail def p_acq_fail(data=None): """ Sherpa fit function wrapper to ensure proper use of data in fitting. 
""" if data is None: data = data_all m10 = data['mag10'] wp = data['warm_pix'] box_delta = data['box_delta'] def sherpa_func(pars, x): return p_fail(pars, m10, wp, box_delta) return sherpa_func def fit_sota_model(data_mask=None): from sherpa import ui data = data_all if data_mask is None else data_all[data_mask] data_id = 1 ui.set_method('simplex') ui.set_stat('cash') ui.load_user_model(p_acq_fail(data), 'model') ui.add_user_pars('model', ['scl0', 'scl1', 'scl2', 'off0', 'off1', 'off2', 'p_bright_fail']) ui.set_model(data_id, 'model') ui.load_arrays(data_id, np.array(data['year']), np.array(data['fail'], dtype=np.float)) # Initial fit values from fit of all data start_vals = iter(SOTA2015_FIT_ALL) # Offset fmod = ui.get_model_component('model') for name in ('scl', 'off'): for num in (0, 1, 2): comp_name = name + str(num) setattr(fmod, comp_name, next(start_vals)) comp = getattr(fmod, comp_name) comp.min = -100000 comp.max = 100000 # ui.freeze(comp) fmod.p_bright_fail = 0.025 fmod.p_bright_fail.min = 0.0 fmod.p_bright_fail.max = 1.0 # ui.freeze(fmod.p_bright_fail) ui.fit(data_id) # conf = ui.get_confidence_results() return ui.get_fit_results() """ Explanation: Fit code End of explanation """ def plot_fit_grouped(pars, group_col, group_bin, mask=None, log=False, colors='br', label=None, probit=False): data = data_all if mask is None else data_all[mask] data['model'] = p_acq_fail(data)(pars, None) group = np.trunc(data[group_col] / group_bin) data = data.group_by(group) data_mean = data.groups.aggregate(np.mean) len_groups = np.diff(data.groups.indices) data_fail = data_mean['fail'] model_fail = data_mean['model'] # Possibly plot the data and model probabilities in probit space if probit: data_fail = stats.norm.ppf(data_fail) model_fail = stats.norm.ppf(model_fail) fail_sigmas = np.sqrt(data_fail * len_groups) / len_groups plt.errorbar(data_mean[group_col], data_fail, yerr=fail_sigmas, fmt='.' 
+ colors[0], label=label) plt.plot(data_mean[group_col], model_fail, '-' + colors[1]) if log: ax = plt.gca() ax.set_yscale('log') def mag_filter(mag0, mag1): ok = (data_all['mag'] > mag0) & (data_all['mag'] < mag1) return ok def wp_filter(wp0, wp1): ok = (data_all['warm_pix'] > wp0) & (data_all['warm_pix'] < wp1) return ok def plot_fit_all(parvals, mask=None, probit=False): if mask is None: mask = np.ones(len(data_all), dtype=bool) plt.figure() plot_fit_grouped(parvals, 'mag', 0.25, wp_filter(0.20, 0.25) & mask, log=False, colors='gk', label='0.20 < WP < 0.25') plot_fit_grouped(parvals, 'mag', 0.25, wp_filter(0.10, 0.20) & mask, log=False, colors='cm', label='0.10 < WP < 0.20') plt.legend(loc='upper left'); plt.ylim(0.001, 1.0); plt.xlim(9, 11) plt.grid() plt.figure() plot_fit_grouped(parvals, 'mag', 0.25, wp_filter(0.20, 0.25) & mask, probit=True, colors='gk', label='0.20 < WP < 0.25') plot_fit_grouped(parvals, 'mag', 0.25, wp_filter(0.10, 0.20) & mask, probit=True, colors='cm', label='0.10 < WP < 0.20') plt.legend(loc='upper left'); # plt.ylim(0.001, 1.0); plt.xlim(9, 11) plt.grid() plt.figure() plot_fit_grouped(parvals, 'warm_pix', 0.02, mag_filter(10.3, 10.6) & mask, log=False, colors='gk', label='10.3 < mag < 10.6') plot_fit_grouped(parvals, 'warm_pix', 0.02, mag_filter(10, 10.3) & mask, log=False, colors='cm', label='10 < mag < 10.3') plot_fit_grouped(parvals, 'warm_pix', 0.02, mag_filter(9, 10) & mask, log=False, colors='br', label='9 < mag < 10') plt.legend(loc='best') plt.grid() plt.figure() plot_fit_grouped(parvals, 'year', 0.25, mag_filter(10.3, 10.6) & mask, colors='gk', label='10.3 < mag < 10.6') plot_fit_grouped(parvals, 'year', 0.25, mag_filter(10, 10.3) & mask, colors='cm', label='10 < mag < 10.3') plot_fit_grouped(parvals, 'year', 0.25, mag_filter(9.5, 10) & mask, colors='br', label='9.5 < mag < 10') plot_fit_grouped(parvals, 'year', 0.25, mag_filter(9.0, 9.5) & mask, colors='gk', label='9.0 < mag < 9.5') plt.legend(loc='best') plt.grid() 
plt.figure() plot_fit_grouped(parvals, 'year', 0.25, mag_filter(10.3, 10.6) & mask, colors='gk', label='10.3 < mag < 10.6', probit=True) plot_fit_grouped(parvals, 'year', 0.25, mag_filter(10, 10.3) & mask, colors='cm', label='10 < mag < 10.3', probit=True) plot_fit_grouped(parvals, 'year', 0.25, mag_filter(9.5, 10) & mask, colors='br', label='9.5 < mag < 10', probit=True) plot_fit_grouped(parvals, 'year', 0.25, mag_filter(9.0, 9.5) & mask, colors='gk', label='9.0 < mag < 9.5', probit=True) plt.legend(loc='best') plt.grid(); """ Explanation: Plotting and validation End of explanation """ # fit = fit_sota_model(data_all['color'] == 1.5, ms_disabled=True) mask_no_1p5 = data_all['color'] != 1.5 print('Hang tight, this could take a few minutes') fit_no_1p5 = fit_sota_model(mask_no_1p5) plot_fit_all(fit_no_1p5.parvals, mask=mask_no_1p5) plot_fit_all(SOTA2017_FIT_NO_1P5, mask=mask_no_1p5) """ Explanation: Color != 1.5 fit (this is MOST acq stars) End of explanation """ print('Hang tight, this could take a few minutes') mask_1p5 = data_all['color'] == 1.5 fit_1p5 = fit_sota_model(mask_1p5) plot_fit_all(fit_1p5.parvals, mask=mask_1p5) """ Explanation: Color == 1.5 fit End of explanation """ mag = np.linspace(9, 11, 30) for wp in (0.1, 0.2, 0.3): plt.plot(mag, p_fail(fit_no_1p5.parvals, mag-10, wp), 'r', label='2015 model' if wp == 0.1 else None) plt.plot(mag, p_fail(SOTA2017_FIT_NO_1P5, mag-10, wp), 'b', label='2017 model' if wp == 0.1 else None) plt.grid() plt.xlabel('Mag') plt.ylim(0, 1) plt.title('Failure prob vs. mag for Wp=(0.1, 0.2, 0.3)') plt.legend(loc='upper left') plt.ylabel('Prob'); mag = np.linspace(9, 11, 30) for wp in (0.1, 0.2, 0.3): plt.plot(mag, stats.norm.ppf(p_fail(fit_no_1p5.parvals, mag-10, wp)), 'r', label='2018 model' if wp == 0.1 else None) plt.plot(mag, stats.norm.ppf(p_fail(SOTA2017_FIT_NO_1P5, mag-10, wp)), 'b', label='2017 model' if wp == 0.1 else None) plt.grid() plt.xlabel('Mag') # plt.ylim(0, 1) plt.title('Failure prob vs. 
mag for Wp=(0.1, 0.2, 0.3)') plt.legend(loc='upper left') plt.ylabel('Prob'); """ Explanation: Compare fit to 2017 coefficients Failure prob vs. mag for Wp=(0.1, 0.2, 0.3) End of explanation """ for mag in (10.0, 10.25, 10.5): wp = np.linspace(0, 0.4, 30) plt.plot(wp, p_fail(fit_no_1p5.parvals, mag-10, wp), 'r', label='2015 model' if mag == 10.0 else None) plt.plot(wp, p_fail(SOTA2017_FIT_NO_1P5, mag-10, wp), 'b', label='2017 model' if mag == 10.0 else None) plt.grid() plt.xlabel('Warm pix frac') plt.ylim(0, 1) plt.title('Failure prob vs. Wp for mag=(10.0, 10.25, 10.5)') plt.ylabel('Fail prob'); """ Explanation: Failure prob vs. Wp for mag=(10.0, 10.25, 10.5) End of explanation """ plt.hist(data_all['warm_pix'], bins=100) plt.grid() plt.xlabel('Warm pixel fraction'); plt.hist(data_all['mag'], bins=np.arange(6, 11.1, 0.1)) plt.grid() plt.xlabel('Mag_aca') plt.plot(data_all['year'], data_all['warm_pix']) plt.ylim(0, None) plt.grid(); ok = (data_all['mag'] > 10.3) & (data_all['mag'] < 10.6) da = data_all[ok] from Chandra.Time import DateTime yr0 = DateTime('2017:079').frac_year yr1 = DateTime('2017-10-01T00:00:00').frac_year yr2 = DateTime('2018:060').frac_year ok1 = (yr0 < da['year']) & (da['year'] < yr1) ok2 = (yr1 < da['year']) & (da['year'] < yr2) plt.figure() plt.hist(da['mag'][ok1], facecolor='C0') plt.hist(da['mag'][ok2], facecolor='C1', alpha=0.5) print(np.mean(da['fail'][ok1])) print(np.mean(da['fail'][ok2])) print(np.count_nonzero(ok1)) print(np.count_nonzero(ok2)) print(np.mean(da['mag'][ok1])) print(np.mean(da['mag'][ok2])) print(len(set(da['agasc_id'][ok1]))) print(len(set(da['agasc_id'][ok2]))) print(np.mean(da['warm_pix'][ok1])) print(np.mean(da['warm_pix'][ok2])) print(np.mean(da['t_ccd'][ok1])) print(np.mean(da['t_ccd'][ok2])) from scipy.stats import binom n = np.count_nonzero(ok1) k = np.count_nonzero(da['fail'][ok1]) print(binom.ppf(0.01, n, k/n) / n) print(binom.ppf(0.99, n, k/n) / n) n = np.count_nonzero(ok2) k = np.count_nonzero(da['fail'][ok2]) 
print(binom.ppf(0.01, n, k/n) / n) print(binom.ppf(0.99, n, k/n) / n) print(np.mean(da['halfwidth'][ok1])) print(np.mean(da['halfwidth'][ok2])) print(np.mean(da['ion_rad'][ok1])) print(np.mean(da['ion_rad'][ok2])) print(np.mean(da['sat_pix'][ok1])) print(np.mean(da['sat_pix'][ok2])) plot_fit_grouped(fit_no_1p5.parvals, 'year', 0.10, mag_filter(10.3, 10.6) & mask_no_1p5, colors='gk', label='10.3 < mag < 10.6') plt.xlim(2016.0, None) y0, y1 = plt.ylim() x = DateTime('2017-10-01T00:00:00').frac_year plt.plot([x, x], [y0, y1], '--r', alpha=0.5) plt.grid(); plot_fit_grouped(fit_no_1p5.parvals, 'year', 0.10, mag_filter(10.0, 10.3) & mask_no_1p5, colors='gk', label='10.0 < mag < 10.3') plt.xlim(2016.0, None) y0, y1 = plt.ylim() x = DateTime('2017-10-01T00:00:00').frac_year plt.plot([x, x], [y0, y1], '--r', alpha=0.5) plt.grid(); """ Explanation: Histogram of warm pixel fraction End of explanation """
ES-DOC/esdoc-jupyterhub
notebooks/nasa-giss/cmip6/models/sandbox-3/aerosol.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'nasa-giss', 'sandbox-3', 'aerosol') """ Explanation: ES-DOC CMIP6 Model Properties - Aerosol MIP Era: CMIP6 Institute: NASA-GISS Source ID: SANDBOX-3 Topic: Aerosol Sub-Topics: Transport, Emissions, Concentrations, Optical Radiative Properties, Model. Properties: 69 (37 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:54:21 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --&gt; Software Properties 3. Key Properties --&gt; Timestep Framework 4. Key Properties --&gt; Meteorological Forcings 5. Key Properties --&gt; Resolution 6. Key Properties --&gt; Tuning Applied 7. Transport 8. Emissions 9. Concentrations 10. Optical Radiative Properties 11. Optical Radiative Properties --&gt; Absorption 12. Optical Radiative Properties --&gt; Mixtures 13. Optical Radiative Properties --&gt; Impact Of H2o 14. Optical Radiative Properties --&gt; Radiative Scheme 15. Optical Radiative Properties --&gt; Cloud Interactions 16. Model 1. 
Key Properties Key properties of the aerosol model 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of aerosol model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of aerosol model code End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.scheme_scope') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "troposhere" # "stratosphere" # "mesosphere" # "mesosphere" # "whole atmosphere" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.3. Scheme Scope Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Atmospheric domains covered by the aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.basic_approximations') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.4. Basic Approximations Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Basic approximations made in the aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "3D mass/volume ratio for aerosols" # "3D number concenttration for aerosols" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.5. 
Prognostic Variables Form Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Prognostic variables in the aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 1.6. Number Of Tracers Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of tracers in the aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.family_approach') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 1.7. Family Approach Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Are aerosol calculations generalized into families of species? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Software Properties Software properties of aerosol code 2.1. Repository Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Location of code for this component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.2. Code Version Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Code version identifier. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.3. Code Languages Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Code language(s). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses atmospheric chemistry time stepping" # "Specific timestepping (operator splitting)" # "Specific timestepping (integrated)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3. Key Properties --&gt; Timestep Framework Physical properties of seawater in ocean 3.1. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Mathematical method deployed to solve the time evolution of the prognostic variables End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.2. Split Operator Advection Timestep Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Timestep for aerosol advection (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.3. Split Operator Physical Timestep Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Timestep for aerosol physics (in seconds). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.4. Integrated Timestep Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Timestep for the aerosol model (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Explicit" # "Implicit" # "Semi-implicit" # "Semi-analytic" # "Impact solver" # "Back Euler" # "Newton Raphson" # "Rosenbrock" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3.5. Integrated Scheme Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Specify the type of timestep scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Meteorological Forcings ** 4.1. Variables 3D Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Three dimensionsal forcing variables, e.g. U, V, W, T, Q, P, conventive mass flux End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.2. Variables 2D Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Two dimensionsal forcing variables, e.g. land-sea mask definition End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.3. Frequency Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Frequency with which meteological forcings are applied (in seconds). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Key Properties --&gt; Resolution Resolution in the aersosol model grid 5.1. Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.2. Canonical Horizontal Resolution Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 5.3. Number Of Horizontal Gridpoints Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Total number of horizontal (XY) points (or degrees of freedom) on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 5.4. Number Of Vertical Levels Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Number of vertical levels resolved on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 5.5. Is Adaptive Grid Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Default is False. Set true if grid resolution changes during execution. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6. Key Properties --&gt; Tuning Applied Tuning methodology for aerosol model 6.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview description of tuning: explain and motivate the main targets and metrics retained. &amp;Document the relative weight given to climate performance metrics versus process oriented metrics, &amp;and on the possible conflicts with parameterization level tuning. In particular describe any struggle &amp;with a parameter value that required pushing it to its limits to solve a particular model deficiency. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.2. 
Global Mean Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List set of metrics of the global mean state used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.3. Regional Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List of regional metrics of mean state used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.4. Trend Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List observed trend metrics used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Transport Aerosol transport 7.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of transport in atmosperic aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses Atmospheric chemistry transport scheme" # "Specific transport scheme (eulerian)" # "Specific transport scheme (semi-lagrangian)" # "Specific transport scheme (eulerian and semi-lagrangian)" # "Specific transport scheme (lagrangian)" # TODO - please enter value(s) """ Explanation: 7.2. 
Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Method for aerosol transport modeling End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses Atmospheric chemistry transport scheme" # "Mass adjustment" # "Concentrations positivity" # "Gradients monotonicity" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 7.3. Mass Conservation Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Method used to ensure mass conservation. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.convention') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses Atmospheric chemistry transport scheme" # "Convective fluxes connected to tracers" # "Vertical velocities connected to tracers" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 7.4. Convention Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Transport by convention End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Emissions Atmospheric aerosol emissions 8.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of emissions in atmosperic aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.emissions.method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Prescribed (climatology)" # "Prescribed CMIP6" # "Prescribed above surface" # "Interactive" # "Interactive above surface" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.2. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Method used to define aerosol species (several methods allowed because the different species may not use the same method). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.sources') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Vegetation" # "Volcanos" # "Bare ground" # "Sea surface" # "Lightning" # "Fires" # "Aircraft" # "Anthropogenic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.3. Sources Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Sources of the aerosol species are taken into account in the emissions scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Interannual" # "Annual" # "Monthly" # "Daily" # TODO - please enter value(s) """ Explanation: 8.4. Prescribed Climatology Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify the climatology type for aerosol emissions End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.5. 
Prescribed Climatology Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of aerosol species emitted and prescribed via a climatology End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.6. Prescribed Spatially Uniform Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of aerosol species emitted and prescribed as spatially uniform End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.7. Interactive Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of aerosol species emitted and specified via an interactive method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.other_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.8. Other Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of aerosol species emitted and specified via an &quot;other method&quot; End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.9. Other Method Characteristics Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Characteristics of the &quot;other method&quot; used for aerosol emissions End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.concentrations.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Concentrations Atmospheric aerosol concentrations 9.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of concentrations in atmosperic aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.2. Prescribed Lower Boundary Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed at the lower boundary. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.3. Prescribed Upper Boundary Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed at the upper boundary. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.4. Prescribed Fields Mmr Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed as mass mixing ratios. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.5. Prescribed Fields Mmr Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed as AOD plus CCNs. 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10. Optical Radiative Properties Aerosol optical and radiative properties 10.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of optical and radiative properties End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 11. Optical Radiative Properties --&gt; Absorption Absortion properties in aerosol scheme 11.1. Black Carbon Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Absorption mass coefficient of black carbon at 550nm (if non-absorbing enter 0) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 11.2. Dust Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Absorption mass coefficient of dust at 550nm (if non-absorbing enter 0) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 11.3. Organics Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Absorption mass coefficient of organics at 550nm (if non-absorbing enter 0) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 12. Optical Radiative Properties --&gt; Mixtures ** 12.1. External Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there external mixing with respect to chemical composition? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 12.2. Internal Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there internal mixing with respect to chemical composition? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.3. Mixing Rule Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If there is internal mixing with respect to chemical composition then indicate the mixinrg rule End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 13. Optical Radiative Properties --&gt; Impact Of H2o ** 13.1. Size Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does H2O impact size? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 13.2. Internal Mixture Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does H2O impact internal mixture? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14. Optical Radiative Properties --&gt; Radiative Scheme Radiative scheme for aerosol 14.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of radiative scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 14.2. Shortwave Bands Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of shortwave bands End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 14.3. Longwave Bands Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of longwave bands End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15. Optical Radiative Properties --&gt; Cloud Interactions Aerosol-cloud interactions 15.1. 
Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of aerosol-cloud interactions End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 15.2. Twomey Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the Twomey effect included? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 15.3. Twomey Minimum Ccn Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If the Twomey effect is included, then what is the minimum CCN number? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 15.4. Drizzle Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does the scheme affect drizzle? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 15.5. Cloud Lifetime Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does the scheme affect cloud lifetime? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 15.6. Longwave Bands Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of longwave bands End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 16. Model Aerosol model 16.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of atmosperic aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Dry deposition" # "Sedimentation" # "Wet deposition (impaction scavenging)" # "Wet deposition (nucleation scavenging)" # "Coagulation" # "Oxidation (gas phase)" # "Oxidation (in cloud)" # "Condensation" # "Ageing" # "Advection (horizontal)" # "Advection (vertical)" # "Heterogeneous chemistry" # "Nucleation" # TODO - please enter value(s) """ Explanation: 16.2. Processes Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Processes included in the Aerosol model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.coupling') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Radiation" # "Land surface" # "Heterogeneous chemistry" # "Clouds" # "Ocean" # "Cryosphere" # "Gas phase chemistry" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.3. Coupling Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Other model components coupled to the Aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "DMS" # "SO2" # "Ammonia" # "Iodine" # "Terpene" # "Isoprene" # "VOC" # "NOx" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.4. Gas Phase Precursors Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of gas phase aerosol precursors. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.scheme_type') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Bulk" # "Modal" # "Bin" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.5. Scheme Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Type(s) of aerosol scheme used by the aerosols model (potentially multiple: some species may be covered by one type of aerosol scheme and other species covered by another type). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.bulk_scheme_species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Sulphate" # "Nitrate" # "Sea salt" # "Dust" # "Ice" # "Organic" # "Black carbon / soot" # "SOA (secondary organic aerosols)" # "POM (particulate organic matter)" # "Polar stratospheric ice" # "NAT (Nitric acid trihydrate)" # "NAD (Nitric acid dihydrate)" # "STS (supercooled ternary solution aerosol particule)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.6. Bulk Scheme Species Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of species covered by the bulk scheme. End of explanation """
nicoa/showcase
pydatabln_2018_schedule2cal/pydatabln2018_filter_and_overview.ipynb
mit
import requests as rq import pandas as pd import matplotlib.pyplot as mpl import bs4 import os from tqdm import tqdm_notebook from datetime import time %matplotlib inline """ Explanation: Table of Contents <p><div class="lev1 toc-item"><a href="#Query-Data" data-toc-modified-id="Query-Data-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Query Data</a></div><div class="lev1 toc-item"><a href="#visualize-some-stuff" data-toc-modified-id="visualize-some-stuff-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>visualize some stuff</a></div> End of explanation """ base_url = "https://pydata.org" r = rq.get(base_url + "/berlin2018/schedule/") bs = bs4.BeautifulSoup(r.text, "html.parser") """ Explanation: Query Data Grab schedule page: End of explanation """ data = {} for ahref in tqdm_notebook(bs.find_all("a")): if 'schedule/presentation' in ahref.get("href"): url = ahref.get("href") else: continue data[url] = {} resp = bs4.BeautifulSoup(rq.get(base_url + url).text, "html.parser") title = resp.find("h2").text resp = resp.find_all(attrs={'class':"container"})[1] when, who = resp.find_all("h4") date_info = when.string.split("\n")[1:] day_info = date_info[0].strip() time_inf = date_info[1].strip() room_inf = date_info[3].strip()[3:] speaker = who.find("a").text level = resp.find("dd").text abstract = resp.find(attrs={'class':'abstract'}).text description = resp.find(attrs={'class':'description'}).text data[url] = { 'day_info': day_info, 'title': title, 'time_inf': time_inf, 'room_inf': room_inf, 'speaker': speaker, 'level': level, 'abstract': abstract, 'description': description } """ Explanation: Let's query every talk description: End of explanation """ df = pd.DataFrame.from_dict(data, orient='index') df.reset_index(drop=True, inplace=True) # Tutorials on Friday df.loc[df.day_info=='Friday', 'tutorial'] = True df['tutorial'].fillna(False, inplace=True) # time handling df['time_from'], df['time_to'] = zip(*df.time_inf.str.split(u'\u2013')) df.time_from = 
pd.to_datetime(df.time_from).dt.time df.time_to = pd.to_datetime(df.time_to).dt.time del df['time_inf'] df.to_json('./data.json') df.head(3) # Example: Let's query all non-novice talks on sunday, starting at 4 pm tmp = df.query("(level!='Novice') & (day_info=='Sunday')") tmp[tmp.time_from >= time(16)] """ Explanation: Okay, make a dataframe and add some helpful columns: End of explanation """ plt.style.use('seaborn-darkgrid')#'seaborn-darkgrid') plt.rcParams['savefig.dpi'] = 200 plt.rcParams['figure.dpi'] = 120 plt.rcParams['figure.autolayout'] = False plt.rcParams['figure.figsize'] = 10, 5 plt.rcParams['axes.labelsize'] = 17 plt.rcParams['axes.titlesize'] = 20 plt.rcParams['font.size'] = 16 plt.rcParams['lines.linewidth'] = 2.0 plt.rcParams['lines.markersize'] = 8 plt.rcParams['legend.fontsize'] = 11 plt.rcParams['font.family'] = "serif" plt.rcParams['font.serif'] = "cm" plt.rcParams['text.latex.preamble'] = "\\usepackage{subdepth}, \\usepackage{type1cm}" plt.rcParams['text.usetex'] = True ax = df.level.value_counts().plot.bar(rot=0) ax.set_ylabel("number of talks") ax.set_title("levels of the talks where:") plt.show() ax = df.rename(columns={'day_info': 'dayinfo'}).groupby("dayinfo")['level'].value_counts(normalize=True).round(2).unstack(level=0).plot.bar(rot=0) ax.set_xlabel('') ax.set_title('So the last day is more kind of "fade-out"?') plt.show() ax = df.groupby("tutorial")['level'].value_counts(normalize=True).round(2).unstack(level=0).T.plot.bar(rot=0) ax.set_title('the percentage of experienced slots is higher for tutorials!\n\\small{So come on fridays for experienced level ;-)}') plt.show() """ Explanation: visualize some stuff End of explanation """
kevinsung/OpenFermion
docs/fqe/guide/introduction.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2020 The OpenFermion Developers End of explanation """ try: import fqe except ImportError: !pip install fqe --quiet import fqe import numpy as np """ Explanation: Introduction to FQE <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://quantumai.google/openfermion/fqe/guide/introduction"><img src="https://quantumai.google/site-assets/images/buttons/quantumai_logo_1x.png" />View on QuantumAI</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/quantumlib/OpenFermion/blob/master/docs/fqe/guide/introduction.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/colab_logo_1x.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/quantumlib/OpenFermion/blob/master/docs/fqe/guide/introduction.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/github_logo_1x.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/OpenFermion/docs/fqe/guide/introduction.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/download_icon_1x.png" />Download notebook</a> </td> </table> OpenFermion-FQE is an emulator for quantum computing specialized for simulations of Fermionic many-body problems, where FQE stands for 'Fermionic quantum emulator.' 
By focusing on Fermionic physics, OpenFermion-FQE realizes far more efficient emulation of quantum computing than generic quantum emulators such as Cirq, both in computation and memory costs; the speed-up and improved memory footprint originate from the use of the spin and number symmetries as well as highly optimized special algorithms. The examples of the problems that can be simulated by OpenFermion-FQE include those in molecular electronic structure, condensed matter physics, and nuclear physics. The initial version of OpenFermion-FQE has been developed in collaboration between QSimulate and Google Quantum AI. The source code is found in the GitHub repository (https://github.com/quantumlib/OpenFermion-FQE). This tutorial will describe the data structures and conventions of the library. End of explanation """ wfn = fqe.Wavefunction([[4, 4, 4], [4, 2, 4], [4, 0, 4], [4, -2, 4], [4, -4, 4]]) """ Explanation: The FQE Wavefunction The Wavefunction is an interface to the objects that hold the actual wavefunction data. As mentioned, the wavefunction is partitioned into sectors with fixed particle and $Sz$ quantum number. This partitioning information is the necessary information for initializing a Wavefunction object. As an example, we consider initializing a wavefunction with four spatial orbitals, four electrons, and different $Sz$ expectation values. The Wavefunction object takes a list of these sectors [[n_electrons, sz, n_orbits]]. End of explanation """ wfn_fqe = fqe.Wavefunction([[2, -2, 4]], broken=None) """ Explanation: This command initializes a wavefunction with the following block structure: Each sector corresponds to a set of bit strings $$ \vert I \rangle = \vert I_{\alpha}I_{\beta}\rangle $$ that encode a fixed particle number and fixed $Sz$ expectation. The coefficients associated with the bitstrings in these sectors are formed into matrices. This helps with efficient vectorized computations. 
The row-index of the array corresponds to the $\alpha$ spin-orbital number occupation index and the column-index corresponds to the $\beta$-strings. The Wavefunction object provides tools to access sectors or perform basic mathematical operations on this vector. Methods to initialize wavefunctions FQE wavefunctions can be initialized by calling the constructor directly. End of explanation """ wfn_fqe.print_wfn() """ Explanation: When wavefunctions are first created, they are initialized to empty values. We can see this by printing out the wavefunction. End of explanation """ wfn_fqe.set_wfn(strategy="hartree-fock") wfn_fqe.print_wfn() """ Explanation: To set the values of a wavefunction, we can use the set_wfn method with a provided strategy. End of explanation """ interesting_states = wfn_fqe.get_coeff((2, -2)) print(interesting_states) """ Explanation: Users can access the wavefunction through the get_sector method. This returns the entire matrix of data representing the specified sector of the wavefunction. For example, we can grab the sector corresponding to $n = 2$ and $sz = -2$ by doing the following. End of explanation """ wfn_fqe = fqe.get_wavefunction(4, -2, 10) """ Explanation: Other than the Wavefunction constructor, several utility methods are available to initialize wavefunctions. The function fqe.get_wavefunction builds a wavefunction with definite particle number and spin. End of explanation """ wfn_fqe1, wfn_fqe2 = fqe.get_wavefunction_multiple([[4, 0, 10], [5, -5, 10]]) """ Explanation: The function fqe.get_wavefunction_multiple constructs multiple wavefunctions with different particle number, spin, and orbital number. End of explanation """ # Get a spin conserving wavefunction. spin_conserving_wfn = fqe.get_spin_conserving_wavefunction(2, 4) # Get a number conserving wavefunction. 
number_conserving_wfn = fqe.get_number_conserving_wavefunction(2, 4) """ Explanation: There are also functions like fqe.get_number_conserving_wavefunction and fqe.get_spin_conserving_wavefunction to get number or spin conserving wavefunctions, respectively. End of explanation """ nqubits = 4 cirq_wfn = np.random.rand(2**nqubits) + 1.0j * np.random.rand(2**nqubits) cirq_wfn /= np.linalg.norm(cirq_wfn) print("Cirq wavefunction:") print(*cirq_wfn, sep="\n") """ Explanation: Conversions between FQE and Cirq wavefunction representations Wavefunctions on $n$ qubits in Cirq are represented by Numpy arrays with $2^n$ amplitudes. End of explanation """ fqe_wfn = fqe.from_cirq(cirq_wfn, thresh=0.0001) fqe_wfn.print_wfn() """ Explanation: To convert from this representation to the FQE representation, the function fqe.from_cirq can be used. End of explanation """ cirq_wfn_from_fqe = fqe.to_cirq(fqe_wfn) print("Cirq wavefunction from FQE:") print(*cirq_wfn_from_fqe, sep="\n") assert np.allclose(cirq_wfn_from_fqe, cirq_wfn) """ Explanation: Note: The thresh argument is the value below which amplitudes are considered zero. We can convert back to the Cirq representation using fqe._to_cirq. End of explanation """ print('String formatting') fqe_wfn.print_wfn(fmt='str') print('Occupation formatting') fqe_wfn.print_wfn(fmt='occ') """ Explanation: An important thing to note in these conversions is the ordering of the $\alpha$ and $\beta$ strings in the converted wavefunctions. The FQE uses the OpenFermion convention of interleaved $\alpha$ and $\beta$ orbitals. Thus when converting to Cirq we first convert each bitstring into an OpenFermion operator and then call normal ordering. Printing and saving wavefunctions Printing is currently available as alpha beta strings followed by the coefficient as well as orbital occupation representation. 
End of explanation """ from openfermion import FermionOperator, hermitian_conjugated ops = FermionOperator('2^ 0', 1.2) new_wfn = fqe_wfn.apply(ops + hermitian_conjugated(ops)) """ Explanation: Wavefunctions can also be saved to disk using the save method which takes a filename and optional path. Action on Wavefunctions: Fermionic algebra operations and their unitaries on the state FermionOperators can be directly passed in to create a new wavefunction based on application of the operators. The FermionOperators are parsed according to the interleaved $\alpha$ $\beta$ indexing of the spin-orbitals. This means that odd index FermionOperators correspond to $\beta$-spin orbitals and even are $\alpha$-spin orbitals. Sharp Edge: The user must be careful to not break the symmetry of the wavefunction. If a request to apply an operator to a state takes the wavefunction outside of the specified symmetry sector the FQE will not execute the command. Effectively, the FQE requires the user to have more knowledge of what type of operations their Wavefunction object can support. End of explanation """ i, j, theta = 0, 1, np.pi / 3 op = (FermionOperator(((2 * i, 1), (2 * j, 0)), coefficient=-1j * theta) + FermionOperator(((2 * j, 1), (2 * i, 0)), coefficient=1j * theta)) new_wfn = fqe_wfn.time_evolve(1.0, op) new_wfn.print_wfn() """ Explanation: Unitary operations Any simulator backend must be able to perform unitary evolution on a state. The FQE accomplishes this by implementing code for evolving a state via the action of a unitary generated by fermionic generators. Given a fermion operator $g$, the unitary $$ e^{-i (g + g^{\dagger})} $$ can be applied to the state. 
It can be shown that this evolution operator can be rewritten as $$ e^{-i(g + g^{\dagger})\epsilon } = \cos\left(\epsilon\right) \mathbb{I}{s}(gg^{\dagger}) + \cos\left(\epsilon\right) \mathbb{I}{s}(g^{\dagger}g) - i\sin\left(\epsilon\right) \left(g + g^{\dagger}\right) \left[\mathbb{I}{s}(gg^{\dagger}) + \mathbb{I}{s}(g^{\dagger}g)\right] + \mathbb{I}_{!s} $$ The $\mathbb{I}{!s}$ is for setting the coefficients of the unitary that are not in the subspace $\mathcal{H}{s} \subset \mathcal{H}$ where $gg^{\dagger}$ is 0. The user can specify a fermionic monomial in OpenFermion and use the time_evolve method of the Wavefunction object to call the evolution. All the rules for preserving symmetries must be maintained as before. End of explanation """
mne-tools/mne-tools.github.io
0.13/_downloads/plot_object_evoked.ipynb
bsd-3-clause
import os.path as op import mne """ Explanation: The :class:Evoked &lt;mne.Evoked&gt; data structure: evoked/averaged data End of explanation """ data_path = mne.datasets.sample.data_path() fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif') evokeds = mne.read_evokeds(fname, baseline=(None, 0), proj=True) print(evokeds) """ Explanation: The :class:Evoked &lt;mne.Evoked&gt; data structure is mainly used for storing averaged data over trials. In MNE the evoked objects are created by averaging epochs data with :func:mne.Epochs.average. Here we read the evoked dataset from a file. End of explanation """ evoked = mne.read_evokeds(fname, condition='Left Auditory', baseline=(None, 0), proj=True) print(evoked) """ Explanation: Notice that the reader function returned a list of evoked instances. This is because you can store multiple categories into a single file. Here we have categories of ['Left Auditory', 'Right Auditory', 'Left Visual', 'Right Visual']. We can also use condition parameter to read in only one category. End of explanation """ print(evoked.info) print(evoked.times) """ Explanation: If you're gone through the tutorials of raw and epochs datasets, you're probably already familiar with the :class:Info &lt;mne.Info&gt; attribute. There is nothing new or special with the evoked.info. All the relevant info is still there. End of explanation """ print(evoked.nave) # Number of averaged epochs. print(evoked.first) # First time sample. print(evoked.last) # Last time sample. print(evoked.comment) # Comment on dataset. Usually the condition. print(evoked.kind) # Type of data, either average or standard_error. """ Explanation: The evoked data structure also contains some new attributes easily accessible: End of explanation """ data = evoked.data print(data.shape) """ Explanation: The data is also easily accessible. 
Since the evoked data arrays are usually much smaller than raw or epochs datasets, they are preloaded into the memory when the evoked object is constructed. You can access the data as a numpy array. End of explanation """ print('Data from channel {0}:'.format(evoked.ch_names[10])) print(data[10]) """ Explanation: The data is arranged in an array of shape (n_channels, n_times). Notice that unlike epochs, evoked object does not support indexing. This means that to access the data of a specific channel you must use the data array directly. End of explanation """ evoked = mne.EvokedArray(data, evoked.info, tmin=evoked.times[0]) evoked.plot() """ Explanation: If you want to import evoked data from some other system and you have it in a numpy array you can use :class:mne.EvokedArray for that. All you need is the data and some info about the evoked data. For more information, see tut_creating_data_structures. End of explanation """
pwer21c/pwer21c.github.io
python/pythoncodes/20022021.ipynb
mit
i=1 while i<100: if i%7==0: print(i) i=i+1 """ Explanation: 1에서 100까지의 수중에서 7의 배수 multiples de 7, multiples of 7를 출력할때 입니다. End of explanation """ i=1 multiplesof7=[] while i<100: if i%7==0: multiplesof7.append(i) i=i+1 print(multiplesof7) """ Explanation: 그런데 7의 배수를 list 변수에 저장해서 출력하기 위해서는 아래와 같이 합니다. End of explanation """ i=1 group7=[] group5=[] while i<100: if i%7==0: group7.append(i) elif i%5==0: group5.append(i) i=i+1 print(group7) print(group5) """ Explanation: 7과 5의 배수를 저장하기 위해서는 다음과 같아요 End of explanation """ i=1 group7=[] group5=[] while i<100: if i%7==0: group7.append(i) if i%5==0: group5.append(i) i=i+1 print(group7) print(group5) """ Explanation: 숙제. 그러면 3과 6과 9의 배수를 구하기 위해서는 어떻게 해야 할까요? 참고로 위에서 5의 배수에 보년 35와 70이 빠져 있어요 그래서 위의 코드는 오히려 이렇게 되어야 해요. 보세요 35와 70이 들어있어요. 왜일까요 ? 이게 숙제에 대한 힌트입니다. End of explanation """
ES-DOC/esdoc-jupyterhub
notebooks/ncc/cmip6/models/noresm2-mh/seaice.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'ncc', 'noresm2-mh', 'seaice') """ Explanation: ES-DOC CMIP6 Model Properties - Seaice MIP Era: CMIP6 Institute: NCC Source ID: NORESM2-MH Topic: Seaice Sub-Topics: Dynamics, Thermodynamics, Radiative Processes. Properties: 80 (63 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:54:24 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.model.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties --&gt; Model 2. Key Properties --&gt; Variables 3. Key Properties --&gt; Seawater Properties 4. Key Properties --&gt; Resolution 5. Key Properties --&gt; Tuning Applied 6. Key Properties --&gt; Key Parameter Values 7. Key Properties --&gt; Assumptions 8. Key Properties --&gt; Conservation 9. Grid --&gt; Discretisation --&gt; Horizontal 10. Grid --&gt; Discretisation --&gt; Vertical 11. Grid --&gt; Seaice Categories 12. Grid --&gt; Snow On Seaice 13. Dynamics 14. Thermodynamics --&gt; Energy 15. Thermodynamics --&gt; Mass 16. Thermodynamics --&gt; Salt 17. Thermodynamics --&gt; Salt --&gt; Mass Transport 18. 
Thermodynamics --&gt; Salt --&gt; Thermodynamics 19. Thermodynamics --&gt; Ice Thickness Distribution 20. Thermodynamics --&gt; Ice Floe Size Distribution 21. Thermodynamics --&gt; Melt Ponds 22. Thermodynamics --&gt; Snow Processes 23. Radiative Processes 1. Key Properties --&gt; Model Name of seaice model used. 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of sea ice model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.model.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of sea ice model code (e.g. CICE 4.2, LIM 2.1, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.variables.prognostic') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Sea ice temperature" # "Sea ice concentration" # "Sea ice thickness" # "Sea ice volume per grid cell area" # "Sea ice u-velocity" # "Sea ice v-velocity" # "Sea ice enthalpy" # "Internal ice stress" # "Salinity" # "Snow temperature" # "Snow depth" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Variables List of prognostic variable in the sea ice model. 2.1. Prognostic Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of prognostic variables in the sea ice component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "TEOS-10" # "Constant" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3. 
Key Properties --&gt; Seawater Properties Properties of seawater relevant to sea ice 3.1. Ocean Freezing Point Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Equation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.2. Ocean Freezing Point Value Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If using a constant seawater freezing point, specify this value. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.resolution.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Resolution Resolution of the sea ice grid 4.1. Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 This is a string usually used by the modelling group to describe the resolution of this grid e.g. N512L180, T512L70, ORCA025 etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.2. Canonical Horizontal Resolution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.3. 
Number Of Horizontal Gridpoints Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Total number of horizontal (XY) points (or degrees of freedom) on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Key Properties --&gt; Tuning Applied Tuning applied to sea ice model component 5.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.2. Target Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What was the aim of tuning, e.g. correct sea ice minima, correct seasonal cycle. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.3. Simulations Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 *Which simulations had tuning applied, e.g. all, not historical, only pi-control? * End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.4. Metrics Used Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 List any observed metrics used in tuning model/parameters End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.5. Variables Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Which variables were changed during the tuning process? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Ice strength (P*) in units of N m{-2}" # "Snow conductivity (ks) in units of W m{-1} K{-1} " # "Minimum thickness of ice created in leads (h0) in units of m" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 6. Key Properties --&gt; Key Parameter Values Values of key parameters 6.1. Typical Parameters Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N What values were specificed for the following parameters if used? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.2. Additional Parameters Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N If you have any additional paramterised values that you have used (e.g. 
minimum open water fraction or bare ice albedo), please provide them here as a comma separated list End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.assumptions.description') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Key Properties --&gt; Assumptions Assumptions made in the sea ice model 7.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N General overview description of any key assumptions made in this model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.2. On Diagnostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Note any assumptions that specifically affect the CMIP6 diagnostic sea ice variables. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.3. Missing Processes Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List any key processes missing in this model configuration? Provide full details where this affects the CMIP6 diagnostic sea ice variables? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Key Properties --&gt; Conservation Conservation in the sea ice component 8.1. 
Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Provide a general description of conservation methodology. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.properties') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Energy" # "Mass" # "Salt" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.2. Properties Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Properties conserved in sea ice by the numerical schemes. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.budget') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.3. Budget Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 For each conserved property, specify the output variables which close the related budgets. as a comma separated list. For example: Conserved property, variable1, variable2, variable3 End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 8.4. Was Flux Correction Used Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does conservation involved flux correction? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.5. 
Corrected Conserved Prognostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 List any variables which are conserved by more than the numerical scheme alone. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Ocean grid" # "Atmosphere Grid" # "Own Grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 9. Grid --&gt; Discretisation --&gt; Horizontal Sea ice discretisation in the horizontal 9.1. Grid Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Grid on which sea ice is horizontal discretised? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Structured grid" # "Unstructured grid" # "Adaptive grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 9.2. Grid Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the type of sea ice grid? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Finite differences" # "Finite elements" # "Finite volumes" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 9.3. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the advection scheme? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 9.4. 
Thermodynamics Time Step Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the time step in the sea ice model thermodynamic component in seconds. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 9.5. Dynamics Time Step Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the time step in the sea ice model dynamic component in seconds. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.6. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify any additional horizontal discretisation details. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Zero-layer" # "Two-layers" # "Multi-layers" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 10. Grid --&gt; Discretisation --&gt; Vertical Sea ice vertical properties 10.1. Layering Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N What type of sea ice vertical layers are implemented for purposes of thermodynamic calculations? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 10.2. 
Number Of Layers Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 If using multi-layers specify how many. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10.3. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify any additional vertical grid details. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 11. Grid --&gt; Seaice Categories What method is used to represent sea ice categories ? 11.1. Has Mulitple Categories Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Set to true if the sea ice model has multiple sea ice categories. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 11.2. Number Of Categories Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 If using sea ice categories specify how many. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.3. Category Limits Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 If using sea ice categories specify each of the category limits. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.4. Ice Thickness Distribution Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the sea ice thickness distribution scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.other') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.5. Other Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If the sea ice model does not use sea ice categories specify any additional details. For example models that paramterise the ice thickness distribution ITD (i.e there is no explicit ITD) but there is assumed distribution and fluxes are computed accordingly. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 12. Grid --&gt; Snow On Seaice Snow on sea ice details 12.1. Has Snow On Ice Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is snow on ice represented in this model? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 12.2. Number Of Snow Levels Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of vertical levels of snow on ice? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.3. Snow Fraction Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how the snow fraction on sea ice is determined End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.4. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify any additional details related to snow on ice. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.dynamics.horizontal_transport') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Incremental Re-mapping" # "Prather" # "Eulerian" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13. Dynamics Sea Ice Dynamics 13.1. Horizontal Transport Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the method of horizontal advection of sea ice? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Incremental Re-mapping" # "Prather" # "Eulerian" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.2. Transport In Thickness Space Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the method of sea ice transport in thickness space (i.e. in thickness categories)? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Hibler 1979" # "Rothrock 1975" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.3. Ice Strength Formulation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Which method of sea ice strength formulation is used? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.dynamics.redistribution') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Rafting" # "Ridging" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.4. Redistribution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Which processes can redistribute sea ice (including thickness)? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.dynamics.rheology') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Free-drift" # "Mohr-Coloumb" # "Visco-plastic" # "Elastic-visco-plastic" # "Elastic-anisotropic-plastic" # "Granular" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.5. Rheology Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Rheology, what is the ice deformation formulation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Pure ice latent heat (Semtner 0-layer)" # "Pure ice latent and sensible heat" # "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)" # "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14. 
Thermodynamics --&gt; Energy Processes related to energy in sea ice thermodynamics 14.1. Enthalpy Formulation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the energy formulation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Pure ice" # "Saline ice" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14.2. Thermal Conductivity Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What type of thermal conductivity is used? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Conduction fluxes" # "Conduction and radiation heat fluxes" # "Conduction, radiation and latent heat transport" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14.3. Heat Diffusion Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the method of heat diffusion? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Heat Reservoir" # "Thermal Fixed Salinity" # "Thermal Varying Salinity" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14.4. Basal Heat Flux Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Method by which basal ocean heat flux is handled? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 14.5. 
Fixed Salinity Value Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If you have selected {Thermal properties depend on S-T (with fixed salinity)}, supply fixed salinity value for each sea ice layer. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14.6. Heat Content Of Precipitation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the method by which the heat content of precipitation is handled. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14.7. Precipitation Effects On Salinity Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If precipitation (freshwater) that falls on sea ice affects the ocean surface salinity please provide further details. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15. Thermodynamics --&gt; Mass Processes related to mass in sea ice thermodynamics 15.1. New Ice Formation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the method by which new sea ice is formed in open water. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.2. 
Ice Vertical Growth And Melt Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the method that governs the vertical growth and melt of sea ice. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Floe-size dependent (Bitz et al 2001)" # "Virtual thin ice melting (for single-category)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15.3. Ice Lateral Melting Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the method of sea ice lateral melting? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.4. Ice Surface Sublimation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the method that governs sea ice surface sublimation. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.5. Frazil Ice Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the method of frazil ice formation. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 16. Thermodynamics --&gt; Salt Processes related to salt in sea ice thermodynamics. 16.1. 
Has Multiple Sea Ice Salinities Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does the sea ice model use two different salinities: one for thermodynamic calculations; and one for the salt budget? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 16.2. Sea Ice Salinity Thermal Impacts Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does sea ice salinity impact the thermal properties of sea ice? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Prescribed salinity profile" # "Prognostic salinity profile" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17. Thermodynamics --&gt; Salt --&gt; Mass Transport Mass transport of salt 17.1. Salinity Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How is salinity determined in the mass transport of salt calculation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 17.2. Constant Salinity Value Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If using a constant salinity value specify this value in PSU? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17.3. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the salinity profile used. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Prescribed salinity profile" # "Prognostic salinity profile" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 18. Thermodynamics --&gt; Salt --&gt; Thermodynamics Salt thermodynamics 18.1. Salinity Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How is salinity determined in the thermodynamic calculation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 18.2. Constant Salinity Value Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If using a constant salinity value specify this value in PSU? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 18.3. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the salinity profile used. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Explicit" # "Virtual (enhancement of thermal conductivity, thin ice melting)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 19. Thermodynamics --&gt; Ice Thickness Distribution Ice thickness distribution details. 19.1. Representation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How is the sea ice thickness distribution represented? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Explicit" # "Parameterised" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 20. Thermodynamics --&gt; Ice Floe Size Distribution Ice floe-size distribution details. 20.1. Representation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How is the sea ice floe-size represented? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 20.2. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Please provide further details on any parameterisation of floe-size. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 21. Thermodynamics --&gt; Melt Ponds Characteristics of melt ponds. 21.1. 
Are Included Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Are melt ponds included in the sea ice model? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Flocco and Feltham (2010)" # "Level-ice melt ponds" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 21.2. Formulation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What method of melt pond formulation is used? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Albedo" # "Freshwater" # "Heat" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 21.3. Impacts Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N What do melt ponds have an impact on? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging') # PROPERTY VALUE(S): # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 22. Thermodynamics --&gt; Snow Processes Thermodynamic processes in snow on sea ice 22.1. Has Snow Aging Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Set to True if the sea ice model has a snow aging scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22.2. Snow Aging Scheme Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the snow aging scheme. 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 22.3. Has Snow Ice Formation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Set to True if the sea ice model has snow ice formation. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22.4. Snow Ice Formation Scheme Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the snow ice formation scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22.5. Redistribution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the impact of ridging on snow cover? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Single-layered heat diffusion" # "Multi-layered heat diffusion" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22.6. Heat Diffusion Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the heat diffusion through snow methodology in sea ice thermodynamics? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Delta-Eddington" # "Parameterized" # "Multi-band albedo" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23. Radiative Processes Sea Ice Radiative Processes 23.1. Surface Albedo Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Method used to handle surface albedo. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Delta-Eddington" # "Exponential attenuation" # "Ice radiation transmission per category" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23.2. Ice Radiation Transmission Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Method by which solar radiation through sea ice is handled. End of explanation """
benvanwerkhoven/kernel_tuner
tutorial/diffusion_opencl.ipynb
apache-2.0
nx = 1024 ny = 1024 """ Explanation: Tutorial: From physics to tuned GPU kernels This tutorial is designed to show you the whole process starting from modeling a physical process to a Python implementation to creating optimized and auto-tuned GPU application using Kernel Tuner. In this tutorial, we will use diffusion as an example application. We start with modeling the physical process of diffusion, for which we create a simple numerical implementation in Python. Then we create an OpenCL kernel that performs the same computation, but on the GPU. Once we have a OpenCL kernel, we start using the Kernel Tuner for auto-tuning our GPU application. And finally, we'll introduce a few code optimizations to our OpenCL kernel that will improve performance, but also add more parameters to tune on using the Kernel Tuner. <div class="alert alert-info"> **Note:** If you are reading this tutorial on the Kernel Tuner's documentation pages, note that you can actually run this tutorial as a Jupyter Notebook. Just clone the Kernel Tuner's [GitHub repository](http://github.com/benvanwerkhoven/kernel_tuner). Install using *pip install .[tutorial,opencl]* and you're ready to go! You can start the tutorial by typing "jupyter notebook" in the "kernel_tuner/tutorial" directory. </div> Diffusion Put simply, diffusion is the redistribution of something from a region of high concentration to a region of low concentration without bulk motion. The concept of diffusion is widely used in many fields, including physics, chemistry, biology, and many more. Suppose that we take a metal sheet, in which the temperature is exactly equal to one degree everywhere in the sheet. Now if we were to heat a number of points on the sheet to a very high temperature, say a thousand degrees, in an instant by some method. We could see the heat diffuse from these hotspots to the cooler areas. We are assuming that the metal does not melt. 
In addition, we will ignore any heat loss from radiation or other causes in this example. We can use the diffusion equation to model how the heat diffuses through our metal sheet: \begin{equation} \frac{\partial u}{\partial t}= D \left( \frac{\partial^2 u}{\partial x^2} + \frac{\partial^2 u}{\partial y^2} \right) \end{equation} Where $x$ and $y$ represent the spatial descretization of our 2D domain, $u$ is the quantity that is being diffused, $t$ is the descretization in time, and the constant $D$ determines how fast the diffusion takes place. In this example, we will assume a very simple descretization of our problem. We assume that our 2D domain has $nx$ equi-distant grid points in the x-direction and $ny$ equi-distant grid points in the y-direction. Be sure to execute every cell as you read through this document, by selecting it and pressing shift+enter. End of explanation """ def diffuse(field, dt=0.225): field[1:nx-1,1:ny-1] = field[1:nx-1,1:ny-1] + dt * ( field[1:nx-1,2:ny]+field[2:nx,1:ny-1]-4*field[1:nx-1,1:ny-1]+ field[0:nx-2,1:ny-1]+field[1:nx-1,0:ny-2] ) return field """ Explanation: This results in a constant distance of $\delta x$ between all grid points in the $x$ dimension. Using central differences, we can numerically approximate the derivative for a given point $x_i$: \begin{equation} \left. \frac{\partial^2 u}{\partial x^2} \right|{x{i}} \approx \frac{u_{x_{i+1}}-2u_{{x_i}}+u_{x_{i-1}}}{(\delta x)^2} \end{equation} We do the same for the partial derivative in $y$: \begin{equation} \left. \frac{\partial^2 u}{\partial y^2} \right|{y{i}} \approx \frac{u_{y_{i+1}}-2u_{y_{i}}+u_{y_{i-1}}}{(\delta y)^2} \end{equation} If we combine the above equations, we can obtain a numerical estimation for the temperature field of our metal sheet in the next time step, using $\delta t$ as the time between time steps. But before we do, we also simplify the expression a little bit, because we'll assume that $\delta x$ and $\delta y$ are always equal to 1. 
\begin{equation} u'{x,y} = u{x,y} + \delta t \times \left( \left( u_{x_{i+1},y}-2u_{{x_i},y}+u_{x_{i-1},y} \right) + \left( u_{x,y_{i+1}}-2u_{x,y_{i}}+u_{x,y_{i-1}} \right) \right) \end{equation} In this formula $u'_{x,y}$ refers to the temperature field at the time $t + \delta t$. As a final step, we further simplify this equation to: \begin{equation} u'{x,y} = u{x,y} + \delta t \times \left( u_{x,y_{i+1}}+u_{x_{i+1},y}-4u_{{x_i},y}+u_{x_{i-1},y}+u_{x,y_{i-1}} \right) \end{equation} Python implementation We can create a Python function that implements the numerical approximation defined in the above equation. For simplicity we'll use the assumption of a free boundary condition. End of explanation """ #do the imports we need import numpy from matplotlib import pyplot %matplotlib inline #setup initial conditions def get_initial_conditions(nx, ny): field = numpy.ones((ny, nx)).astype(numpy.float32) field[numpy.random.randint(0,nx,size=10), numpy.random.randint(0,ny,size=10)] = 1e3 return field field = get_initial_conditions(nx, ny) #run the diffuse function a 1000 times and another 2000 times and make plots fig, (ax1, ax2) = pyplot.subplots(1,2) for i in range(1000): field = diffuse(field) ax1.imshow(field) for i in range(2000): field = diffuse(field) ax2.imshow(field) """ Explanation: To give our Python function a test run, we will now do some imports and generate the input data for the initial conditions of our metal sheet with a few very hot points. We'll also make two plots, one after a thousand time steps, and a second plot after another two thousand time steps. Do note that the plots are using different ranges for the colors. Also, executing the following cell may take a little while. 
End of explanation """ %%time #save the current field for later use field_copy = numpy.copy(field) #run another 1000 steps of the diffuse function and measure the time for i in range(1000): field = diffuse(field) pyplot.imshow(field) pyplot.show() """ Explanation: Now let's take a quick look at the execution time of our diffuse function. Before we do, we also copy the current state of the metal sheet to be able to restart the computation from this state. End of explanation """ def get_kernel_string(nx, ny): return """ #define nx %d #define ny %d #define dt 0.225f __kernel void diffuse_kernel(global float *u_new, global float *u) { unsigned x = get_group_id(0) * block_size_x + get_local_id(0); unsigned y = get_group_id(1) * block_size_y + get_local_id(1); if (x>0 && x<nx-1 && y>0 && y<ny-1) { u_new[y*nx+x] = u[y*nx+x] + dt * ( u[(y+1)*nx+x]+u[y*nx+x+1]-4.0f*u[y*nx+x]+u[y*nx+x-1]+u[(y-1)*nx+x]); } } """ % (nx, ny) kernel_string = get_kernel_string(nx, ny) """ Explanation: Computing on the GPU The next step in this tutorial is to implement a GPU kernel that will allow us to run our problem on the GPU. We store the kernel code in a Python string, because we can directly compile and run the kernel from Python. In this tutorial, we'll use the OpenCL programming model to implement our kernels. 
End of explanation """ import pyopencl as cl #initialize OpenCL and get compute capability needed for compilation ctx = cl.create_some_context() mf = cl.mem_flags #reserve host memory a_h = field_copy.astype(numpy.float32) #allocate GPU memory (and copy from host buffer) a_d = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=a_h) b_d = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=a_h) #setup thread block dimensions and compile the kernel threads = (16,16) kernel_src = "#define block_size_x %d\n#define block_size_y %d" % threads \ + get_kernel_string(nx, ny) prg = cl.Program(ctx, kernel_src).build() """ Explanation: The above OpenCL kernel parallelizes the work such that every grid point will be processed by a different OpenCL thread. Therefore, the kernel is executed by a 2D grid of threads, which are grouped together into 2D thread blocks. The specific thread block dimensions we choose are not important for the result of the computation in this kernel. But as we will see will later, they will have an impact on performance. In this kernel we are using two, currently undefined, compile-time constants for block_size_x and block_size_y, because we will auto tune these parameters later. It is often needed for performance to fix the thread block dimensions at compile time, because the compiler can unroll loops that iterate using the block size, or because you need to allocate shared memory using the thread block dimensions. The next bit of Python code initializes PyOpenCL, and makes preparations so that we can call the OpenCL kernel to do the computation on the GPU as we did earlier in Python. 
End of explanation """ queue = cl.CommandQueue(ctx) def run_gpu(): for i in range(500): prg.diffuse_kernel( queue, [nx, ny], threads, b_d, a_d) prg.diffuse_kernel( queue, [nx, ny], threads, a_d, b_d) %time run_gpu() #copy the result from the GPU to Python for plotting cl.enqueue_copy(queue, a_h, a_d) queue.finish() fig, (ax1, ax2) = pyplot.subplots(1,2) ax1.imshow(a_h) ax1.set_title("GPU Result") ax2.imshow(field) ax2.set_title("Python Result") pyplot.show() """ Explanation: The above code is a bit of boilerplate we need to compile a kernel using PyOpenCL. We've also, for the moment, fixed the thread block dimensions at 16 by 16. These dimensions serve as our initial guess for what a good performing pair of thread block dimensions could look like. Now that we've setup everything, let's see how long the computation would take using the GPU. End of explanation """ nx = 4096 ny = 4096 field = get_initial_conditions(nx, ny) kernel_string = get_kernel_string(nx, ny) """ Explanation: That should already be a lot faster than our previous Python implementation, but we can do much better if we optimize our GPU kernel. And that is exactly what the rest of this tutorial is about! Also, if you think the Python boilerplate code to call a GPU kernel was a bit messy, we've got good news for you! From now on, we'll only use the Kernel Tuner to compile and benchmark GPU kernels, which we can do with much cleaner Python code. Auto-Tuning with the Kernel Tuner Remember that previously we've set the thread block dimensions to 16 by 16. But how do we actually know if that is the best performing setting? That is where auto-tuning comes into play. Basically, it is very difficult to provide an answer through performance modeling and as such, we'd rather use the Kernel Tuner to compile and benchmark all possible kernel configurations. But before we continue, we'll increase the problem size, because the GPU is very likely underutilized. 
End of explanation """ from collections import OrderedDict tune_params = OrderedDict() tune_params["block_size_x"] = [16, 32, 48, 64, 128] tune_params["block_size_y"] = [2, 4, 8, 16, 32] """ Explanation: The above code block has generated new initial conditions and a new string that contains our OpenCL kernel using our new domain size. To call the Kernel Tuner, we have to specify the tunable parameters, in our case block_size_x and block_size_y. For this purpose, we'll create an ordered dictionary to store the tunable parameters. The keys will be the name of the tunable parameter, and the corresponding value is the list of possible values for the parameter. For the purpose of this tutorial, we'll use a small number of commonly used values for the thread block dimensions, but feel free to try more! End of explanation """ args = [field, field] """ Explanation: We also have to tell the Kernel Tuner about the argument list of our OpenCL kernel. Because the Kernel Tuner will be calling the OpenCL kernel and measure its execution time. For this purpose we create a list in Python, that corresponds with the argument list of the diffuse_kernel OpenCL function. This list will only be used as input to the kernel during tuning. The objects in the list should be Numpy arrays or scalars. Because you can specify the arguments as Numpy arrays, the Kernel Tuner will take care of allocating GPU memory and copying the data to the GPU. End of explanation """ problem_size = (nx, ny) """ Explanation: We're almost ready to call the Kernel Tuner, we just need to set how large the problem is we are currently working on by setting a problem_size. The Kernel Tuner knows about thread block dimensions, which it expects to be called block_size_x, block_size_y, and/or block_size_z. From these and the problem_size, the Kernel Tuner will compute the appropiate grid dimensions on the fly. 
End of explanation """ from kernel_tuner import tune_kernel result = tune_kernel("diffuse_kernel", kernel_string, problem_size, args, tune_params) """ Explanation: And that's everything the Kernel Tuner needs to know to be able to start tuning our kernel. Let's give it a try by executing the next code block! End of explanation """ kernel_string = """ #define nx %d #define ny %d #define dt 0.225f __kernel void diffuse_kernel(global float *u_new, global float *u) { int tx = get_local_id(0); int ty = get_local_id(1); int bx = get_group_id(0) * block_size_x; int by = get_group_id(1) * block_size_y; __local float sh_u[block_size_y+2][block_size_x+2]; #pragma unroll for (int i = ty; i<block_size_y+2; i+=block_size_y) { #pragma unroll for (int j = tx; j<block_size_x+2; j+=block_size_x) { int y = by+i-1; int x = bx+j-1; if (x>=0 && x<nx && y>=0 && y<ny) { sh_u[i][j] = u[y*nx+x]; } } } barrier(CLK_LOCAL_MEM_FENCE); // __syncthreads(); int x = bx+tx; int y = by+ty; if (x>0 && x<nx-1 && y>0 && y<ny-1) { int i = ty+1; int j = tx+1; u_new[y*nx+x] = sh_u[i][j] + dt * ( sh_u[i+1][j] + sh_u[i][j+1] -4.0f * sh_u[i][j] + sh_u[i][j-1] + sh_u[i-1][j] ); } } """ % (nx, ny) result = tune_kernel("diffuse_kernel", kernel_string, problem_size, args, tune_params) """ Explanation: Note that the Kernel Tuner prints a lot of useful information. To ensure you'll be able to tell what was measured in this run the Kernel Tuner always prints the GPU or OpenCL Device name that is being used, as well as the name of the kernel. After that every line contains the combination of parameters and the time that was measured during benchmarking. The time that is being printed is in milliseconds and is obtained by averaging the execution time of 7 runs of the kernel. Finally, as a matter of convenience, the Kernel Tuner also prints the best performing combination of tunable parameters. However, later on in this tutorial we'll explain how to analyze and store the tuning results using Python. 
Looking at the results printed above, the difference in performance between the different kernel configurations may seem very little. However, on our hardware, the performance of this kernel already varies in the order of 10%. Which of course can build up to large differences in the execution time if the kernel is to be executed thousands of times. We can also see that the performance of the best configuration in this set is 5% better than our initially guessed thread block dimensions of 16 by 16. In addtion, you may notice that not all possible combinations of values for block_size_x and block_size_y are among the results. For example, 128x32 is not among the results. This is because some configuration require more threads per thread block than allowed on our GPU. The Kernel Tuner checks the limitations of your GPU at runtime and automatically skips over configurations that use too many threads per block. It will also do this for kernels that cannot be compiled because they use too much shared memory. And likewise for kernels that use too many registers to be launched at runtime. If you'd like to know about which configurations were skipped automatically you can pass the optional parameter verbose=True to tune_kernel. However, knowing the best performing combination of tunable parameters becomes even more important when we start to further optimize our OpenCL kernel. In the next section, we'll add a simple code optimization and show how this affects performance. Using Shared (local) Memory Shared (or local) memory, is a special type of the memory available in OpenCL. Shared memory can be used by threads within the same thread block to exchange and share values. It is in fact, one of the very few ways for threads to communicate on the GPU. The idea is that we'll try improve the performance of our kernel by using shared memory as a software controlled cache. There are already caches on the GPU, but most GPUs only cache accesses to global memory in L2. 
Shared memory is closer to the multiprocessors where the thread blocks are executed, comparable to an L1 cache. However, because there are also hardware caches, the performance improvement from this step is expected to not be that great. The more fine-grained control that we get by using a software managed cache, rather than a hardware implemented cache, comes at the cost of some instruction overhead. In fact, performance is quite likely to degrade a little. However, this intermediate step is necessary for the next optimization step we have in mind. End of explanation """ kernel_string = """ #define nx %d #define ny %d #define dt 0.225f __kernel void diffuse_kernel(global float *u_new, global float *u) { int tx = get_local_id(0); int ty = get_local_id(1); int bx = get_group_id(0) * block_size_x * tile_size_x; int by = get_group_id(1) * block_size_y * tile_size_y; /* int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x * block_size_x * tile_size_x; int by = blockIdx.y * block_size_y * tile_size_y; */ __local float sh_u[block_size_y*tile_size_y+2][block_size_x*tile_size_x+2]; #pragma unroll for (int i = ty; i<block_size_y*tile_size_y+2; i+=block_size_y) { #pragma unroll for (int j = tx; j<block_size_x*tile_size_x+2; j+=block_size_x) { int y = by+i-1; int x = bx+j-1; if (x>=0 && x<nx && y>=0 && y<ny) { sh_u[i][j] = u[y*nx+x]; } } } barrier(CLK_LOCAL_MEM_FENCE); // __syncthreads(); #pragma unroll for (int tj=0; tj<tile_size_y; tj++) { int i = ty+tj*block_size_y+1; int y = by + ty + tj*block_size_y; #pragma unroll for (int ti=0; ti<tile_size_x; ti++) { int j = tx+ti*block_size_x+1; int x = bx + tx + ti*block_size_x; if (x>0 && x<nx-1 && y>0 && y<ny-1) { u_new[y*nx+x] = sh_u[i][j] + dt * ( sh_u[i+1][j] + sh_u[i][j+1] -4.0f * sh_u[i][j] + sh_u[i][j-1] + sh_u[i-1][j] ); } } } } """ % (nx, ny) """ Explanation: Tiling GPU Code One very useful code optimization is called tiling, sometimes also called thread-block-merge. 
You can look at it in this way, currently we have many thread blocks that together work on the entire domain. If we were to use only half of the number of thread blocks, every thread block would need to double the amount of work it performs to cover the entire domain. However, the threads may be able to reuse part of the data and computation that is required to process a single output element for every element beyond the first. This is a code optimization because effectively we are reducing the total number of instructions executed by all threads in all thread blocks. So in a way, were are condensing the total instruction stream while keeping the all the really necessary compute instructions. More importantly, we are increasing data reuse, where previously these values would have been reused from the cache or in the worst-case from GPU memory. We can apply tiling in both the x and y-dimensions. This also introduces two new tunable parameters, namely the tiling factor in x and y, which we will call tile_size_x and tile_size_y. This is what the new kernel looks like: End of explanation """ tune_params["tile_size_x"] = [1,2,4] #add tile_size_x to the tune_params tune_params["tile_size_y"] = [1,2,4] #add tile_size_y to the tune_params grid_div_x = ["block_size_x", "tile_size_x"] #tile_size_x impacts grid dimensions grid_div_y = ["block_size_y", "tile_size_y"] #tile_size_y impacts grid dimensions result = tune_kernel("diffuse_kernel", kernel_string, problem_size, args, tune_params, grid_div_x=grid_div_x, grid_div_y=grid_div_y) """ Explanation: We can tune our tiled kernel by adding the two new tunable parameters to our dictionary tune_params. We also need to somehow tell the Kernel Tuner to use fewer thread blocks to launch kernels with tile_size_x or tile_size_y larger than one. For this purpose the Kernel Tuner's tune_kernel function supports two optional arguments, called grid_div_x and grid_div_y. 
These are the grid divisor lists, which are lists of strings containing all the tunable parameters that divide a certain grid dimension. So far, we have been using the default settings for these, in which case the Kernel Tuner only uses the block_size_x and block_size_y tunable parameters to divide the problem_size. Note that the Kernel Tuner will replace the values of the tunable parameters inside the strings and use the product of the parameters in the grid divisor list to compute the grid dimension rounded up. You can even use arithmetic operations, inside these strings as they will be evaluated. As such, we could have used ["block_size_x*tile_size_x"] to get the same result. We are now ready to call the Kernel Tuner again and tune our tiled kernel. Let's execute the following code block, note that it may take a while as the number of kernel configurations that the Kernel Tuner will try has just been increased with a factor of 9! End of explanation """ best_time = min(result[0], key=lambda x:x['time'])['time'] for i in result[0]: if i["time"] < best_time*1.05: print("".join([k + "=" + str(v) + ", " for k,v in i.items()])) """ Explanation: We can see that the number of kernel configurations tried by the Kernel Tuner is growing rather quickly. Also, the best performing configuration quite a bit faster than the best kernel before we started optimizing. On our GTX Titan X, the execution time went from 0.72 ms to 0.53 ms, a performance improvement of 26%! Note that the thread block dimensions for this kernel configuration are also different. Without optimizations the best performing kernel used a thread block of 32x2, after we've added tiling the best performing kernel uses thread blocks of size 64x4, which is four times as many threads! Also the amount of work increased with tiling factors 2 in the x-direction and 4 in the y-direction, increasing the amount of work per thread block by a factor of 8. 
The difference in the area processed per thread block between the naive and the tiled kernel is a factor 32. However, there are actually several kernel configurations that come close. The following Python code prints all instances with an execution time within 5% of the best performing configuration. End of explanation """ #store output as json import json with open("tutorial.json", 'w') as fp: json.dump(result[0], fp) #store output as csv from pandas import DataFrame df = DataFrame(result[0]) df.to_csv("tutorial.csv") """ Explanation: Storing the results While it's nice that the Kernel Tuner prints the tuning results to stdout, it's not that great if we'd have to parse what is printed to get the results. That is why the tune_kernel() returns a data structure that holds all the results. We've actually already used this data in the above bit of Python code. tune_kernel returns a list of dictionaries, where each benchmarked kernel is represented by a dictionary containing the tunable parameters for that particular kernel configuration and one more entry called 'time'. The list of dictionaries format is very flexible and can easily be converted other formats that are easy to parse formats, like json or csv, for further analysis. You can execute the following code block to store the tuning results to both a json and a csv file (if you have Pandas installed). End of explanation """
scotthuang1989/Python-3-Module-of-the-Week
concurrency/multiprocessing/Passing_Messages_to_Processes.ipynb
apache-2.0
import multiprocessing class MyFancyClass: def __init__(self, name): self.name = name def do_something(self): proc_name = multiprocessing.current_process().name print('Doing something fancy in {} for {}!'.format( proc_name, self.name)) def worker(q): obj = q.get() obj.do_something() if __name__ == '__main__': queue = multiprocessing.Queue() p = multiprocessing.Process(target=worker, args=(queue,)) p.start() queue.put(MyFancyClass('Fancy Dan')) # Wait for the worker to finish queue.close() queue.join_thread() p.join() """ Explanation: Passing Messages to Processes As with threads, a common use pattern for multiple processes is to divide a job up among several workers to run in parallel. Effective use of multiple processes usually requires some communication between them, so that work can be divided and results can be aggregated. A simple way to communicate between processes with multiprocessing is to use a Queue to pass messages back and forth. Any object that can be serialized with pickle can pass through a Queue. 
End of explanation """ import multiprocessing import time class Consumer(multiprocessing.Process): def __init__(self, task_queue, result_queue): multiprocessing.Process.__init__(self) self.task_queue = task_queue self.result_queue = result_queue def run(self): proc_name = self.name while True: next_task = self.task_queue.get() if next_task is None: # Poison pill means shutdown print('{}: Exiting'.format(proc_name)) self.task_queue.task_done() break print('{}: {}'.format(proc_name, next_task)) answer = next_task() self.task_queue.task_done() self.result_queue.put(answer) class Task: def __init__(self, a, b): self.a = a self.b = b def __call__(self): time.sleep(0.1) # pretend to take time to do the work return '{self.a} * {self.b} = {product}'.format( self=self, product=self.a * self.b) def __str__(self): return '{self.a} * {self.b}'.format(self=self) if __name__ == '__main__': # Establish communication queues tasks = multiprocessing.JoinableQueue() results = multiprocessing.Queue() # Start consumers num_consumers = multiprocessing.cpu_count() * 2 print('Creating {} consumers'.format(num_consumers)) consumers = [ Consumer(tasks, results) for i in range(num_consumers) ] for w in consumers: w.start() # Enqueue jobs num_jobs = 10 for i in range(num_jobs): tasks.put(Task(i, i)) # Add a poison pill for each consumer for i in range(num_consumers): tasks.put(None) # Wait for all of the tasks to finish tasks.join() # Start printing results while num_jobs: result = results.get() print('Result:', result) num_jobs -= 1 """ Explanation: A more complex example shows how to manage several workers consuming data from a JoinableQueue and passing results back to the parent process. The poison pill technique is used to stop the workers. After setting up the real tasks, the main program adds one “stop” value per worker to the job queue. When a worker encounters the special value, it breaks out of its processing loop. 
The main process uses the task queue’s join() method to wait for all of the tasks to finish before processing the results. End of explanation """ import multiprocessing import time def wait_for_event(e): """Wait for the event to be set before doing anything""" print('wait_for_event: starting') e.wait() print('wait_for_event: e.is_set()->', e.is_set()) def wait_for_event_timeout(e, t): """Wait t seconds and then timeout""" print('wait_for_event_timeout: starting') e.wait(t) print('wait_for_event_timeout: e.is_set()->', e.is_set()) if __name__ == '__main__': e = multiprocessing.Event() w1 = multiprocessing.Process( name='block', target=wait_for_event, args=(e,), ) w1.start() w1 = multiprocessing.Process( name='block', target=wait_for_event, args=(e,), ) w1.start() w2 = multiprocessing.Process( name='nonblock', target=wait_for_event_timeout, args=(e, 2), ) w2.start() print('main: waiting before calling Event.set()') time.sleep(3) e.set() print('main: event is set') """ Explanation: Signaling between Processes The Event class provides a simple way to communicate state information between processes. An event can be toggled between set and unset states. Users of the event object can wait for it to change from unset to set, using an optional timeout value. End of explanation """ import multiprocessing import sys def worker_with(lock, stream): with lock: stream.write('Lock acquired via with\n') def worker_no_with(lock, stream): lock.acquire() try: stream.write('Lock acquired directly\n') finally: lock.release() lock = multiprocessing.Lock() w = multiprocessing.Process( target=worker_with, args=(lock, sys.stdout), ) nw = multiprocessing.Process( target=worker_no_with, args=(lock, sys.stdout), ) w.start() nw.start() w.join() nw.join() """ Explanation: When wait() times out it returns without an error. The caller is responsible for checking the state of the event using is_set(). 
a event.set() will set off all process that are waiting for this event Controlling Access to Resources In situations when a single resource needs to be shared between multiple processes, a Lock can be used to avoid conflicting accesses. End of explanation """ import multiprocessing import time def stage_1(cond): """perform first stage of work, then notify stage_2 to continue """ name = multiprocessing.current_process().name print('Starting', name) with cond: print('{} done and ready for stage 2'.format(name)) cond.notify_all() def stage_2(cond): """wait for the condition telling us stage_1 is done""" name = multiprocessing.current_process().name print('Starting', name) with cond: cond.wait() print('{} running'.format(name)) if __name__ == '__main__': condition = multiprocessing.Condition() s1 = multiprocessing.Process(name='s1', target=stage_1, args=(condition,)) s2_clients = [ multiprocessing.Process( name='stage_2[{}]'.format(i), target=stage_2, args=(condition,), ) for i in range(1, 3) ] for c in s2_clients: c.start() time.sleep(1) s1.start() s1.join() for c in s2_clients: c.join() """ Explanation: Synchronizing Operations Condition Condition objects can be used to synchronize parts of a workflow so that some run in parallel but others run sequentially, even if they are in separate processes. 
End of explanation """ import random import multiprocessing import time class ActivePool: def __init__(self): super(ActivePool, self).__init__() self.mgr = multiprocessing.Manager() self.active = self.mgr.list() self.lock = multiprocessing.Lock() def makeActive(self, name): with self.lock: self.active.append(name) def makeInactive(self, name): with self.lock: self.active.remove(name) def __str__(self): with self.lock: return str(self.active) def worker(s, pool): name = multiprocessing.current_process().name with s: pool.makeActive(name) print('Activating {} now running {}'.format( name, pool)) time.sleep(random.random()) pool.makeInactive(name) if __name__ == '__main__': pool = ActivePool() s = multiprocessing.Semaphore(3) jobs = [ multiprocessing.Process( target=worker, name=str(i), args=(s, pool), ) for i in range(10) ] for j in jobs: j.start() while True: alive = 0 for j in jobs: if j.is_alive(): alive += 1 j.join(timeout=0.1) print('Now running {}'.format(pool)) if alive == 0: # all done break """ Explanation: In this example, two process run the second stage of a job in parallel, but only after the first stage is done. Controlling Concurrent Access to Resources Sometimes it is useful to allow more than one worker access to a resource at a time, while still limiting the overall number. For example, a connection pool might support a fixed number of simultaneous connections, or a network application might support a fixed number of concurrent downloads. A Semaphore is one way to manage those connections. 
End of explanation """ import multiprocessing import pprint def worker(d, key, value): d[key] = value if __name__ == '__main__': mgr = multiprocessing.Manager() d = mgr.dict() jobs = [ multiprocessing.Process( target=worker, args=(d, i, i * 2), ) for i in range(10) ] for j in jobs: j.start() for j in jobs: j.join() print('Results:', d) """ Explanation: Managing Shared State In the previous example, the list of active processes is maintained centrally in the ActivePool instance via a special type of list object created by a Manager. The Manager is responsible for coordinating shared information state between all of its users. End of explanation """ import multiprocessing def producer(ns, event): ns.value = 'This is the value' event.set() def consumer(ns, event): try: print('Before event: {}'.format(ns.value)) except Exception as err: print('Before event, error:', str(err)) event.wait() print('After event:', ns.value) if __name__ == '__main__': mgr = multiprocessing.Manager() namespace = mgr.Namespace() event = multiprocessing.Event() p = multiprocessing.Process( target=producer, args=(namespace, event), ) c = multiprocessing.Process( target=consumer, args=(namespace, event), ) c.start() p.start() c.join() p.join() """ Explanation: By creating the list through the manager, it is shared and updates are seen in all processes. Dictionaries are also supported. Shared Namespaces In addition to dictionaries and lists, a Manager can create a shared Namespace. End of explanation """ import multiprocessing def producer(ns, event): # DOES NOT UPDATE GLOBAL VALUE! 
ns.my_list.append('This is the value') event.set() def consumer(ns, event): print('Before event:', ns.my_list) event.wait() print('After event :', ns.my_list) if __name__ == '__main__': mgr = multiprocessing.Manager() namespace = mgr.Namespace() namespace.my_list = [] event = multiprocessing.Event() p = multiprocessing.Process( target=producer, args=(namespace, event), ) c = multiprocessing.Process( target=consumer, args=(namespace, event), ) c.start() p.start() c.join() p.join() """ Explanation: Any named value added to the Namespace is visible to all of the clients that receive the Namespace instance. It is important to know that updates to the contents of mutable values in the namespace are not propagated automatically. End of explanation """ import multiprocessing def do_calculation(data): return data * 2 def start_process(): print('Starting', multiprocessing.current_process().name) if __name__ == '__main__': inputs = list(range(10)) print('Input :', inputs) builtin_outputs = map(do_calculation, inputs) print('Built-in:', [i for i in builtin_outputs]) pool_size = multiprocessing.cpu_count() * 2 pool = multiprocessing.Pool( processes=pool_size, initializer=start_process, ) pool_outputs = pool.map(do_calculation, inputs) pool.close() # no more tasks pool.join() # wrap up current tasks print('Pool :', pool_outputs) """ Explanation: Process Pools The Pool class can be used to manage a fixed number of workers for simple cases where the work to be done can be broken up and distributed between workers independently. The return values from the jobs are collected and returned as a list. The pool arguments include the number of processes and a function to run when starting the task process (invoked once per child). 
End of explanation """ import multiprocessing def do_calculation(data): return data * 2 def start_process(): print('Starting', multiprocessing.current_process().name) if __name__ == '__main__': inputs = list(range(10)) print('Input :', inputs) builtin_outputs = map(do_calculation, inputs) print('Built-in:', builtin_outputs) pool_size = multiprocessing.cpu_count() * 2 pool = multiprocessing.Pool( processes=pool_size, initializer=start_process, maxtasksperchild=2, ) pool_outputs = pool.map(do_calculation, inputs) pool.close() # no more tasks pool.join() # wrap up current tasks print('Pool :', pool_outputs) """ Explanation: By default, Pool creates a fixed number of worker processes and passes jobs to them until there are no more jobs. Setting the maxtasksperchild parameter tells the pool to restart a worker process after it has finished a few tasks, preventing long-running workers from consuming ever more system resources. End of explanation """
kanhua/pypvcell
demos/metpv_data_reader_demo.ipynb
apache-2.0
%load_ext autoreload %autoreload 2 %matplotlib inline import matplotlib.pyplot as plt import numpy as np import pandas as pd import os from pypvcell.solarcell import SQCell,MJCell,TransparentCell from pypvcell.illumination import Illumination from pypvcell.spectrum import Spectrum from pypvcell.metpv_reader import NEDOLocation from pvlib.location import Location from pvlib.tracking import SingleAxisTracker from pvlib.irradiance import total_irrad,aoi_projection nedo_solar_file='hm51106year.csv' """ Explanation: This is the demonstration how to use NEDO data process utility End of explanation """ ngo_loc=NEDOLocation(nedo_solar_file) df=ngo_loc.main_df ngo_loc.main_df.head() """ Explanation: Load the data into a NEDOLocation object End of explanation """ %%time ngo_df=ngo_loc.extract_unstack_hour_data(norm=False) ngo_df.head() ngo_df.to_csv("ngo_df.csv") """ Explanation: main_df adds the column names into the raw data and convert it to a pandas.DataFrame object Convert the NEDO data into a more useful form Add column names into the raw data does not make the dataframe easier to use. nedo_data_reader can "unstack" the data, i.e., making each row an entry of particular time. End of explanation """ ngo_df[['GHI','DHI','dHI']].sum() """ Explanation: Calculate the overall insolation End of explanation """ ngo_dni=ngo_loc.get_DNI() ngo_dni.head() plt.plot(ngo_dni) plt.ylim([0,1000]) """ Explanation: Extrat DNI Daily METPV-11 data records the solar insolation on a horizontal plane. We can use get_DNI() to recalculate the DNI data. 
End of explanation """ ngo_tilt_irr=ngo_loc.tilt_irr(include_solar_pos=True) ngo_tilt_irr.head() ngo_tilt_irr.columns plt.plot(ngo_tilt_irr['poa_direct'],alpha=0.5,label='incidence on tilt surface') plt.plot(ngo_dni,alpha=0.5,label='DNI') plt.ylim([0,1000]) plt.legend() """ Explanation: Calculate DNI on a tilted surface End of explanation """ from matplotlib.colors import LogNorm filtered_df=ngo_tilt_irr.loc[(ngo_tilt_irr['poa_direct']>1) & (ngo_tilt_irr['poa_direct']<500), ["azimuth","zenith",'poa_direct']] ax = plt.subplot(111, projection='polar') ax.plot(filtered_df['azimuth'].values*np.pi/180-np.pi/2, filtered_df['zenith'].values-ngo_loc.latitude,'.') plt.show() import matplotlib as mpl filtered_df=ngo_tilt_irr.loc[(ngo_tilt_irr['poa_direct']>1) & (ngo_tilt_irr['poa_direct']<500), ["azimuth","zenith",'poa_direct']] ax = plt.subplot(111, projection='polar') colormap = plt.get_cmap('hsv') norm = mpl.colors.Normalize(1, 400) cax=ax.scatter(filtered_df['azimuth'].values*np.pi/180-np.pi/2, filtered_df['zenith'].values-ngo_loc.latitude, c=filtered_df['poa_direct'].values,s=200,norm=norm,alpha=0.5) plt.colorbar(cax) plt.savefig("nagoya_angular.png",dpi=600) plt.show() """ Explanation: Visualize the sun irradiances in angular plot End of explanation """ ngo_tilt_irr.columns plt.hist(ngo_tilt_irr['aoi'],weights=ngo_tilt_irr['poa_direct'],bins=100) plt.show() """ Explanation: Analyze angle of incidence End of explanation """
gfeiden/MagneticUpperSco
notes/equipartition_B_strength.ipynb
mit
%matplotlib inline import numpy as np import matplotlib.pyplot as plt import scipy.interpolate as scint """ Explanation: Equipartition Surface Magnetic Field Strengths Computing equipartition magnetic field strengths using PHOENIX stellar atmosphere models (Hauschildt et al. 1999). End of explanation """ iso_10 = np.genfromtxt('../models/iso/std/dmestar_00010.0myr_z+0.00_a+0.00_phx.iso') iso_20 = np.genfromtxt('../models/iso/std/dmestar_00020.0myr_z+0.00_a+0.00_phx.iso') """ Explanation: We need to import a file with stellar parameters and a table of stellar atmosphere properties in photospheric layers. We approximate the surface equipartition magnetic field strength as the equipartition field strength where the magnetic pressure is equal the gas pressure at a Rossland optical depth $\tau = 1$. This is roughly equivalent to the equipartition field strength in the optical photospheric layers. Stellar parameters will be taken from a non-magnetic stellar model isochrone at an age of 10 Myr. Start by importing stellar parameters, End of explanation """ atm = np.genfromtxt('../models/atm/tab/Zp0d0.ap0d0_t001.dat') """ Explanation: Now load a table with the atmospheric gas pressure and temperature at an optical depth $\tau = 1$ for a set of $\log(g)$s and $T_{\rm eff}$s. We'll adopt a solar metallicity to simplify things. End of explanation """ teffs = np.transpose(atm[:, 0]) loggs = np.arange(-0.5, 5.6, 0.5) print teffs print loggs """ Explanation: Values for the effective temperature of the atmosphere model is tabulated in column 0, but we must define an array with $\log(g)$ values. 
End of explanation """ temps = np.empty((len(teffs), len(loggs))) press = np.empty((len(teffs), len(loggs))) for i, teff in enumerate(atm[:, 1:]): for j, prop in enumerate(teff): if prop == 0.: prop = np.nan else: pass if j%2 == 0: press[i, j/2] = prop else: temps[i, j/2] = prop """ Explanation: The atmosphere structure table isn't quite in the correct form for our purposes, as pressures and temperatures are intermingled. We should separate those properties into individual pressure and temperature tables. End of explanation """ pres_surface = scint.interp2d(teffs, loggs, np.transpose(press), kind='linear') """ Explanation: With the individual tables formed, we now need to construct interpolation surfaces using a 2D interpolation routine. Note that we only really care about the pressure table, as that sets the equipartition magnetic field strengths. End of explanation """ B_eq_10 = np.empty((len(iso_10[:62]))) B_eq_20 = np.empty((len(iso_20[:62]))) for i, star in enumerate(iso_10[:62]): B_eq_10[i] = np.sqrt(8.0*np.pi*pres_surface(10**star[1], star[2])) for i, star in enumerate(iso_20[:62]): B_eq_20[i] = np.sqrt(8.0*np.pi*pres_surface(10**star[1], star[2])) """ Explanation: We are in a position to compute surface pressures and, by extension, equipartition magnetic field strengths. End of explanation """ B_eq_20 """ Explanation: See what kind of values we obtain. End of explanation """ fig, ax = plt.subplots(1, 1, figsize=(9., 6.)) ax.set_xlabel('${\\rm Mass}\ (M_{\\odot})$', fontsize=20) ax.set_ylabel('$\\langle {\\rm B}f \\rangle_{\\rm eq}$', fontsize=20) ax.grid() ax.plot(iso_10[:62, 0], B_eq_10, 'o-', lw=1, color='#333333', alpha=0.7) ax.plot(iso_20[:62, 0], B_eq_20, 'o-', lw=1, color='#1e90ff', alpha=0.7) """ Explanation: These values match estimates from convective energy equiparition and observational measurements (e.g., Saar). 
The general trend is that equipartition field strengths decrease toward higher masses/temperatures, which matches intuition since surface gas pressures decrease as stellar surface layers become more extended and fluffy. We can visualize this: End of explanation """ fig, ax = plt.subplots(1, 1, figsize=(9., 6.)) ax.set_xlabel('$T_{\\rm eff}\ (K)$', fontsize=20) ax.set_ylabel('$\\langle {\\rm B}f \\rangle_{\\rm eq} ({\\rm G})$', fontsize=20) ax.grid() ax.plot(10**iso_10[:62, 1], B_eq_10, 'o-', lw=1, color='#333333', alpha=0.7) ax.plot(10**iso_20[:62, 1], B_eq_20, 'o-', lw=1, color='#1e90ff', alpha=0.7) """ Explanation: and as a function of effective temperature: End of explanation """ for i, star in enumerate(iso_10[:62]): if int(star[0]*100.)%10 == 0: print '{:5.1f} & {:6.0f} & {:6.2f} & {:6.2f} \\\\'.format(star[0], 10**star[1], star[2], B_eq_10[i]/1.0e3) else: pass """ Explanation: For the manuscript, there is a table of stellar properties and the resulting equipartition surface magnetic field strengths. Here is that table, End of explanation """ B_eq_curve_data = np.empty((17, 2)) j = 0 for i, star in enumerate(iso_10[:62]): if int(star[0]*100.)%10 == 0: B_eq_curve_data[j, 0] = star[0] B_eq_curve_data[j, 1] = B_eq_10[i] j += 1 else: pass B_eq_interp_curve = scint.interp1d(B_eq_curve_data[:, 0], B_eq_curve_data[:, 1], kind='cubic', axis=0) """ Explanation: When running models, an interpolation curve is constructed from the data in the table to find the equipartition value as a function of mass. Let's construct that curve. 
End of explanation """ fig, ax = plt.subplots(1, 1, figsize=(9., 6.)) ax.set_xlabel('${\\rm Mass}\ (M_{\\odot})$', fontsize=20) ax.set_ylabel('$\\langle {\\rm B}f \\rangle_{\\rm eq}\ ({\\rm G})$', fontsize=20) ax.grid() ax.plot(iso_10[:62, 0], B_eq_10, 'o', lw=1, color='#333333', alpha=0.7) ax.plot(np.arange(0.1, 1.7, 0.02), B_eq_interp_curve(np.arange(0.1, 1.7, 0.02)), '-', lw=3, color='#1e90ff') """ Explanation: Now we can compare the accuracy of the interpolated curve. End of explanation """
xaibeing/cn-deep-learning
tutorials/sentiment-rnn/Sentiment_RNN_Solution.ipynb
mit
import numpy as np import tensorflow as tf with open('../sentiment-network/reviews.txt', 'r') as f: reviews = f.read() with open('../sentiment-network/labels.txt', 'r') as f: labels = f.read() reviews[:2000] """ Explanation: Sentiment Analysis with an RNN In this notebook, you'll implement a recurrent neural network that performs sentiment analysis. Using an RNN rather than a feedfoward network is more accurate since we can include information about the sequence of words. Here we'll use a dataset of movie reviews, accompanied by labels. The architecture for this network is shown below. <img src="assets/network_diagram.png" width=400px> Here, we'll pass in words to an embedding layer. We need an embedding layer because we have tens of thousands of words, so we'll need a more efficient representation for our input data than one-hot encoded vectors. You should have seen this before from the word2vec lesson. You can actually train up an embedding with word2vec and use it here. But it's good enough to just have an embedding layer and let the network learn the embedding table on it's own. From the embedding layer, the new representations will be passed to LSTM cells. These will add recurrent connections to the network so we can include information about the sequence of words in the data. Finally, the LSTM cells will go to a sigmoid output layer here. We're using the sigmoid because we're trying to predict if this text has positive or negative sentiment. The output layer will just be a single unit then, with a sigmoid activation function. We don't care about the sigmoid outputs except for the very last one, we can ignore the rest. We'll calculate the cost from the output of the last step and the training label. 
End of explanation """ from string import punctuation all_text = ''.join([c for c in reviews if c not in punctuation]) reviews = all_text.split('\n') all_text = ' '.join(reviews) words = all_text.split() all_text[:2000] words[:100] """ Explanation: Data preprocessing The first step when building a neural network model is getting your data into the proper form to feed into the network. Since we're using embedding layers, we'll need to encode each word with an integer. We'll also want to clean it up a bit. You can see an example of the reviews data above. We'll want to get rid of those periods. Also, you might notice that the reviews are delimited with newlines \n. To deal with those, I'm going to split the text into each review using \n as the delimiter. Then I can combined all the reviews back together into one big string. First, let's remove all punctuation. Then get all the text without the newlines and split it into individual words. End of explanation """ from collections import Counter counts = Counter(words) vocab = sorted(counts, key=counts.get, reverse=True) vocab_to_int = {word: ii for ii, word in enumerate(vocab, 1)} reviews_ints = [] for each in reviews: reviews_ints.append([vocab_to_int[word] for word in each.split()]) """ Explanation: Encoding the words The embedding lookup requires that we pass in integers to our network. The easiest way to do this is to create dictionaries that map the words in the vocabulary to integers. Then we can convert each of our reviews into integers so they can be passed into the network. Exercise: Now you're going to encode the words with integers. Build a dictionary that maps words to integers. Later we're going to pad our input vectors with zeros, so make sure the integers start at 1, not 0. Also, convert the reviews to integers and store the reviews in a new list called reviews_ints. 
End of explanation """ labels = labels.split('\n') labels = np.array([1 if each == 'positive' else 0 for each in labels]) review_lens = Counter([len(x) for x in reviews_ints]) print("Zero-length reviews: {}".format(review_lens[0])) print("Maximum review length: {}".format(max(review_lens))) """ Explanation: Encoding the labels Our labels are "positive" or "negative". To use these labels in our network, we need to convert them to 0 and 1. Exercise: Convert labels from positive and negative to 1 and 0, respectively. End of explanation """ non_zero_idx = [ii for ii, review in enumerate(reviews_ints) if len(review) != 0] len(non_zero_idx) reviews_ints[-1] """ Explanation: Okay, a couple issues here. We seem to have one review with zero length. And, the maximum review length is way too many steps for our RNN. Let's truncate to 200 steps. For reviews shorter than 200, we'll pad with 0s. For reviews longer than 200, we can truncate them to the first 200 characters. Exercise: First, remove the review with zero length from the reviews_ints list. End of explanation """ reviews_ints = [reviews_ints[ii] for ii in non_zero_idx] labels = np.array([labels[ii] for ii in non_zero_idx]) """ Explanation: Turns out its the final review that has zero length. But that might not always be the case, so let's make it more general. End of explanation """ seq_len = 200 features = np.zeros((len(reviews_ints), seq_len), dtype=int) for i, row in enumerate(reviews_ints): features[i, -len(row):] = np.array(row)[:seq_len] features[:10,:100] """ Explanation: Exercise: Now, create an array features that contains the data we'll pass to the network. The data should come from review_ints, since we want to feed integers to the network. Each row should be 200 elements long. For reviews shorter than 200 words, left pad with 0s. That is, if the review is ['best', 'movie', 'ever'], [117, 18, 128] as integers, the row will look like [0, 0, 0, ..., 0, 117, 18, 128]. 
For reviews longer than 200, use on the first 200 words as the feature vector. This isn't trivial and there are a bunch of ways to do this. But, if you're going to be building your own deep learning networks, you're going to have to get used to preparing your data. End of explanation """ split_frac = 0.8 split_idx = int(len(features)*0.8) train_x, val_x = features[:split_idx], features[split_idx:] train_y, val_y = labels[:split_idx], labels[split_idx:] test_idx = int(len(val_x)*0.5) val_x, test_x = val_x[:test_idx], val_x[test_idx:] val_y, test_y = val_y[:test_idx], val_y[test_idx:] print("\t\t\tFeature Shapes:") print("Train set: \t\t{}".format(train_x.shape), "\nValidation set: \t{}".format(val_x.shape), "\nTest set: \t\t{}".format(test_x.shape)) """ Explanation: Training, Validation, Test With our data in nice shape, we'll split it into training, validation, and test sets. Exercise: Create the training, validation, and test sets here. You'll need to create sets for the features and the labels, train_x and train_y for example. Define a split fraction, split_frac as the fraction of data to keep in the training set. Usually this is set to 0.8 or 0.9. The rest of the data will be split in half to create the validation and testing data. End of explanation """ lstm_size = 256 lstm_layers = 1 batch_size = 500 learning_rate = 0.001 """ Explanation: With train, validation, and text fractions of 0.8, 0.1, 0.1, the final shapes should look like: Feature Shapes: Train set: (20000, 200) Validation set: (2500, 200) Test set: (2500, 200) Build the graph Here, we'll build the graph. First up, defining the hyperparameters. lstm_size: Number of units in the hidden layers in the LSTM cells. Usually larger is better performance wise. Common values are 128, 256, 512, etc. lstm_layers: Number of LSTM layers in the network. I'd start with 1, then add more if I'm underfitting. batch_size: The number of reviews to feed the network in one training pass. 
Typically this should be set as high as you can go without running out of memory. learning_rate: Learning rate End of explanation """ n_words = len(vocab_to_int) # Create the graph object graph = tf.Graph() # Add nodes to the graph with graph.as_default(): inputs_ = tf.placeholder(tf.int32, [None, None], name='inputs') labels_ = tf.placeholder(tf.int32, [None, None], name='labels') keep_prob = tf.placeholder(tf.float32, name='keep_prob') """ Explanation: For the network itself, we'll be passing in our 200 element long review vectors. Each batch will be batch_size vectors. We'll also be using dropout on the LSTM layer, so we'll make a placeholder for the keep probability. Exercise: Create the inputs_, labels_, and drop out keep_prob placeholders using tf.placeholder. labels_ needs to be two-dimensional to work with some functions later. Since keep_prob is a scalar (a 0-dimensional tensor), you shouldn't provide a size to tf.placeholder. End of explanation """ # Size of the embedding vectors (number of units in the embedding layer) embed_size = 300 with graph.as_default(): embedding = tf.Variable(tf.random_uniform((n_words, embed_size), -1, 1)) embed = tf.nn.embedding_lookup(embedding, inputs_) """ Explanation: Embedding Now we'll add an embedding layer. We need to do this because there are 74000 words in our vocabulary. It is massively inefficient to one-hot encode our classes here. You should remember dealing with this problem from the word2vec lesson. Instead of one-hot encoding, we can have an embedding layer and use that layer as a lookup table. You could train an embedding layer using word2vec, then load it here. But, it's fine to just make a new layer and let the network learn the weights. Exercise: Create the embedding lookup matrix as a tf.Variable. Use that embedding matrix to get the embedded vectors to pass to the LSTM cell with tf.nn.embedding_lookup. This function takes the embedding matrix and an input tensor, such as the review vectors. 
Then, it'll return another tensor with the embedded vectors. So, if the embedding layer as 200 units, the function will return a tensor with size [batch_size, 200]. End of explanation """ with graph.as_default(): # Your basic LSTM cell lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size) # Add dropout to the cell drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob) # Stack up multiple LSTM layers, for deep learning cell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers) # Getting an initial state of all zeros initial_state = cell.zero_state(batch_size, tf.float32) """ Explanation: LSTM cell <img src="assets/network_diagram.png" width=400px> Next, we'll create our LSTM cells to use in the recurrent network (TensorFlow documentation). Here we are just defining what the cells look like. This isn't actually building the graph, just defining the type of cells we want in our graph. To create a basic LSTM cell for the graph, you'll want to use tf.contrib.rnn.BasicLSTMCell. Looking at the function documentation: tf.contrib.rnn.BasicLSTMCell(num_units, forget_bias=1.0, input_size=None, state_is_tuple=True, activation=&lt;function tanh at 0x109f1ef28&gt;) you can see it takes a parameter called num_units, the number of units in the cell, called lstm_size in this code. So then, you can write something like lstm = tf.contrib.rnn.BasicLSTMCell(num_units) to create an LSTM cell with num_units. Next, you can add dropout to the cell with tf.contrib.rnn.DropoutWrapper. This just wraps the cell in another cell, but with dropout added to the inputs and/or outputs. It's a really convenient way to make your network better with almost no effort! So you'd do something like drop = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob) Most of the time, you're network will have better performance with more layers. That's sort of the magic of deep learning, adding more layers allows the network to learn really complex relationships. 
Again, there is a simple way to create multiple layers of LSTM cells with tf.contrib.rnn.MultiRNNCell: cell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers) Here, [drop] * lstm_layers creates a list of cells (drop) that is lstm_layers long. The MultiRNNCell wrapper builds this into multiple layers of RNN cells, one for each cell in the list. So the final cell you're using in the network is actually multiple (or just one) LSTM cells with dropout. But it all works the same from an achitectural viewpoint, just a more complicated graph in the cell. Exercise: Below, use tf.contrib.rnn.BasicLSTMCell to create an LSTM cell. Then, add drop out to it with tf.contrib.rnn.DropoutWrapper. Finally, create multiple LSTM layers with tf.contrib.rnn.MultiRNNCell. Here is a tutorial on building RNNs that will help you out. End of explanation """ with graph.as_default(): outputs, final_state = tf.nn.dynamic_rnn(cell, embed, initial_state=initial_state) """ Explanation: RNN forward pass <img src="assets/network_diagram.png" width=400px> Now we need to actually run the data through the RNN nodes. You can use tf.nn.dynamic_rnn to do this. You'd pass in the RNN cell you created (our multiple layered LSTM cell for instance), and the inputs to the network. outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=initial_state) Above I created an initial state, initial_state, to pass to the RNN. This is the cell state that is passed between the hidden layers in successive time steps. tf.nn.dynamic_rnn takes care of most of the work for us. We pass in our cell and the input to the cell, then it does the unrolling and everything else for us. It returns outputs for each time step and the final_state of the hidden layer. Exercise: Use tf.nn.dynamic_rnn to add the forward pass through the RNN. Remember that we're actually passing in vectors from the embedding layer, embed. 
End of explanation """ with graph.as_default(): predictions = tf.contrib.layers.fully_connected(outputs[:, -1], 1, activation_fn=tf.sigmoid) cost = tf.losses.mean_squared_error(labels_, predictions) optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost) """ Explanation: Output We only care about the final output, we'll be using that as our sentiment prediction. So we need to grab the last output with outputs[:, -1], the calculate the cost from that and labels_. End of explanation """ with graph.as_default(): correct_pred = tf.equal(tf.cast(tf.round(predictions), tf.int32), labels_) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) """ Explanation: Validation accuracy Here we can add a few nodes to calculate the accuracy which we'll use in the validation pass. End of explanation """ def get_batches(x, y, batch_size=100): n_batches = len(x)//batch_size x, y = x[:n_batches*batch_size], y[:n_batches*batch_size] for ii in range(0, len(x), batch_size): yield x[ii:ii+batch_size], y[ii:ii+batch_size] """ Explanation: Batching This is a simple function for returning batches from our data. First it removes data such that we only have full batches. Then it iterates through the x and y arrays and returns slices out of those arrays with size [batch_size]. 
End of explanation """ epochs = 10 with graph.as_default(): saver = tf.train.Saver() with tf.Session(graph=graph) as sess: sess.run(tf.global_variables_initializer()) iteration = 1 for e in range(epochs): state = sess.run(initial_state) for ii, (x, y) in enumerate(get_batches(train_x, train_y, batch_size), 1): feed = {inputs_: x, labels_: y[:, None], keep_prob: 0.5, initial_state: state} loss, state, _ = sess.run([cost, final_state, optimizer], feed_dict=feed) if iteration%5==0: print("Epoch: {}/{}".format(e, epochs), "Iteration: {}".format(iteration), "Train loss: {:.3f}".format(loss)) if iteration%25==0: val_acc = [] val_state = sess.run(cell.zero_state(batch_size, tf.float32)) for x, y in get_batches(val_x, val_y, batch_size): feed = {inputs_: x, labels_: y[:, None], keep_prob: 1, initial_state: val_state} batch_acc, val_state = sess.run([accuracy, final_state], feed_dict=feed) val_acc.append(batch_acc) print("Val acc: {:.3f}".format(np.mean(val_acc))) iteration +=1 saver.save(sess, "checkpoints/sentiment.ckpt") """ Explanation: Training Below is the typical training code. If you want to do this yourself, feel free to delete all this code and implement it yourself. Before you run this, make sure the checkpoints directory exists. End of explanation """ test_acc = [] with tf.Session(graph=graph) as sess: saver.restore(sess, tf.train.latest_checkpoint('checkpoints')) test_state = sess.run(cell.zero_state(batch_size, tf.float32)) for ii, (x, y) in enumerate(get_batches(test_x, test_y, batch_size), 1): feed = {inputs_: x, labels_: y[:, None], keep_prob: 1, initial_state: test_state} batch_acc, test_state = sess.run([accuracy, final_state], feed_dict=feed) test_acc.append(batch_acc) print("Test accuracy: {:.3f}".format(np.mean(test_acc))) """ Explanation: Testing End of explanation """
RogueAstro/RV_PS2017
notebooks/HIP67620_example.ipynb
mit
import numpy as np import matplotlib.pyplot as plt import matplotlib.pylab as pylab import astropy.units as u from radial import estimate, dataset %matplotlib inline """ Explanation: The orbital parameters of the binary solar twin HIP 67620 radial is a simple program designed to do a not very trivial task: simulate radial velocities of a star orbited by a massive object or "reverse engineer" radial velocity measurements to estimate the orbital parameters of the system being studied. The formalism behind it is based on https://arxiv.org/abs/1009.1738. Our objective in this notebook is to use radial velocity data of the solar twin HIP 67620 to estimate the projected mass, separation and other orbital parameters of its companion. We start by importing the necessary packages. Notice that we will specifically import the modules orbit, estimate, and dataset from the radial package. End of explanation """ harps = dataset.RVDataSet(file='../data/HIP67620_HARPS.dat', # File name t_offset=-2.45E6, # Time offset (units of days) rv_offset='subtract_mean', # RV offset instrument_name='HARPS', target_name='HIP 67620', skiprows=1, # Number of rows to skip in the data file t_col=5, # Column corresponding to time in the data file rv_col=6, # Column corresponding to RVs rv_unc_col=7 # Column corresponding to RV ucnertainties ) aat = dataset.RVDataSet(file='../data/HIP67620_AAT.dat', t_offset=-2.45E6, rv_offset='subtract_mean', instrument_name='AATPS', target_name='HIP 67620', delimiter=',') w16 = dataset.RVDataSet(file='../data/HIP67620_WF16.dat', t_offset=-5E4, rv_offset='subtract_mean', instrument_name='W16', target_name='HIP 67620', t_col=1, rv_col=3, rv_unc_col=4) """ Explanation: We then extract the data from the text files located in the tests folder. They will be stored in RVDataSet objects, which are defined in the dataset module. 
End of explanation """ w16.plot() """ Explanation: We can visualize the radial velocities by running the function plot() of a given dataset object. For instance: End of explanation """ # guess is a dictionary, which is a special type of "list" in python # Instead of being indexed by a number, the items in a dictionary # are indexed by a key (which is a string) guess = {'k': 6000, 'period': 4000, 't0': 5000, 'omega': 180 * np.pi / 180, 'ecc': 0.3, 'gamma_0': 0, 'gamma_1': 0, 'gamma_2': 0} """ Explanation: Now that we have the data, how do we estimate the orbital parameters of the system? We use the methods and functions inside the estimate module. But first, we need to provide an initial guess for the orbital parameters. They are: k: radial velocity semi-amplitude $K$ (in m/s) period: orbital period $T$ (in days) t0: time of periastron passage $t_0$ (in days) omega: argument of periapse $\omega$ (in radians) ecc: eccentricity of the orbit $e$ gamma_X: RV offset $\gamma$ of the dataset number $X$ (in m/s) A first guess is usually an educated guess based on either a periodogram and/or simple visual inspection of the data. End of explanation """ estim = estimate.FullOrbit(datasets=[harps, aat, w16], guess=guess, parametrization='mc10') plot = estim.plot_rvs(plot_guess=True, fold=False, legend_loc=2) plt.show() """ Explanation: Now we need to instantiate a FullOrbit object with the datasets and our guess, as well as the parametrization option we want to use. Then, we plot it. End of explanation """ result = estim.lmfit_orbit(update_guess=True) """ Explanation: We estimate the orbital parameters of the system using the Nelder-Mead optimization algorithm implemented in the lmfit package. This will compute the best solution or, in other words, the one that minimizes the residuals of the fit. It is probable that the first solutions are not good, and that is fine. Just run the estimation a couple of times until you get the satisfactory result. 
End of explanation """ pylab.rcParams['font.size'] = 12 fig, gs = estim.plot_rvs(plot_guess=True, fold=False, legend_loc=4) """ Explanation: Now let's plot the solution we obtained. End of explanation """ estim.emcee_orbit(nwalkers=16, nsteps=10000, nthreads=12) """ Explanation: If the result looks good, that is great: we have the best solution of the orbit. However, we still need to estimate uncertainties for the orbital parameters. We do that using emcee. This is a Markov-Chain Monte Carlo (MCMC) simulation, in which we simulate a bunch of sets of orbital parameters that could still fit the data given the uncertainties of the observations, but are a little bit off from the best solution. They will make up the uncertainties of the fit. This simulation starts from the best solution and do random walks across the parameter space. We will provide the number of walkers (nwalkers) for the MCMC simulation, as well as the number of steps (nsteps) that each one will take. How do we know the number of walkers and steps to use? As a general rule of thumb, it is recommended to use at least 2 times the number of parameters for the number of walkers, and as many steps as it takes for the simulation to converge. Note: We can use multiprocessing in emcee to make the calculations somewhat faster. For that, we need to provide the number of processing threads (in the parameter nthreads) of your computer. Most laptops have 2 or 4 threads. End of explanation """ estim.plot_emcee_sampler() """ Explanation: With that done, we plot the walkers to see how the simulation went. End of explanation """ estim.make_chains(500) """ Explanation: Let's cut the beginning of the simulation (the first 500 steps) because they correspond to the burn-in phase. End of explanation """ fig = estim.plot_corner() plt.show() """ Explanation: Now we use a corner plot to analyze the posterior distributions of the parameters, as well as the correlations between them. 
End of explanation """ estim.print_emcee_result(main_star_mass=0.954, # in M_sol units mass_sigma=0.006) """ Explanation: And that should be pretty much it. Finally, we compute the orbital parameters in a human-readable fashion. End of explanation """
LSSTC-DSFP/LSSTC-DSFP-Sessions
Sessions/Session13/Day0/TooBriefVisualization.ipynb
mit
from sklearn.datasets import load_linnerud linnerud = load_linnerud() chinups = linnerud.data[:,0] """ Explanation: Introduction to Visualization: Density Estimation and Data Exploration Version 0.2 There are many flavors of data analysis that fall under the "visualization" umbrella in astronomy. Today, by way of example, we will focus on 2 basic problems. By AA Miller (Northwestern/CIERA) 28 September 2021 Problem 1) Density Estimation Starting with 2MASS and SDSS and extending through to the Rubin Observatory, we are firmly entrenched in an era where data and large statistical samples are cheap. With this explosion in data volume comes a problem: we do not know the underlying probability density function (PDF) of the random variables measured via our observations. Hence - density estimation: an attempt to recover the unknown PDF from observations. In some cases theory can guide us to a parametric form for the PDF, but more often than not such guidance is not available. There is a common, simple, and very familiar tool for density estimation: histograms. But there is also a problem: HISTOGRAMS LIE! We will "prove" this to be the case in a series of examples. For this exercise, we will load the famous Linnerud data set, which tested 20 middle aged men by measuring the number of chinups, situps, and jumps they could do in order to compare these numbers to their weight, pulse, and waist size. To load the data (just chinups for now) we will run the following: from sklearn.datasets import load_linnerud linnerud = load_linnerud() chinups = linnerud.data[:,0] End of explanation """ fig, ax = plt.subplots() ax.hist( # complete ax.set_xlabel('chinups', fontsize=14) ax.set_ylabel('N', fontsize=14) fig.tight_layout() """ Explanation: Problem 1a Plot the histogram for the number of chinups using the default settings in pyplot. 
End of explanation """ fig, ax = plt.subplots() ax.hist(# complete ax.hist(# complete ax.set_xlabel('chinups', fontsize=14) ax.set_ylabel('N', fontsize=14) fig.tight_layout() """ Explanation: Something is wrong here - the choice of bin centers and number of bins suggest that there is a 0% probability that middle aged men can do 10 chinups. This is intuitively incorrect; we will now adjust the bins in the histogram. Problem 1b Using the same data make 2 new histograms: (i) one with 5 bins (bins = 5), and (ii) one with the bars centered on the left bin edges (align = "left"). Hint - if overplotting the results, you may find it helpful to use the histtype = "step" option End of explanation """ bins = np.append(# complete fig, ax = plt.subplots() ax.hist( # complete ax.set_xlabel('chinups', fontsize=14) ax.set_ylabel('N', fontsize=14) fig.tight_layout() """ Explanation: These small changes significantly change the estimator for the PDF. With fewer bins we get something closer to a continuous distribution, while shifting the bin centers reduces the probability to zero at 9 chinups. What if we instead allow the bin width to vary and require the same number of points in each bin? You can determine the bin edges for bins with 5 sources using the following command: bins = np.append(np.sort(chinups)[::5], np.max(chinups)) Problem 1c Plot a histogram with variable width bins, each with the same number of points. Hint - setting density = True will normalize the bin heights so that the PDF integrates to 1. End of explanation """ fig, ax = plt.subplots() ax.hist(chinups, histtype = 'step') # this is the code for the rug plot ax.plot(chinups, np.zeros_like(chinups), '|', color='k', ms = 25, mew = 4) ax.set_xlabel('chinups', fontsize=14) ax.set_ylabel('N', fontsize=14) fig.tight_layout() """ Explanation: Ending the lie Earlier I stated that histograms lie. One simple way to combat this lie: show all the data. 
Displaying the original data points allows viewers to understand the effects of the particular bin choices that have been made (though this can also be cumbersome for very large data sets, which is essentially all modern data sets). The standard for showing individual observations relative to a histogram is a "rug plot," which shows a vertical tick (or other symbol) at the location of each source used to estimate the PDF. Problem 1d Execute the cell below to see an example of a rug plot. End of explanation """ # execute this cell from sklearn.neighbors import KernelDensity def kde_sklearn(data, grid, bandwidth = 1.0, **kwargs): kde_skl = KernelDensity(bandwidth = bandwidth, **kwargs) kde_skl.fit(data[:, np.newaxis]) log_pdf = kde_skl.score_samples(grid[:, np.newaxis]) # sklearn returns log(density) return np.exp(log_pdf) """ Explanation: Of course, even rug plots are not a perfect solution. Many of the chinup measurements are repeated, and those instances cannot be easily isolated above. One (slightly) better solution is to vary the transparency of the rug "whiskers" using alpha = 0.3 in the whiskers plot call. But this too is far from perfect. To recap, histograms are not ideal for density estimation for the following reasons: They introduce discontinuities that are not present in the data They are strongly sensitive to user choices ($N_\mathrm{bins}$, bin centering, bin grouping), without any mathematical guidance to what these choices should be They are difficult to visualize in higher dimensions Histograms are useful for generating a quick representation of univariate data, but for the reasons listed above they should never be used for analysis. Most especially, functions should not be fit to histograms given how greatly the number of bins and bin centering affects the output histogram. Okay - so if we are going to rail on histograms this much, there must be a better option. 
There is: Kernel Density Estimation (KDE), a nonparametric form of density estimation whereby a normalized kernel function is convolved with the discrete data to obtain a continuous estimate of the underlying PDF. As a rule, the kernel must integrate to 1 over the interval $-\infty$ to $\infty$ and be symmetric. There are many possible kernels (gaussian is highly popular, though Epanechnikov, an inverted parabola, produces the minimal mean square error). KDE is not completely free of the problems we illustrated for histograms above (in particular, both a kernel and the width of the kernel need to be selected), but it does manage to correct a number of the ills. We will now demonstrate this via a few examples using the scikit-learn implementation of KDE: KernelDensity, which is part of the sklearn.neighbors module. Note There are many implementations of KDE in Python, and Jake VanderPlas has put together an excellent description of the strengths and weaknesses of each. We will use the scitkit-learn version as it is in many cases the fastest implementation. To demonstrate the basic idea behind KDE, we will begin by representing each point in the dataset as a block (i.e. we will adopt the tophat kernel). Borrowing some code from Jake, we can estimate the KDE using the following code: from sklearn.neighbors import KernelDensity def kde_sklearn(data, grid, bandwidth = 1.0, **kwargs): kde_skl = KernelDensity(bandwidth = bandwidth, **kwargs) kde_skl.fit(data[:, np.newaxis]) log_pdf = kde_skl.score_samples(grid[:, np.newaxis]) # sklearn returns log(density) return np.exp(log_pdf) The two main options to set are the bandwidth and the kernel. 
End of explanation """ grid = # complete PDFtophat = kde_sklearn( # complete fig, ax = plt.subplots() ax.plot( # complete ax.set_xlabel('chinups', fontsize=14) ax.set_ylabel('PDF', fontsize=14) fig.tight_layout() """ Explanation: Problem 1e Plot the KDE of the PDF for the number of chinups middle aged men can do using a bandwidth of 0.1 and a tophat kernel. Hint - as a general rule, the grid should be smaller than the bandwidth when plotting the PDF. End of explanation """ PDFtophat1 = # complete PDFtophat5 = # complete fig, ax = plt.subplots() ax.plot(# complete ax.plot(# complete ax.set_xlabel('chinups', fontsize=14) ax.set_ylabel('PDF', fontsize=14) fig.tight_layout() ax.legend() """ Explanation: In this representation, each "block" has a height of 0.25. The bandwidth is too narrow to provide any overlap between the blocks. This choice of kernel and bandwidth produces an estimate that is essentially a histogram with a large number of bins. It gives no sense of continuity for the distribution. Now, we examine the difference (relative to histograms) upon changing the the width (i.e. kernel) of the blocks. Problem 1f Plot the KDE of the PDF for the number of chinups middle aged men can do using bandwidths of 1 and 5 and a tophat kernel. How do the results differ from the histogram plots above? End of explanation """ PDFgaussian = # complete PDFepanechnikov = # complete fig, ax = plt.subplots() ax.plot(# complete ax.plot(# complete ax.legend(loc = 2) ax.set_xlabel('chinups', fontsize=14) ax.set_ylabel('PDF', fontsize=14) fig.tight_layout() """ Explanation: It turns out blocks are not an ideal representation for continuous data (see discussion on histograms above). Now we will explore the resulting PDF from other kernels. Problem 1g Plot the KDE of the PDF for the number of chinups middle aged men can do using a gaussian and Epanechnikov kernel. How do the results differ from the histogram plots above? Hint - you will need to select the bandwidth. 
The examples above should provide insight into the useful range for bandwidth selection. You may need to adjust the values to get an answer you "like." End of explanation """ x = np.arange(0, 6*np.pi, 0.1) y = np.cos(x) fig, ax=plt.subplots() ax.plot(x,y, lw = 2) ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_xlim(0, 6*np.pi) fig.tight_layout() """ Explanation: So, what is the optimal choice of bandwidth and kernel? Unfortunately, there is no hard and fast rule, as every problem will likely have a different optimization. Typically, the choice of bandwidth is far more important than the choice of kernel. In the case where the PDF is likely to be gaussian (or close to gaussian), then Silverman's rule of thumb can be used: $$h = 1.059 \sigma n^{-1/5}$$ where $h$ is the bandwidth, $\sigma$ is the standard deviation of the samples, and $n$ is the total number of samples. Note - in situations with bimodal or more complicated distributions, this rule of thumb can lead to woefully inaccurate PDF estimates. The most general way to estimate the choice of bandwidth is via cross validation (we will cover cross-validation during the session on Machine Learning). What about multidimensional PDFs? It is possible using many of the Python implementations of KDE to estimate multidimensional PDFs, though it is very very important to beware the curse of dimensionality in these circumstances. Problem 2) Data Exploration Now a more open ended topic: data exploration. In brief, data exploration encompases a large suite of tools (including those discussed above) to examine data that live in large dimensional spaces. There is no single best method or optimal direction for data exploration. Instead, today we will introduce some of the tools available via python. As an example we will start with a basic line plot - and examine tools beyond matplotlib. 
End of explanation """ import seaborn as sns fig, ax = plt.subplots() ax.plot(x,y, lw = 2) ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_xlim(0, 6*np.pi) fig.tight_layout() """ Explanation: Seaborn Seaborn is a plotting package that enables many useful features for exploration. In fact, a lot of the functionality that we developed above can readily be handled with seaborn. To begin, we will make the same plot that we created in matplotlib. End of explanation """ sns.set_style(# complete # complete # complete # complete """ Explanation: These plots look identical, but it is possible to change the style with seaborn. seaborn has 5 style presets: darkgrid, whitegrid, dark, white, and ticks. You can change the preset using the following: sns.set_style("whitegrid") which will change the output for all subsequent plots. Note - if you want to change the style for only a single plot, that can be accomplished with the following: with sns.axes_style("dark"): with all ploting commands inside the with statement. Problem 2a Re-plot the sine curve using each seaborn preset to see which you like best - then adopt this for the remainder of the notebook. End of explanation """ # default color palette current_palette = sns.color_palette() sns.palplot(current_palette) """ Explanation: The folks behind seaborn have thought a lot about color palettes, which is a good thing. Remember - the choice of color for plots is one of the most essential aspects of visualization. A poor choice of colors can easily mask interesting patterns or suggest structure that is not real. To learn more about what is available, see the seaborn color tutorial. Here we load the default: End of explanation """ # set palette to colorblind sns.set_palette("colorblind") current_palette = sns.color_palette() sns.palplot(current_palette) """ Explanation: which we will now change to colorblind, which is clearer to those that are colorblind. 
End of explanation """ iris = sns.load_dataset("iris") iris """ Explanation: Now that we have covered the basics of seaborn (and the above examples truly only scratch the surface of what is possible), we will explore the power of seaborn for higher dimension data sets. We will load the famous Iris data set, which measures 4 different features of 3 different types of Iris flowers. There are 150 different flowers in the data set. Note - for those familiar with pandas seaborn is designed to integrate easily and directly with pandas DataFrame objects. In the example below the Iris data are loaded into a DataFrame. iPython notebooks also display the DataFrame data in a nice readable format. End of explanation """ # note - kde, and rug all set to True, set to False to turn them off with sns.axes_style("dark"): sns.displot(iris['petal_length'], bins=20, kde=True, rug=True) plt.tight_layout() """ Explanation: Now that we have a sense of the data structure, it is useful to examine the distribution of features. Above, we went to great pains to produce histograms, KDEs, and rug plots. seaborn handles all of that effortlessly with the displot function. Problem 2b Plot the distribution of petal lengths for the Iris data set. End of explanation """ fig, ax = plt.subplots() ax.scatter( # complete ax.set_xlabel("petal length (cm)") ax.set_ylabel("petal width (cm)") fig.tight_layout() """ Explanation: Of course, this data set lives in a 4D space, so plotting more than univariate distributions is important. Fortunately, seaborn makes it very easy to produce handy summary plots. At this point, we are familiar with basic scatter plots in matplotlib. Problem 2c Make a matplotlib scatter plot showing the Iris petal length against the Iris petal width. 
End of explanation """ np.random.seed(2016) xexample = np.random.normal(loc = 0.2, scale = 1.1, size = 10000) yexample = np.random.normal(loc = -0.2, scale = 0.9, size = 10000) fig, ax = plt.subplots() ax.scatter(xexample, yexample) ax.set_xlabel('X', fontsize=14) ax.set_ylabel('Y', fontsize=14) fig.tight_layout() """ Explanation: Of course, when there are many many data points, scatter plots become difficult to interpret. As in the example below: End of explanation """ # hexbin w/ bins = "log" returns the log of counts/bin # mincnt = 1 displays only hexpix with at least 1 source present fig, ax = plt.subplots() cax = ax.hexbin(xexample, yexample, bins = "log", cmap = "viridis", mincnt = 1) ax.set_xlabel('X', fontsize=14) ax.set_ylabel('Y', fontsize=14) fig.tight_layout() plt.colorbar(cax) """ Explanation: Here, we see that there are many points, clustered about the origin, but we have no sense of the underlying density of the distribution. 2D histograms, such as plt.hist2d(), can alleviate this problem. I prefer to use plt.hexbin() which is a little easier on the eyes (though note - these histograms are just as subject to the same issues discussed above). End of explanation """ fig, ax = plt.subplots() sns.kdeplot(x=xexample, y=yexample, shade=False) ax.set_xlabel('X', fontsize=14) ax.set_ylabel('Y', fontsize=14) fig.tight_layout() """ Explanation: While the above plot provides a significant improvement over the scatter plot by providing a better sense of the density near the center of the distribution, the binedge effects are clearly present. An even better solution, like before, is a density estimate, which is easily built into seaborn via the kdeplot function. End of explanation """ sns.jointplot(x=iris['petal_length'], y=iris['petal_width']) plt.tight_layout() """ Explanation: This plot is much more appealing (and informative) than the previous two. For the first time we can clearly see that the distribution is not actually centered on the origin. 
Now we will move back to the Iris data set. Suppose we want to see univariate distributions in addition to the scatter plot? This is certainly possible with matplotlib and you can find examples on the web, however, with seaborn this is really easy. End of explanation """ sns.jointplot(# complete plt.tight_layout() """ Explanation: But! Histograms and scatter plots can be problematic as we have discussed many times before. Problem 2d Re-create the plot above but set kind='kde' to produce density estimates of the distributions. End of explanation """ sns.pairplot(iris[["sepal_length", "sepal_width", "petal_length", "petal_width"]]) plt.tight_layout() """ Explanation: That is much nicer than what was presented above. However - we still have a problem in that our data live in 4D, but we are (mostly) limited to 2D projections of that data. One way around this is via the seaborn version of a pairplot, which plots the distribution of every variable in the data set against each other. (Here is where the integration with pandas DataFrames becomes so powerful.) End of explanation """ sns.pairplot(iris, vars = ["sepal_length", "sepal_width", "petal_length", "petal_width"], hue = "species", diag_kind = 'kde') """ Explanation: For data sets where we have classification labels, we can even color the various points using the hue option, and produce KDEs along the diagonal with diag_type = 'kde'. End of explanation """ g = sns.PairGrid(iris, vars = ["sepal_length", "sepal_width", "petal_length", "petal_width"], hue = "species", diag_sharey=False) g.map_lower(sns.kdeplot) g.map_upper(plt.scatter, edgecolor='white') g.map_diag(sns.kdeplot, lw=3) g.add_legend() """ Explanation: Even better - there is an option to create a PairGrid which allows fine tuned control of the data as displayed above, below, and along the diagonal. In this way it becomes possible to avoid having symmetric redundancy, which is not all that informative. 
In the example below, we will show scatter plots and contour plots simultaneously. End of explanation """
abeschneider/algorithm_notes
Heapsort.ipynb
mit
def build_heap(lst):
    """Rearrange ``lst`` in place so that it satisfies the max-heap property.

    Works bottom-up: every non-leaf node is sifted down via
    ``percolate_down``, starting from the last internal node and moving
    toward the root. Runs in O(n) overall.
    """
    # last non-leaf node lives at index len(lst)//2 - 1;
    # use floor division so this stays an int on Python 3 as well
    # (plain / returns a float there and would break range() below)
    nonleaf_nodes = len(lst)//2
    # start at bottom work up for each node
    for i in range(nonleaf_nodes-1, -1, -1):
        percolate_down(lst, i, len(lst))
""" Explanation: Heap Sort Summary | Performance | Complexity | |-----------------------------|------------------| |Worst-case | $O(n\log{n})$ | |Best-case | $O(n\log{n})$ | |Average | $O(n\log{n})$ | |Worst-case space | $O(1)$ | Notes The time complexity for heap sort in all cases make it a good choice when there need to be guarantees for runtime. Additionally, heap sort requires a constant amount of memory, as it can sort in-place. Algorithm It is possible implement heap sort by using heap_insert and heap_pop (see notes on heaps). However, both of these operations require the allocation of additional space. Instead, we can perform the same operations in place. The function build_heap applies the function percoluate_down on each non-leaf node in the tree starting at the bottom. End of explanation """
build_heap_example()
""" Explanation: a visualize demonstration of what build_heap is doing: End of explanation """
def heap_sort(lst):
    """Sort ``lst`` in place in ascending order using heap sort.

    Returns the same list object for convenience. O(n log n) time,
    O(1) extra space.
    """
    build_heap(lst)
    # build up list starting from largest at back to smallest up front
    for i in range(len(lst)-1, 0, -1):
        # move largest value to the back
        lst[0], lst[i] = lst[i], lst[0]
        # re-heapify tree (heap now covers lst[:i])
        percolate_down(lst, 0, i)
    return lst
""" Explanation: which allows us to define: End of explanation """
heap_sort_example()
""" Explanation: and we can see the sort in action: End of explanation """
timzhangau/ml_nano
student_intervention/student_intervention.ipynb
mit
# Import libraries import numpy as np import pandas as pd from time import time from sklearn.metrics import f1_score # Read student data student_data = pd.read_csv("student-data.csv") print "Student data read successfully!" """ Explanation: Machine Learning Engineer Nanodegree Supervised Learning Project: Building a Student Intervention System Welcome to the second project of the Machine Learning Engineer Nanodegree! In this notebook, some template code has already been provided for you, and it will be your job to implement the additional functionality necessary to successfully complete this project. Sections that begin with 'Implementation' in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a 'Question X' header. Carefully read each question and provide thorough answers in the following text boxes that begin with 'Answer:'. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide. Note: Code and Markdown cells can be executed using the Shift + Enter keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode. Question 1 - Classification vs. Regression Your goal for this project is to identify students who might need early intervention before they fail to graduate. Which type of supervised learning problem is this, classification or regression? Why? Answer: Exploring the Data Run the code cell below to load necessary Python libraries and load the student data. 
Note that the last column from this dataset, 'passed', will be our target label (whether the student graduated or didn't graduate). All other columns are features about each student. End of explanation """ # TODO: Calculate number of students n_students = None # TODO: Calculate number of features n_features = None # TODO: Calculate passing students n_passed = None # TODO: Calculate failing students n_failed = None # TODO: Calculate graduation rate grad_rate = None # Print the results print "Total number of students: {}".format(n_students) print "Number of features: {}".format(n_features) print "Number of students who passed: {}".format(n_passed) print "Number of students who failed: {}".format(n_failed) print "Graduation rate of the class: {:.2f}%".format(grad_rate) """ Explanation: Implementation: Data Exploration Let's begin by investigating the dataset to determine how many students we have information on, and learn about the graduation rate among these students. In the code cell below, you will need to compute the following: - The total number of students, n_students. - The total number of features for each student, n_features. - The number of those students who passed, n_passed. - The number of those students who failed, n_failed. - The graduation rate of the class, grad_rate, in percent (%). End of explanation """ # Extract feature columns feature_cols = list(student_data.columns[:-1]) # Extract target column 'passed' target_col = student_data.columns[-1] # Show the list of columns print "Feature columns:\n{}".format(feature_cols) print "\nTarget column: {}".format(target_col) # Separate the data into feature data and target data (X_all and y_all, respectively) X_all = student_data[feature_cols] y_all = student_data[target_col] # Show the feature information by printing the first five rows print "\nFeature values:" print X_all.head() """ Explanation: Preparing the Data In this section, we will prepare the data for modeling, training and testing. 
Identify feature and target columns It is often the case that the data you obtain contains non-numeric features. This can be a problem, as most machine learning algorithms expect numeric data to perform computations with. Run the code cell below to separate the student data into feature and target columns to see if any features are non-numeric. End of explanation """ def preprocess_features(X): ''' Preprocesses the student data and converts non-numeric binary variables into binary (0/1) variables. Converts categorical variables into dummy variables. ''' # Initialize new output DataFrame output = pd.DataFrame(index = X.index) # Investigate each feature column for the data for col, col_data in X.iteritems(): # If data type is non-numeric, replace all yes/no values with 1/0 if col_data.dtype == object: col_data = col_data.replace(['yes', 'no'], [1, 0]) # If data type is categorical, convert to dummy variables if col_data.dtype == object: # Example: 'school' => 'school_GP' and 'school_MS' col_data = pd.get_dummies(col_data, prefix = col) # Collect the revised columns output = output.join(col_data) return output X_all = preprocess_features(X_all) print "Processed feature columns ({} total features):\n{}".format(len(X_all.columns), list(X_all.columns)) """ Explanation: Preprocess Feature Columns As you can see, there are several non-numeric columns that need to be converted! Many of them are simply yes/no, e.g. internet. These can be reasonably converted into 1/0 (binary) values. Other columns, like Mjob and Fjob, have more than two values, and are known as categorical variables. The recommended way to handle such a column is to create as many columns as possible values (e.g. Fjob_teacher, Fjob_other, Fjob_services, etc.), and assign a 1 to one of them and 0 to all others. These generated columns are sometimes called dummy variables, and we will use the pandas.get_dummies() function to perform this transformation. 
Run the code cell below to perform the preprocessing routine discussed in this section. End of explanation """ # TODO: Import any additional functionality you may need here # TODO: Set the number of training points num_train = None # Set the number of testing points num_test = X_all.shape[0] - num_train # TODO: Shuffle and split the dataset into the number of training and testing points above X_train = None X_test = None y_train = None y_test = None # Show the results of the split print "Training set has {} samples.".format(X_train.shape[0]) print "Testing set has {} samples.".format(X_test.shape[0]) """ Explanation: Implementation: Training and Testing Data Split So far, we have converted all categorical features into numeric values. For the next step, we split the data (both features and corresponding labels) into training and test sets. In the following code cell below, you will need to implement the following: - Randomly shuffle and split the data (X_all, y_all) into training and testing subsets. - Use 300 training points (approximately 75%) and 95 testing points (approximately 25%). - Set a random_state for the function(s) you use, if provided. - Store the results in X_train, X_test, y_train, and y_test. End of explanation """ def train_classifier(clf, X_train, y_train): ''' Fits a classifier to the training data. ''' # Start the clock, train the classifier, then stop the clock start = time() clf.fit(X_train, y_train) end = time() # Print the results print "Trained model in {:.4f} seconds".format(end - start) def predict_labels(clf, features, target): ''' Makes predictions using a fit classifier based on F1 score. 
''' # Start the clock, make predictions, then stop the clock start = time() y_pred = clf.predict(features) end = time() # Print and return results print "Made predictions in {:.4f} seconds.".format(end - start) return f1_score(target.values, y_pred, pos_label='yes') def train_predict(clf, X_train, y_train, X_test, y_test): ''' Train and predict using a classifer based on F1 score. ''' # Indicate the classifier and the training set size print "Training a {} using a training set size of {}. . .".format(clf.__class__.__name__, len(X_train)) # Train the classifier train_classifier(clf, X_train, y_train) # Print the results of prediction for both training and testing print "F1 score for training set: {:.4f}.".format(predict_labels(clf, X_train, y_train)) print "F1 score for test set: {:.4f}.".format(predict_labels(clf, X_test, y_test)) """ Explanation: Training and Evaluating Models In this section, you will choose 3 supervised learning models that are appropriate for this problem and available in scikit-learn. You will first discuss the reasoning behind choosing these three models by considering what you know about the data and each model's strengths and weaknesses. You will then fit the model to varying sizes of training data (100 data points, 200 data points, and 300 data points) and measure the F<sub>1</sub> score. You will need to produce three tables (one for each model) that shows the training set size, training time, prediction time, F<sub>1</sub> score on the training set, and F<sub>1</sub> score on the testing set. 
The following supervised learning models are currently available in scikit-learn that you may choose from: - Gaussian Naive Bayes (GaussianNB) - Decision Trees - Ensemble Methods (Bagging, AdaBoost, Random Forest, Gradient Boosting) - K-Nearest Neighbors (KNeighbors) - Stochastic Gradient Descent (SGDC) - Support Vector Machines (SVM) - Logistic Regression Question 2 - Model Application List three supervised learning models that are appropriate for this problem. For each model chosen: - Describe one real-world application in industry where the model can be applied. (You may need to do a small bit of research for this — give references!) - What are the strengths of the model; when does it perform well? - What are the weaknesses of the model; when does it perform poorly? - What makes this model a good candidate for the problem, given what you know about the data? Answer: Setup Run the code cell below to initialize three helper functions which you can use for training and testing the three supervised learning models you've chosen above. The functions are as follows: - train_classifier - takes as input a classifier and training data and fits the classifier to the data. - predict_labels - takes as input a fit classifier, features, and a target labeling and makes predictions using the F<sub>1</sub> score. - train_predict - takes as input a classifier, and the training and testing data, and performs train_classifier and predict_labels. - This function will report the F<sub>1</sub> score for both the training and testing data separately.
End of explanation """ # TODO: Import the three supervised learning models from sklearn # from sklearn import model_A # from sklearn import model_B # from sklearn import model_C # TODO: Initialize the three models clf_A = None clf_B = None clf_C = None # TODO: Set up the training set sizes X_train_100 = None y_train_100 = None X_train_200 = None y_train_200 = None X_train_300 = None y_train_300 = None # TODO: Execute the 'train_predict' function for each classifier and each training set size # train_predict(clf, X_train, y_train, X_test, y_test) """ Explanation: Implementation: Model Performance Metrics With the predefined functions above, you will now import the three supervised learning models of your choice and run the train_predict function for each one. Remember that you will need to train and predict on each classifier for three different training set sizes: 100, 200, and 300. Hence, you should expect to have 9 different outputs below — 3 for each model using the varying training set sizes. In the following code cell, you will need to implement the following: - Import the three supervised learning models you've discussed in the previous section. - Initialize the three models and store them in clf_A, clf_B, and clf_C. - Use a random_state for each model you use, if provided. - Note: Use the default settings for each model — you will tune one specific model in a later section. - Create the different training set sizes to be used to train each model. - Do not reshuffle and resplit the data! The new training points should be drawn from X_train and y_train. - Fit each model with each training set size and make predictions on the test set (9 in total). Note: Three tables are provided after the following code cell which can be used to store your results. 
End of explanation """ # TODO: Import 'GridSearchCV' and 'make_scorer' # TODO: Create the parameters list you wish to tune parameters = None # TODO: Initialize the classifier clf = None # TODO: Make an f1 scoring function using 'make_scorer' f1_scorer = None # TODO: Perform grid search on the classifier using the f1_scorer as the scoring method grid_obj = None # TODO: Fit the grid search object to the training data and find the optimal parameters grid_obj = None # Get the estimator clf = grid_obj.best_estimator_ # Report the final F1 score for training and testing after parameter tuning print "Tuned model has a training F1 score of {:.4f}.".format(predict_labels(clf, X_train, y_train)) print "Tuned model has a testing F1 score of {:.4f}.".format(predict_labels(clf, X_test, y_test)) """ Explanation: Tabular Results Edit the cell below to see how a table can be designed in Markdown. You can record your results from above in the tables provided. Classifer 1 - ? | Training Set Size | Training Time | Prediction Time (test) | F1 Score (train) | F1 Score (test) | | :---------------: | :---------------------: | :--------------------: | :--------------: | :-------------: | | 100 | | | | | | 200 | EXAMPLE | | | | | 300 | | | | EXAMPLE | Classifer 2 - ? | Training Set Size | Training Time | Prediction Time (test) | F1 Score (train) | F1 Score (test) | | :---------------: | :---------------------: | :--------------------: | :--------------: | :-------------: | | 100 | | | | | | 200 | EXAMPLE | | | | | 300 | | | | EXAMPLE | Classifer 3 - ? | Training Set Size | Training Time | Prediction Time (test) | F1 Score (train) | F1 Score (test) | | :---------------: | :---------------------: | :--------------------: | :--------------: | :-------------: | | 100 | | | | | | 200 | | | | | | 300 | | | | | Choosing the Best Model In this final section, you will choose from the three supervised learning models the best model to use on the student data. 
You will then perform a grid search optimization for the model over the entire training set (X_train and y_train) by tuning at least one parameter to improve upon the untuned model's F<sub>1</sub> score. Question 3 - Choosing the Best Model Based on the experiments you performed earlier, in one to two paragraphs, explain to the board of supervisors what single model you chose as the best model. Which model is generally the most appropriate based on the available data, limited resources, cost, and performance? Answer: Question 4 - Model in Layman's Terms In one to two paragraphs, explain to the board of directors in layman's terms how the final model chosen is supposed to work. Be sure that you are describing the major qualities of the model, such as how the model is trained and how the model makes a prediction. Avoid using advanced mathematical or technical jargon, such as describing equations or discussing the algorithm implementation. Answer: Implementation: Model Tuning Fine tune the chosen model. Use grid search (GridSearchCV) with at least one important parameter tuned with at least 3 different values. You will need to use the entire training set for this. In the code cell below, you will need to implement the following: - Import sklearn.grid_search.GridSearchCV and sklearn.metrics.make_scorer. - Create a dictionary of parameters you wish to tune for the chosen model. - Example: parameters = {'parameter' : [list of values]}. - Initialize the classifier you've chosen and store it in clf. - Create the F<sub>1</sub> scoring function using make_scorer and store it in f1_scorer. - Set the pos_label parameter to the correct value! - Perform grid search on the classifier clf using f1_scorer as the scoring method, and store it in grid_obj. - Fit the grid search object to the training data (X_train, y_train), and store it in grid_obj. End of explanation """
chetnapriyadarshini/deep-learning
reinforcement/Q-learning-cart.ipynb
mit
import gym import tensorflow as tf import numpy as np """ Explanation: Deep Q-learning In this notebook, we'll build a neural network that can learn to play games through reinforcement learning. More specifically, we'll use Q-learning to train an agent to play a game called Cart-Pole. In this game, a freely swinging pole is attached to a cart. The cart can move to the left and right, and the goal is to keep the pole upright as long as possible. We can simulate this game using OpenAI Gym. First, let's check out how OpenAI Gym works. Then, we'll get into training an agent to play the Cart-Pole game. End of explanation """ # Create the Cart-Pole game environment env = gym.make('CartPole-v0') """ Explanation: Note: Make sure you have OpenAI Gym cloned into the same directory with this notebook. I've included gym as a submodule, so you can run git submodule --init --recursive to pull the contents into the gym repo. End of explanation """ env.reset() rewards = [] for _ in range(100): env.render() state, reward, done, info = env.step(env.action_space.sample()) # take a random action rewards.append(reward) if done: rewards = [] env.reset() """ Explanation: We interact with the simulation through env. To show the simulation running, you can use env.render() to render one frame. Passing in an action as an integer to env.step will generate the next step in the simulation. You can see how many actions are possible from env.action_space and to get a random action you can use env.action_space.sample(). This is general to all Gym games. In the Cart-Pole game, there are two possible actions, moving the cart left or right. So there are two actions we can take, encoded as 0 and 1. Run the code below to watch the simulation run. End of explanation """ print(rewards[-20:]) """ Explanation: To shut the window showing the simulation, use env.close(). 
If you ran the simulation above, we can look at the rewards: End of explanation """ class QNetwork: def __init__(self, learning_rate=0.01, state_size=4, action_size=2, hidden_size=10, name='QNetwork'): # state inputs to the Q-network with tf.variable_scope(name): self.inputs_ = tf.placeholder(tf.float32, [None, state_size], name='inputs') # One hot encode the actions to later choose the Q-value for the action self.actions_ = tf.placeholder(tf.int32, [None], name='actions') one_hot_actions = tf.one_hot(self.actions_, action_size) # Target Q values for training self.targetQs_ = tf.placeholder(tf.float32, [None], name='target') # ReLU hidden layers self.fc1 = tf.contrib.layers.fully_connected(self.inputs_, hidden_size) self.fc2 = tf.contrib.layers.fully_connected(self.fc1, hidden_size) # Linear output layer self.output = tf.contrib.layers.fully_connected(self.fc2, action_size, activation_fn=None) ### Train with loss (targetQ - Q)^2 # output has length 2, for two actions. This next line chooses # one value from output (per row) according to the one-hot encoded actions. self.Q = tf.reduce_sum(tf.multiply(self.output, one_hot_actions), axis=1) self.loss = tf.reduce_mean(tf.square(self.targetQs_ - self.Q)) self.opt = tf.train.AdamOptimizer(learning_rate).minimize(self.loss) """ Explanation: The game resets after the pole has fallen past a certain angle. For each frame while the simulation is running, it returns a reward of 1.0. The longer the game runs, the more reward we get. Then, our network's goal is to maximize the reward by keeping the pole vertical. It will do this by moving the cart to the left and the right. Q-Network We train our Q-learning agent using the Bellman Equation: $$ Q(s, a) = r + \gamma \max{Q(s', a')} $$ where $s$ is a state, $a$ is an action, and $s'$ is the next state from state $s$ and action $a$. Before we used this equation to learn values for a Q-table. However, for this game there are a huge number of states available. 
The state has four values: the position and velocity of the cart, and the position and velocity of the pole. These are all real-valued numbers, so ignoring floating point precisions, you practically have infinite states. Instead of using a table then, we'll replace it with a neural network that will approximate the Q-table lookup function. <img src="assets/deep-q-learning.png" width=450px> Now, our Q value, $Q(s, a)$ is calculated by passing in a state to the network. The output will be Q-values for each available action, with fully connected hidden layers. <img src="assets/q-network.png" width=550px> As I showed before, we can define our targets for training as $\hat{Q}(s,a) = r + \gamma \max{Q(s', a')}$. Then we update the weights by minimizing $(\hat{Q}(s,a) - Q(s,a))^2$. For this Cart-Pole game, we have four inputs, one for each value in the state, and two outputs, one for each action. To get $\hat{Q}$, we'll first choose an action, then simulate the game using that action. This will get us the next state, $s'$, and the reward. With that, we can calculate $\hat{Q}$ then pass it back into the $Q$ network to run the optimizer and update the weights. Below is my implementation of the Q-network. I used two fully connected layers with ReLU activations. Two seems to be good enough, three might be better. Feel free to try it out. End of explanation """ from collections import deque class Memory(): def __init__(self, max_size = 1000): self.buffer = deque(maxlen=max_size) def add(self, experience): self.buffer.append(experience) def sample(self, batch_size): idx = np.random.choice(np.arange(len(self.buffer)), size=batch_size, replace=False) return [self.buffer[ii] for ii in idx] """ Explanation: Experience replay Reinforcement learning algorithms can have stability issues due to correlations between states. To reduce correlations when training, we can store the agent's experiences and later draw a random mini-batch of those experiences to train on. 
Here, we'll create a Memory object that will store our experiences, our transitions $<s, a, r, s'>$. This memory will have a maxmium capacity, so we can keep newer experiences in memory while getting rid of older experiences. Then, we'll sample a random mini-batch of transitions $<s, a, r, s'>$ and train on those. Below, I've implemented a Memory object. If you're unfamiliar with deque, this is a double-ended queue. You can think of it like a tube open on both sides. You can put objects in either side of the tube. But if it's full, adding anything more will push an object out the other side. This is a great data structure to use for the memory buffer. End of explanation """ train_episodes = 1000 # max number of episodes to learn from max_steps = 200 # max steps in an episode gamma = 0.99 # future reward discount # Exploration parameters explore_start = 1.0 # exploration probability at start explore_stop = 0.01 # minimum exploration probability decay_rate = 0.0001 # exponential decay rate for exploration prob # Network parameters hidden_size = 64 # number of units in each Q-network hidden layer learning_rate = 0.0001 # Q-network learning rate # Memory parameters memory_size = 10000 # memory capacity batch_size = 20 # experience mini-batch size pretrain_length = batch_size # number experiences to pretrain the memory tf.reset_default_graph() mainQN = QNetwork(name='main', hidden_size=hidden_size, learning_rate=learning_rate) """ Explanation: Exploration - Exploitation To learn about the environment and rules of the game, the agent needs to explore by taking random actions. We'll do this by choosing a random action with some probability $\epsilon$ (epsilon). That is, with some probability $\epsilon$ the agent will make a random action and with probability $1 - \epsilon$, the agent will choose an action from $Q(s,a)$. This is called an $\epsilon$-greedy policy. At first, the agent needs to do a lot of exploring. 
Later when it has learned more, the agent can favor choosing actions based on what it has learned. This is called exploitation. We'll set it up so the agent is more likely to explore early in training, then more likely to exploit later in training. Q-Learning training algorithm Putting all this together, we can list out the algorithm we'll use to train the network. We'll train the network in episodes. One episode is one simulation of the game. For this game, the goal is to keep the pole upright for 195 frames. So we can start a new episode once meeting that goal. The game ends if the pole tilts over too far, or if the cart moves too far to the left or right. When a game ends, we'll start a new episode. Now, to train the agent: Initialize the memory $D$ Initialize the action-value network $Q$ with random weights For episode = 1, $M$ do For $t$ = 1, $T$ do With probability $\epsilon$ select a random action $a_t$, otherwise select $a_t = \mathrm{argmax}_a Q(s,a)$ Execute action $a_t$ in simulator and observe reward $r_{t+1}$ and new state $s_{t+1}$ Store transition $<s_t, a_t, r_{t+1}, s_{t+1}>$ in memory $D$ Sample random mini-batch from $D$: $<s_j, a_j, r_j, s'_j>$ Set $\hat{Q}_j = r_j$ if the episode ends at $j+1$, otherwise set $\hat{Q}_j = r_j + \gamma \max_{a'}{Q(s'_j, a')}$ Make a gradient descent step with loss $(\hat{Q}_j - Q(s_j, a_j))^2$ endfor endfor Hyperparameters One of the more difficult aspects of reinforcement learning is the large number of hyperparameters. Not only are we tuning the network, but we're tuning the simulation.
End of explanation """ # Initialize the simulation env.reset() # Take one random step to get the pole and cart moving state, reward, done, _ = env.step(env.action_space.sample()) memory = Memory(max_size=memory_size) # Make a bunch of random actions and store the experiences for ii in range(pretrain_length): # Uncomment the line below to watch the simulation env.render() # Make a random action action = env.action_space.sample() next_state, reward, done, _ = env.step(action) if done: # The simulation fails so no next state next_state = np.zeros(state.shape) # Add experience to memory memory.add((state, action, reward, next_state)) # Start new episode env.reset() # Take one random step to get the pole and cart moving state, reward, done, _ = env.step(env.action_space.sample()) else: # Add experience to memory memory.add((state, action, reward, next_state)) state = next_state """ Explanation: Populate the experience memory Here I'm re-initializing the simulation and pre-populating the memory. The agent is taking random actions and storing the transitions in memory. This will help the agent with exploring the game. 
End of explanation """ # Now train with experiences saver = tf.train.Saver() rewards_list = [] with tf.Session() as sess: # Initialize variables sess.run(tf.global_variables_initializer()) step = 0 for ep in range(1, train_episodes): total_reward = 0 t = 0 while t < max_steps: step += 1 # Uncomment this next line to watch the training # env.render() # Explore or Exploit explore_p = explore_stop + (explore_start - explore_stop)*np.exp(-decay_rate*step) if explore_p > np.random.rand(): # Make a random action action = env.action_space.sample() else: # Get action from Q-network feed = {mainQN.inputs_: state.reshape((1, *state.shape))} Qs = sess.run(mainQN.output, feed_dict=feed) action = np.argmax(Qs) # Take action, get new state and reward next_state, reward, done, _ = env.step(action) total_reward += reward if done: # the episode ends so no next state next_state = np.zeros(state.shape) t = max_steps print('Episode: {}'.format(ep), 'Total reward: {}'.format(total_reward), 'Training loss: {:.4f}'.format(loss), 'Explore P: {:.4f}'.format(explore_p)) rewards_list.append((ep, total_reward)) # Add experience to memory memory.add((state, action, reward, next_state)) # Start new episode env.reset() # Take one random step to get the pole and cart moving state, reward, done, _ = env.step(env.action_space.sample()) else: # Add experience to memory memory.add((state, action, reward, next_state)) state = next_state t += 1 # Sample mini-batch from memory batch = memory.sample(batch_size) states = np.array([each[0] for each in batch]) actions = np.array([each[1] for each in batch]) rewards = np.array([each[2] for each in batch]) next_states = np.array([each[3] for each in batch]) # Train network target_Qs = sess.run(mainQN.output, feed_dict={mainQN.inputs_: next_states}) # Set target_Qs to 0 for states where episode ends episode_ends = (next_states == np.zeros(states[0].shape)).all(axis=1) target_Qs[episode_ends] = (0, 0) targets = rewards + gamma * np.max(target_Qs, axis=1) loss, 
_ = sess.run([mainQN.loss, mainQN.opt], feed_dict={mainQN.inputs_: states, mainQN.targetQs_: targets, mainQN.actions_: actions}) saver.save(sess, "checkpoints/cartpole.ckpt") """ Explanation: Training Below we'll train our agent. If you want to watch it train, uncomment the env.render() line. This is slow because it's rendering the frames slower than the network can train. But, it's cool to watch the agent get better at the game. End of explanation """ %matplotlib inline import matplotlib.pyplot as plt def running_mean(x, N): cumsum = np.cumsum(np.insert(x, 0, 0)) return (cumsum[N:] - cumsum[:-N]) / N eps, rews = np.array(rewards_list).T smoothed_rews = running_mean(rews, 10) plt.plot(eps[-len(smoothed_rews):], smoothed_rews) plt.plot(eps, rews, color='grey', alpha=0.3) plt.xlabel('Episode') plt.ylabel('Total Reward') """ Explanation: Visualizing training Below I'll plot the total rewards for each episode. I'm plotting the rolling average too, in blue. End of explanation """ test_episodes = 10 test_max_steps = 400 env.reset() with tf.Session() as sess: saver.restore(sess, tf.train.latest_checkpoint('checkpoints')) for ep in range(1, test_episodes): t = 0 while t < test_max_steps: env.render() # Get action from Q-network feed = {mainQN.inputs_: state.reshape((1, *state.shape))} Qs = sess.run(mainQN.output, feed_dict=feed) action = np.argmax(Qs) # Take action, get new state and reward next_state, reward, done, _ = env.step(action) if done: t = test_max_steps env.reset() # Take one random step to get the pole and cart moving state, reward, done, _ = env.step(env.action_space.sample()) else: state = next_state t += 1 env.close() """ Explanation: Testing Let's checkout how our trained agent plays the game. End of explanation """
eyadsibai/rep
howto/00-intro_ipython.ipynb
apache-2.0
%pylab inline
from IPython.display import YouTubeVideo
YouTubeVideo("qb7FT68tcA8", width=600, height=400, theme="light", color="blue")

# You can ignore this, it's just for aesthetic purposes
matplotlib.rcParams['figure.figsize'] = (8,5)
rcParams['savefig.dpi'] = 100
""" Explanation: Intro into IPython notebooks
End of explanation """
# These import commands set up the environment so we have access to numpy and pylab functions
import numpy as np
import pylab as pl

# Data Fitting

# First, we'll generate some fake data to use
x = np.linspace(0,10,50) # 50 x points from 0 to 10

# Remember, you can look at the help for linspace too:
# help(np.linspace)

# y = m x + b
y = 2.5 * x + 1.2

# let's plot that
pl.clf()
pl.plot(x,y)

# looks like a simple line. But we want to see the individual data points
pl.plot(x,y,marker='s')

# We need to add noise first
noise = pl.randn(y.size)

# Like IDL, python has a 'randn' function that is centered at 0 with a standard deviation of 1.
# IDL's 'randomu' is 'pl.rand' instead

# What's y.size?
print y.size
print len(y)
""" Explanation: Fitting Lines to Data
We'll cover very basic line fitting, largely ignoring the subtleties of the statistics in favor of showing you how to perform simple fits of models to data.
End of explanation """
# We can add arrays in python just like in IDL
noisy_flux = y + noise

# We'll plot it too, but this time without any lines
# between the points, and we'll use black dots
# ('k' is a shortcut for 'black', '.' means 'point')
pl.clf() # clear the figure
pl.plot(x,noisy_flux,'k.')

# We need labels, of course
pl.xlabel("Time")
pl.ylabel("Flux")
""" Explanation: y.size is the number of elements in y, just like len(y) or, in IDL, n_elements(y)
End of explanation """
# We'll use polyfit to find the values of the coefficients. The third
# parameter is the "order"
p = np.polyfit(x,noisy_flux,1)
# help(polyfit) if you want to find out more

# print our fit parameters. They are not exact because there's noise in the data!
# note that this is an array!
print p
print type(p) # you can ask python to tell you what type a variable is

# Great! We've got our fit. Let's overplot the data and the fit now
pl.clf() # clear the figure
pl.plot(x,noisy_flux,'k.') # repeated from above
pl.plot(x,p[0]*x+p[1],'r-') # A red solid line
pl.xlabel("Time") # labels again
pl.ylabel("Flux")

# Cool, but there's another (better) way to do this. We'll use the polyval
# function instead of writing out the m x + b equation ourselves
pl.clf() # clear the figure
pl.plot(x,noisy_flux,'k.') # repeated from above
pl.plot(x,np.polyval(p,x),'r-') # A red solid line
pl.xlabel("Time") # labels again
pl.ylabel("Flux")

# help(polyval) if you want to find out more
""" Explanation: Now we're onto the fitting stage.
We're going to fit a function of the form
$$y = mx + b$$
which is the same as
$$f(x) = p[1]x + p[0]$$
to the data.
This is called "linear regression", but it is also a special case of a more general concept: this is a first-order polynomial.
"First Order" means that the highest exponent of x in the equation is 1
End of explanation """
noisy_flux = y+noise*10
p = polyfit(x,noisy_flux,1)
print p

# plot it
pl.clf() # clear the figure
pl.plot(x,noisy_flux,'k.') # repeated from above
pl.plot(x,np.polyval(p,x),'r-',label="Best fit") # A red solid line
pl.plot(x,2.5*x+1.2,'b--',label="Input") # a blue dashed line showing the REAL line
pl.legend(loc='best') # make a legend in the best location
pl.xlabel("Time") # labels again
pl.ylabel("Flux")
""" Explanation: Let's do the same thing with a noisier data set. I'm going to leave out most of the comments this time.
End of explanation """
pl.clf() # clear the figure
pl.errorbar(x,noisy_flux,yerr=10,marker='.',color='k',linestyle='none') # errorbar requires some extras to look nice
pl.plot(x,np.polyval(p,x),'r-',label="Best fit") # A red solid line
pl.plot(x,2.5*x+1.2,'b--',label="Input") # a blue dashed line showing the REAL line
pl.legend(loc='best') # make a legend in the best location
pl.xlabel("Time") # labels again
pl.ylabel("Flux")
""" Explanation: Despite the noisy data, our fit is still pretty good!
One last plotting trick, then we'll move on.
End of explanation """
# this time we want our "independent variable" to be in radians
x = np.linspace(0,2*np.pi,50)
y = np.sin(x)
pl.clf()
pl.plot(x,y)

# We'll make it noisy again
noise = pl.randn(y.size)
noisy_flux = y + noise
pl.plot(x,noisy_flux,'k.') # no clear this time
""" Explanation: Curve Fitting
We'll now move on to more complicated curves. What if the data looks more like a sine curve? We'll create "fake data" in basically the same way as above.
End of explanation """
# curve_fit is the function we need for this, but it's in another package called scipy
from scipy.optimize import curve_fit
# we need to know what it does:
help(curve_fit)
""" Explanation: That looks like kind of a mess. Let's see how well we can fit it.
The function we're trying to fit has the form:
$$f(x) = A * sin(x - B)$$
where $A$ is a "scale" parameter and $B$ is the side-to-side offset (or the "delay" if the x-axis is time). For our data, they are $A=1$ and $B=0$ respectively, because we made $y=sin(x)$
End of explanation """
def sinfunc(x,a,b):
    # Model for curve_fit: a scaled, phase-shifted sine.
    return a*np.sin(x-b)

fitpars, covmat = curve_fit(sinfunc,x,noisy_flux)
# The diagonals of the covariance matrix are variances
# variance = standard deviation squared, so we'll take the square roots to get the standard devations!
# You can get the diagonals of a 2D array easily:
variances = covmat.diagonal()
std_devs = np.sqrt(variances)
print fitpars,std_devs

# Let's plot our best fit, see how well we did
# These two lines are equivalent:
pl.plot(x, sinfunc(x, fitpars[0], fitpars[1]), 'r-')
pl.plot(x, sinfunc(x, *fitpars), 'r-')
""" Explanation: Look at the returns:
Returns
-------
popt : array
Optimal values for the parameters so that the sum of the squared error of ``f(xdata, *popt) - ydata`` is minimized
pcov : 2d array
The estimated covariance of popt. The diagonals provide the variance of the parameter estimate.
So the first set of returns is the "best-fit parameters", while the second set is the "covariance matrix"
End of explanation """
t = np.linspace(0.1,10)
a = 1.5
b = 2.5
z = a*t**b
pl.clf()
pl.plot(t,z)

# Change the variables
# np.log is the natural log
y = np.log(z)
x = np.log(t)
pl.clf()
pl.plot(x,y)
pl.ylabel("log(z)")
pl.xlabel("log(t)")
""" Explanation: Again, this is pretty good despite the noisiness.
Fitting a Power Law
Power laws occur all the time in physis, so it's a good idea to learn how to use them.
What's a power law? Any function of the form:
$$f(t) = a t^b$$
where $x$ is your independent variable, $a$ is a scale parameter, and $b$ is the exponent (the power).
When fitting power laws, it's very useful to take advantage of the fact that "a power law is linear in log-space". That means, if you take the log of both sides of the equation (which is allowed) and change variables, you get a linear equation!
$$\ln(f(t)) = \ln(a t^b) = \ln(a) + b \ln(t)$$
We'll use the substitutions $y=\ln(f(t))$, $A=\ln(a)$, and $x=\ln(t)$, so that
$$y=a+bx$$
which looks just like our linear equation from before (albeit with different letters for the fit parameters).
We'll now go through the same fitting exercise as before, but using powerlaws instead of lines.
End of explanation """
noisy_z = z + pl.randn(z.size)*10
pl.clf()
pl.plot(t,z)
pl.plot(t,noisy_z,'k.')

noisy_y = np.log(noisy_z)
pl.clf()
pl.plot(x,y)
pl.plot(x,noisy_y,'k.')
pl.ylabel("log(z)")
pl.xlabel("log(t)")
""" Explanation: It's a straight line. Now, for our "fake data", we'll add the noise before transforming from "linear" to "log" space
End of explanation """
print noisy_y

# try to polyfit a line
pars = np.polyfit(x,noisy_y,1)
print pars
""" Explanation: Note how different this looks from the "noisy line" we plotted earlier. Power laws are much more sensitive to noise!
In fact, there are some data points that don't even show up on this plot because you can't take the log of a negative number. Any points where the random noise was negative enough that the curve dropped below zero ended up being "NAN", or "Not a Number". Luckily, our plotter knows to ignore those numbers, but polyfit doesnt.
End of explanation """
print 1 == 1
print np.nan == np.nan
""" Explanation: In order to get around this problem, we need to mask the data. That means we have to tell the code to ignore all the data points where noisy_y is nan.
My favorite way to do this is to take advantage of a curious fact: $1=1$, but nan!=nan
End of explanation """
OK = noisy_y == noisy_y
print OK
""" Explanation: So if we find all the places were noisy_y != noisy_y, we can get rid of them. Or we can just use the places where noisy_y equals itself.
End of explanation """
print "There are %i OK values" % (OK.sum())
masked_noisy_y = noisy_y[OK]
masked_x = x[OK]
print "masked_noisy_y has length",len(masked_noisy_y)

# now polyfit again
pars = np.polyfit(masked_x,masked_noisy_y,1)
print pars

# cool, it worked. But the fit looks a little weird!
fitted_y = polyval(pars,x)
pl.plot(x, fitted_y, 'r--')
""" Explanation: This OK array is a "boolean mask".
We can use it as an "index array", which is pretty neat.
End of explanation """
# Convert back to linear-space to see what it "really" looks like
fitted_z = np.exp(fitted_y)
pl.clf()
pl.plot(t,z)
pl.plot(t,noisy_z,'k.')
pl.plot(t,fitted_z,'r--')
pl.xlabel('t')
pl.ylabel('z')
""" Explanation: The noise seems to have affected our fit.
End of explanation """
def powerlaw(x,a,b):
    # Model for curve_fit: a simple power law a * x**b, fit in linear space.
    return a*(x**b)

pars,covar = curve_fit(powerlaw,t,noisy_z)
pl.clf()
pl.plot(t,z)
pl.plot(t,noisy_z,'k.')
pl.plot(t,powerlaw(t,*pars),'r--')
pl.xlabel('t')
pl.ylabel('z')
""" Explanation: That's pretty bad. A "least-squares" approach, as with curve_fit, is probably going to be the better choice. However, in the absence of noise (i.e., on your homework), this approach should work
End of explanation """
# sin(x) is already defined
def sin2x(x):
    """ sin^2 of x """
    return np.sin(x)**2

def sin3x(x):
    """ sin^3 of x """
    return np.sin(x)**3

def sincos(x):
    """ sin(x)*cos(x) """
    return np.sin(x)*np.cos(x)

list_of_functions = [np.sin, sin2x, sin3x, sincos]

# we want 0-2pi for these functions
t = np.linspace(0,2*np.pi)

# this is the cool part: we can make a variable function
for fun in list_of_functions:
    # the functions know their own names (in a "secret hidden variable" called __name__)
    print "The maximum of ",fun.__name__," is ", fun(t).max()

# OK, but we wanted the location of the maximum....
for fun in list_of_functions:
    print "The location of the maximum of ",fun.__name__," is ", fun(t).argmax()

# well, that's not QUITE what we want, but it's close
# We want to know the value of t, not the index!
for fun in list_of_functions:
    print "The location of the maximum of ",fun.__name__," is ", t[fun(t).argmax()]

# Finally, what if we want to store all that in an array?
# Well, here's a cool trick: you can sort of invert the for loop
# This is called a "list comprehension":
maxlocs = [ t[fun(t).argmax()] for fun in list_of_functions ]
print maxlocs

# Confused? OK. Try this one:
print range(6)
print [ii**2 for ii in range(6)]
""" Explanation: Tricks with Arrays
We need to cover a few syntactic things comparing IDL and python.
In IDL, if you wanted the maximum value in an array, you would do:
maxval = max(array, location_of_max)
In python, it's more straightforward:
location_of_max = array.argmax()
or
location_of_max = np.argmax(array)
Now, say we want to determine the location of the maximum of a number of different functions. The functions we'll use are:
sin(x)
sin$^2$(x)
sin$^3$(x)
sin(x)cos(x)
We'll define these functions, then loop over them.
End of explanation """
from IPython.display import YouTubeVideo
YouTubeVideo("xe_ATRmw0KM", width=600, height=400, theme="light", color="blue")

from IPython.display import YouTubeVideo
YouTubeVideo("zG8FYPFU9n4", width=600, height=400, theme="light", color="blue")
""" Explanation: Further info on IPython Notebooks
| Overview | link |
|--------------------------------------|------------------------------------------------------------------------------------|
| Blog of IPython creator | http://blog.fperez.org/2012/09/blogging-with-ipython-notebook.html |
| Blog of an avid IPython user | http://www.damian.oquanta.info/index.html |
| Turning notebook into a presentation | https://www.youtube.com/watch?v=rBS6hmiK-H8 |
| Tutorial on IPython & SciPy | https://github.com/esc/scipy2013-tutorial-numpy-ipython |
| IPython notebooks gallery | https://github.com/ipython/ipython/wiki/A-gallery-of-interesting-IPython-Notebooks |
End of explanation """
wilkeraziz/notebooks
nlp2/fsa_permutations.ipynb
apache-2.0
import fst
""" Explanation: Permutations
End of explanation """
# Let's see the input as a simple linear chain FSA
def make_input(srcstr, sigma = None):
    """
    converts a nonempty string into a linear chain acceptor
    @param srcstr is a nonempty string
    @param sigma is the source vocabulary
    """
    assert(srcstr.split())
    return fst.linear_chain(srcstr.split(), sigma)

# this function will enumerate all paths in an automaton
def enumerate_paths(fsa):
    # one list of input-label strings per path through the automaton
    paths = [[str(arc.ilabel) for arc in path] for path in fsa.paths()]
    print len(paths), 'paths:'
    for path in paths:
        print ' '.join(path)

# I am going to start with a very simple wrapper for a python dictionary that
# will help us associate unique ids to items
# this wrapper simply offers one additional method (insert) similar to the insert method of an std::map
class ItemFactory(object):

    def __init__(self):
        self.nextid_ = 0  # next unused id
        self.i2s_ = {}    # item -> unique id

    def insert(self, item):
        """
        Inserts a previously unmapped item.
        Returns the item's unique id and a flag with the result of the insertion
        (True if the item was new, False if it had been mapped before).
        """
        uid = self.i2s_.get(item, None)
        if uid is None:
            uid = self.nextid_
            self.nextid_ += 1
            self.i2s_[item] = uid
            return uid, True
        return uid, False

    def get(self, item):
        """
        Returns the item's unique id (assumes the item has been mapped before)
        """
        return self.i2s_[item]
""" Explanation: Helper code
Let's start by defining a few functions that will help us construct and inspect automata
End of explanation """
# This program packs all permutations of an input sentence
def Permutations(sentence, sigma=None, delta=None):
    """
    Builds a transducer accepting every permutation of `sentence`.
    Each state is a coverage vector (tuple of booleans, one per word);
    states are explored breadth-first starting from the empty coverage.
    @param sentence is a list of words
    @param sigma is the input symbol table
    @param delta is the output symbol table
    """
    from collections import deque
    from itertools import takewhile
    A = fst.Transducer(isyms=sigma, osyms=delta)
    I = len(sentence)
    axiom = tuple([False]*I)
    ifactory = ItemFactory()
    ifactory.insert(axiom)
    Q = deque([axiom])
    while Q:
        ant = Q.popleft()  # antecedent (coverage vector)
        sfrom = ifactory.get(ant)  # state id
        if all(ant):  # goal item
            A[sfrom].final = True  # is a final node
            continue
        for i in range(I):
            if not ant[i]:
                # cover word i and add an arc between the two coverage states
                cons = list(ant)
                cons[i] = True
                cons = tuple(cons)
                sto, new = ifactory.insert(cons)
                if new:
                    Q.append(cons)
                A.add_arc(sfrom, sto, str(i + 1), sentence[i], 0)
    return A
""" Explanation: All permutations
End of explanation """
# Let's define a model of translational equivalences that performs word replacement of arbitrary permutations of the input
# constrained to a window of length $d$ (see WLd in (Lopez, 2009))
# same strategy in Moses (for phrase-based models)
def WLdPermutations(sentence, d = 2, sigma = None, delta = None):
    """
    Builds a transducer with the permutations of `sentence` reachable under
    a distortion window of length `d` (the WLd constraint).
    Each state is a pair (l, C): l is the leftmost uncovered position
    (1-based) and C is the coverage of the window following l.
    @param sentence is a list of words
    @param d is the window length
    @param sigma is the input symbol table
    @param delta is the output symbol table
    """
    from collections import deque
    from itertools import takewhile
    A = fst.Transducer(isyms = sigma, osyms = delta)
    I = len(sentence)
    axiom = (1, tuple([False]*min(I - 1, d - 1)))
    ifactory = ItemFactory()
    ifactory.insert(axiom)
    Q = deque([axiom])
    while Q:
        ant = Q.popleft()  # antecedent
        l, C = ant  # signature
        sfrom = ifactory.get(ant)  # state id
        if l == I + 1:  # goal item
            A[sfrom].final = True  # is a final node
            continue
        # adjacent
        # (consume word l; the window slides past any already-covered prefix)
        n = 0 if (len(C) == 0 or not C[0]) else sum(takewhile(lambda b : b, C))  # leading ones
        ll = l + n + 1
        CC = list(C[n+1:])
        maxlen = min(I - ll, d - 1)
        if maxlen:
            m = maxlen - len(CC)  # missing positions
            [CC.append(False) for _ in range(m)]
        cons = (ll, tuple(CC))
        sto, inserted = ifactory.insert(cons)
        if inserted:
            Q.append(cons)
        A.add_arc(sfrom, sto, str(l), sentence[l-1], 0)
        # non-adjacent
        # (consume a word inside the window without moving the left edge)
        ll = l
        for i in range(l + 1, I + 1):
            if i - l + 1 > d:  # beyond limit
                break
            if C[i - l - 1]:  # already used
                continue
            # free position
            CC = list(C)
            CC[i-l-1] = True
            cons = (ll, tuple(CC))
            sto, inserted = ifactory.insert(cons)
            if inserted:
                Q.append(cons)
            A.add_arc(sfrom, sto, str(i), sentence[i-1], 0)
    return A
""" Explanation: Window of length d
Here we keep track of a coverage vector (C) of length d starting from the leftmost uncovered word (l)
[l, C]
There are two inference rules: one moves the window ahead whenever the leftmost uncovered position chances; and another that fills up the window without touching the leftmost input word.
End of explanation """
# Let's create a table for the input vocabulary $\Sigma$
sigma = fst.SymbolTable()
# and for the output vocabulary $\Delta$
delta = fst.SymbolTable()
""" Explanation: Examples
End of explanation """
# Let's have a look at the input as an automaton
# we call it F ('f' is the canonical source language)
ex1_F = make_input('nosso amigo comum', sigma)
ex1_F
""" Explanation: Input
End of explanation """
ex1_all = Permutations('1 2 3 4'.split(), None, sigma)
ex1_all
""" Explanation: All permutations
End of explanation """
enumerate_paths(ex1_all)
""" Explanation: For a toy example we can enumerate the permutations
End of explanation """
# these are the permutations of the input according to WL$2$
ex2_WLd2 = WLdPermutations('1 2 3 4'.split(), 2, None, sigma)
ex2_WLd2
enumerate_paths(ex2_WLd2)
""" Explanation: WLd
End of explanation """
laa-1-yay/SDC1-DetectLaneLines
.ipynb_checkpoints/Laav_Lane_Lines_Detection-checkpoint.ipynb
gpl-3.0
#importing some useful packages import matplotlib.pyplot as plt import matplotlib.image as mpimg import numpy as np import cv2 %matplotlib inline """ Explanation: Self-Driving Car Engineer Nanodegree Project: Finding Lane Lines on the Road In this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip "raw-lines-example.mp4" (also contained in this repository) to see what the output should look like after using the helper functions below. Once you have a result that looks roughly like "raw-lines-example.mp4", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right. In addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a write up template that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the rubric points for this project. Let's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the "play" button above) to display the image. Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the "Kernel" menu above and selecting "Restart & Clear Output". The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Tranform line detection. 
You are also free to explore and try other techniques that were not presented in the lesson. Your goal is to piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.
<figure>
 <img src="examples/line-segments-example.jpg" width="380" alt="Combined Image" />
 <figcaption>
 <p></p>
 <p style="text-align: center;"> Your output should look something like this (above) after detecting line segments using the helper functions below </p>
 </figcaption>
</figure>
 <p></p>
<figure>
 <img src="examples/laneLines_thirdPass.jpg" width="380" alt="Combined Image" />
 <figcaption>
 <p></p>
 <p style="text-align: center;"> Your goal is to connect/average/extrapolate line segments to get output like this</p>
 </figcaption>
</figure>
<p></p>
Run the cell below to import some packages. If you get an import error for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.
Import Packages
End of explanation """
#reading in an image
img_name= 'solidWhiteRight.jpg'
image = mpimg.imread('test_images/' + img_name)

#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image)
# if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')

#DEFINE LIMIT FOR LANE's Y coordinate height
lane_limit_height = 330

#Store dimensions of image
# NOTE(review): these globals are computed from this one image and reused by
# draw_lines_improved() and the pipeline below; they assume all test images
# share this size -- confirm before processing differently sized images.
ysize = image.shape[0]
xsize = image.shape[1]
""" Explanation: Read in an Image
End of explanation """
import math
# import sys for computing max, min int
import sys

def grayscale(img):
    """Applies the Grayscale transform
    This will return an image with only one color channel
    but NOTE: to see the returned image as grayscale
    (assuming your grayscaled image is called 'gray')
    you should call plt.imshow(gray, cmap='gray')"""
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Or use BGR2GRAY if you read an image with cv2.imread()
    # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

def canny(img, low_threshold, high_threshold):
    """Applies the Canny transform"""
    return cv2.Canny(img, low_threshold, high_threshold)

def gaussian_blur(img, kernel_size):
    """Applies a Gaussian Noise kernel"""
    return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)

def region_of_interest(img, vertices):
    """
    Applies an image mask.

    Only keeps the region of the image defined by the polygon
    formed from `vertices`. The rest of the image is set to black.
    """
    #defining a blank mask to start with
    mask = np.zeros_like(img)

    #defining a 3 channel or 1 channel color to fill the mask with depending on the input image
    if len(img.shape) > 2:
        channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255

    #filling pixels inside the polygon defined by "vertices" with the fill color
    cv2.fillPoly(mask, vertices, ignore_mask_color)

    #returning the image only where mask pixels are nonzero
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image

def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
    """
    NOTE: this is the function you might want to use as a starting point once you want to
    average/extrapolate the line segments you detect to map out the full
    extent of the lane (going from the result shown in raw-lines-example.mp4
    to that shown in P1_example.mp4).

    Think about things like separating line segments by their
    slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
    line vs. the right line. Then, you can average the position of each of
    the lines and extrapolate to the top and bottom of the lane.

    This function draws `lines` with `color` and `thickness`.
    Lines are drawn on the image inplace (mutates the image).
    If you want to make the lines semi-transparent, think about combining
    this function with the weighted_img() function below
    """
    # ``````````````` BELOW WAS THE CODE TO PLOT SMALL LINES BEFORE AVERAGING AND EXTRAPOLATING`````````
    for line in lines:
        for x1,y1,x2,y2 in line:
            cv2.line(img, (x1, y1), (x2, y2), color, thickness)

# #````````````````````` NEW CODE FOR AVERAGING AND EXTRA POLATING BELOW```````````````````````````
def draw_lines_improved(img, lines, color=[255, 0, 0], thickness=2):
    """
    Draws ONE averaged, extrapolated line per lane side.

    The Hough segments in `lines` are split into left/right sets by the sign
    of their slope (image y grows downward, so the left lane has negative
    slope), a degree-1 polynomial is fit through each side's points with
    np.polyfit, and the two fitted lines are drawn from the bottom of the
    image up to the module-level `lane_limit_height` (using the module-level
    `ysize` for the image height).

    Lines are drawn on `img` in place (mutates the image).
    """
    l_x = []
    l_y = []
    r_x = []
    r_y = []
    # The goal is to have one straight line from the top right corner to all the way down.
    # First, separate x and y points.
    for line in lines:
        for x1,y1,x2,y2 in line:
            slope = (y2-y1)/(x2-x1)
            # `````````````````` In Image y is reversed. The higher y value is actually lower in the image.
            # `````````````````` The slope is negative for the left line, and positive for the right line.
            if slope<0:
                l_x += [x1, x2]
                l_y += [y1, y2]
            elif slope>0:
                r_x += [x1, x2]
                r_y += [y1, y2]

    # Then we can use np.polyfit to fit a line to these points.
    # A straight line can be represented with y = mx + b which is a polynomial of degree 1.
    l_slope_and_intercept = np.polyfit(l_x, l_y, 1)
    r_slope_and_intercept = np.polyfit(r_x, r_y, 1)
    # print will show value of m and c in 'y=mx +c' : [ 1.40241735 -21.23284749]

    l_m = l_slope_and_intercept[0]
    l_c = l_slope_and_intercept[1]
    r_m = r_slope_and_intercept[0]
    r_c = r_slope_and_intercept[1]

    l_y1 = ysize  # max size of image (bottom most point)
    l_x1 = int((l_y1 - l_c) / l_m)  # x = (y -c)/m
    l_y2 = lane_limit_height  # central point
    l_x2 = int((l_y2 - l_c) / l_m)

    r_y1 = ysize
    r_x1 = int((r_y1 - r_c) / r_m)
    r_y2 = lane_limit_height
    r_x2 = int((r_y2 - r_c) / r_m)

    # DRAW LINES
    cv2.line(img, (l_x1, l_y1), (l_x2, l_y2), color, thickness*5)
    cv2.line(img, (r_x1, r_y1), (r_x2, r_y2), color, thickness*5)

    # NOTE(review): two earlier commented-out prototype implementations
    # (outlier-filtered slope averaging, and min/max endpoint selection)
    # were removed here for readability.

def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """
    `img` should be the output of a Canny transform.

    Returns an image with hough lines drawn.
    """
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
    line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    # USE THE IMPROVED DRAW LINES
    # draw_lines(line_img, lines)
    draw_lines_improved(line_img, lines)
    return line_img

# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., λ=0.):
    """
    `img` is the output of the hough_lines(), An image with lines drawn on it.
    Should be a blank image (all black) with lines drawn on it.

    `initial_img` should be the image before any processing.

    The result image is computed as follows:

    initial_img * α + img * β + λ
    NOTE: initial_img and img must be the same shape!
    """
    return cv2.addWeighted(initial_img, α, img, β, λ)
""" Explanation: Ideas for Lane Detection Pipeline
Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:
cv2.inRange() for color selection
cv2.fillPoly() for regions selection
cv2.line() to draw lines on an image given endpoints
cv2.addWeighted() to coadd / overlay two images
cv2.cvtColor() to grayscale or change color
cv2.imwrite() to output images to file
cv2.bitwise_and() to apply a mask to an image
Check out the OpenCV documentation to learn about these and discover even more awesome functionality!
Helper Functions
Below are some helper functions to help get you started. They should look familiar from the lesson!
End of explanation """
import os
os.listdir("test_images/")
""" Explanation: Test Images
Build your pipeline to work on the images in the directory "test_images"
You should make sure your pipeline works well on these images before you try the videos.
End of explanation """
# TODO: Build your pipeline that will draw lane lines on the test_images
# then save them to the test_images directory.
# Run the full lane-finding pipeline on every still image in test_images/
# and save the annotated result into test_images_output/.
# NOTE(review): relies on module-level globals xsize, ysize and
# lane_limit_height defined earlier in the notebook (presumably the image
# width/height and the y-coordinate of the lane horizon) -- confirm.
for im in os.listdir("test_images/"):
    if im=='output':
        continue
    image = mpimg.imread('test_images/' + im)
    # Copy the image to prevent unnecessary changes to the original array
    image_cp = np.copy(image)
    # Convert image to grayscale
    gray = grayscale(image_cp)
    # Define a kernel size and apply Gaussian smoothing
    kernel_size = 5
    blur_gray = gaussian_blur(gray, kernel_size)
    # Define our parameters for Canny and apply
    low_threshold = 50
    high_threshold = 150
    edges = canny(blur_gray, low_threshold, high_threshold)
    # Define a polygon region of interest (trapezoid covering the lane ahead).
    # Keep in mind the origin (x=0, y=0) is in the upper left in image processing
    # Coordinates of vertices
    left_upper = [int(xsize/2)-50, lane_limit_height]
    right_upper = [int(xsize/2)+50, lane_limit_height]
    right_bottom = [xsize-50, ysize]
    left_bottom = [50, ysize]
    vertices = np.array( [[ left_upper,right_upper,right_bottom,left_bottom]], dtype=np.int32 )
    region_canny_image = region_of_interest(edges, vertices)
    # ````````````````` Test region of interest```````````````````````````````
    # mpimg.imsave('test_images/output/' + 'test_region_area.png', region_image)
    # plt.imshow(region_image)
    # Define the Hough transform parameters
    # Make a blank the same size as our image to draw on
    rho = 2
    theta = np.pi/180
    threshold = 32
    min_line_len = 10
    max_line_gap = 200 # Increasing this lengthens the line connecting the left lane segments
    # Run Hough on edge detected image
    line_image = hough_lines(region_canny_image, rho, theta, threshold, min_line_len, max_line_gap)
    # plt.imshow(line_image)
    # Combine the processed image after selecting the required region with the original img
    comb_hough_orig = weighted_img(line_image, image)
    # plt.imshow(comb_hough_orig)
    # Save the resultant annotated image
    img_out_name = im.split('.')[0] + '_out.png'
    mpimg.imsave('test_images_output/' + img_out_name, comb_hough_orig)
    # Read the output image back and display it for a quick visual check
    image_out = mpimg.imread('test_images_output/' + img_out_name)
    plt.imshow(image_out)
"""
Explanation: Build a Lane Finding Pipeline Build the
pipeline and run your solution on all test_images. Make copies into the test_images_output directory, and you can use the images in your writeup report. Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.
End of explanation
"""
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML

def process_image(image):
    """Per-frame lane-finding callback for moviepy's fl_image.

    Takes one RGB video frame and returns the same frame with the detected
    lane lines blended on top.  Mirrors the per-image pipeline used for the
    still test images earlier in the notebook.
    NOTE(review): reads module-level globals xsize, ysize and
    lane_limit_height defined earlier in the notebook -- confirm.
    """
    # NOTE: The output you return should be a color image (3 channel) for processing video below
    # TODO: put your pipeline here,
    # you should return the final output (image where lines are drawn on lanes)

    # Copy the image to prevent unnecessary changes to the original frame
    image_cp = np.copy(image)
    # Convert image to grayscale
    gray = grayscale(image_cp)
    # Define a kernel size and apply Gaussian smoothing
    kernel_size = 5
    blur_gray = gaussian_blur(gray, kernel_size)
    # Define our parameters for Canny and apply
    low_threshold = 50
    high_threshold = 150
    edges = canny(blur_gray, low_threshold, high_threshold)
    # Define a polygon region of interest (trapezoid covering the lane ahead).
    # Keep in mind the origin (x=0, y=0) is in the upper left in image processing
    # Coordinates of vertices
    left_upper = [int(xsize/2)-50, lane_limit_height]
    right_upper = [int(xsize/2)+50, lane_limit_height]
    right_bottom = [xsize-50, ysize]
    left_bottom = [50, ysize]
    vertices = np.array( [[ left_upper,right_upper,right_bottom,left_bottom]], dtype=np.int32 )
    region_canny_image = region_of_interest(edges, vertices)
    # ````````````````` Test region of interest```````````````````````````````
    # mpimg.imsave('test_images/output/' + 'test_region_area.png', region_image)
    # plt.imshow(region_image)
    # Define the Hough transform parameters
    # Make a blank the same size as our image to draw on
    rho = 2
    theta = np.pi/180
    threshold = 32
    min_line_len = 10
    max_line_gap = 200 # Increasing this lengthens the line connecting the left lane segments
    # Run Hough on edge detected image
    line_image = hough_lines(region_canny_image, rho, theta,
                             threshold, min_line_len, max_line_gap)
    # plt.imshow(line_image)
    # Combine the processed image after selecting the required region with the original img
    comb_hough_orig = weighted_img(line_image, image)
    # plt.imshow(comb_hough_orig)
    result = comb_hough_orig
    return result
"""
Explanation: Test on Videos You know what's cooler than drawing lanes over images? Drawing lanes over video! We can test our solution on two provided videos: solidWhiteRight.mp4 solidYellowLeft.mp4 Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips. If you get an error that looks like this: NeedDownloadError: Need ffmpeg exe. You can download it by calling: imageio.plugins.ffmpeg.download() Follow the instructions in the error message and check out this forum post for more troubleshooting tips across operating systems.
End of explanation
"""
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(white_output, audio=False)
"""
Explanation: Let's try the one with the solid white lane on the right first ...
End of explanation """ HTML(""" <video width="960" height="540" controls> <source src="{0}"> </video> """.format(white_output)) """ Explanation: Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice. End of explanation """ yellow_output = 'test_videos_output/solidYellowLeft.mp4' ## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video ## To do so add .subclip(start_second,end_second) to the end of the line below ## Where start_second and end_second are integer values representing the start and end of the subclip ## You may also uncomment the following line for a subclip of the first 5 seconds ##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5) clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4') yellow_clip = clip2.fl_image(process_image) %time yellow_clip.write_videofile(yellow_output, audio=False) HTML(""" <video width="960" height="540" controls> <source src="{0}"> </video> """.format(yellow_output)) """ Explanation: Improve the draw_lines() function At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4". Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. 
The lines should start from the bottom of the image and extend out to the top of the region of interest. Now for the one with the solid yellow lane on the left. This one's more tricky! End of explanation """ challenge_output = 'test_videos_output/challenge.mp4' ## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video ## To do so add .subclip(start_second,end_second) to the end of the line below ## Where start_second and end_second are integer values representing the start and end of the subclip ## You may also uncomment the following line for a subclip of the first 5 seconds ##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5) clip3 = VideoFileClip('test_videos/challenge.mp4') challenge_clip = clip3.fl_image(process_image) %time challenge_clip.write_videofile(challenge_output, audio=False) HTML(""" <video width="960" height="540" controls> <source src="{0}"> </video> """.format(challenge_output)) """ Explanation: Writeup and Submission If you're satisfied with your video outputs, it's time to make the report writeup in a pdf or markdown file. Once you have this Ipython notebook ready along with the writeup, it's time to submit for review! Here is a link to the writeup template file. Optional Challenge Try your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project! End of explanation """
sorig/shogun
doc/ipython-notebooks/structure/FGM.ipynb
bsd-3-clause
%pylab inline %matplotlib inline import os SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data') import numpy as np import scipy.io dataset = scipy.io.loadmat(os.path.join(SHOGUN_DATA_DIR, 'ocr/ocr_taskar.mat')) # patterns for training p_tr = dataset['patterns_train'] # patterns for testing p_ts = dataset['patterns_test'] # labels for training l_tr = dataset['labels_train'] # labels for testing l_ts = dataset['labels_test'] # feature dimension n_dims = p_tr[0,0].shape[0] # number of states n_stats = 26 # number of training samples n_tr_samples = p_tr.shape[1] # number of testing samples n_ts_samples = p_ts.shape[1] """ Explanation: General Structured Output Models with Shogun Machine Learning Toolbox Shell Hu (GitHub ID: hushell) Thanks Patrick Pletscher and Fernando J. Iglesias García for taking time to help me finish the project! Shoguners = awesome! Me = grateful! Introduction This notebook illustrates the training of a <a href="http://en.wikipedia.org/wiki/Factor_graph">factor graph</a> model using <a href="http://en.wikipedia.org/wiki/Structured_support_vector_machine">structured SVM</a> in Shogun. We begin by giving a brief outline of factor graphs and <a href="http://en.wikipedia.org/wiki/Structured_prediction">structured output learning</a> followed by the corresponding API in Shogun. Finally, we test the scalability by performing an experiment on a real <a href="http://en.wikipedia.org/wiki/Optical_character_recognition">OCR</a> data set for <a href="http://en.wikipedia.org/wiki/Handwriting_recognition">handwritten character recognition</a>. Factor Graph A factor graph explicitly represents the factorization of an undirected graphical model in terms of a set of factors (potentials), each of which is defined on a clique in the original graph [1]. 
For example, a MRF distribution can be factorized as $$ P(\mathbf{y}) = \frac{1}{Z} \prod_{F \in \mathcal{F}} \theta_F(\mathbf{y}_F), $$ where $F$ is the factor index, $\theta_F(\mathbf{y}_F)$ is the energy with respect to assignment $\mathbf{y}_F$. In this demo, we focus only on table representation of factors. Namely, each factor holds an energy table $\theta_F$, which can be viewed as an unnormalized CPD. According to different factorizations, there are different types of factors. Usually we assume the Markovian property is held, that is, factors have the same parameterization if they belong to the same type, no matter how location or time changes. In addition, we have parameter free factor type, but nothing to learn for such kinds of types. More detailed implementation will be explained later. Structured Prediction Structured prediction typically involves an input $\mathbf{x}$ (can be structured) and a structured output $\mathbf{y}$. A joint feature map $\Phi(\mathbf{x},\mathbf{y})$ is defined to incorporate structure information into the labels, such as chains, trees or general graphs. In general, the linear parameterization will be used to give the prediction rule. We leave the kernelized version for future work. $$ \hat{\mathbf{y}} = \underset{\mathbf{y} \in \mathcal{Y}}{\operatorname{argmax}} \langle \mathbf{w}, \Phi(\mathbf{x},\mathbf{y}) \rangle $$ where $\Phi(\mathbf{x},\mathbf{y})$ is the feature vector by mapping local factor features to corresponding locations in terms of $\mathbf{y}$, and $\mathbf{w}$ is the global parameter vector. In factor graph model, parameters are associated with a set of factor types. So $\mathbf{w}$ is a collection of local parameters. The parameters are learned by regularized risk minimization, where the risk defined by user provided loss function $\Delta(\mathbf{y},\mathbf{\hat{y}})$ is usually non-convex and non-differentiable, e.g. the Hamming loss. 
So the empirical risk is defined in terms of the surrogate hinge loss $H_i(\mathbf{w}) = \max_{\mathbf{y} \in \mathcal{Y}} \Delta(\mathbf{y}_i,\mathbf{y}) - \langle \mathbf{w}, \Psi_i(\mathbf{y}) \rangle $, which is an upper bound of the user defined loss. Here $\Psi_i(\mathbf{y}) = \Phi(\mathbf{x}_i,\mathbf{y}_i) - \Phi(\mathbf{x}_i,\mathbf{y})$. The training objective is given by $$ \min_{\mathbf{w}} \frac{\lambda}{2} ||\mathbf{w}||^2 + \frac{1}{N} \sum_{i=1}^N H_i(\mathbf{w}). $$ In Shogun's factor graph model, the corresponding implemented functions are: <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CStructuredModel.html#a15bd99e15bbf0daa8a727d03dbbf4bcd">FactorGraphModel::get_joint_feature_vector()</a> $\longleftrightarrow \Phi(\mathbf{x}_i,\mathbf{y})$ <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CFactorGraphModel.html#a36665cfdd7ea2dfcc9b3c590947fe67f">FactorGraphModel::argmax()</a> $\longleftrightarrow H_i(\mathbf{w})$ <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CFactorGraphModel.html#a17dac99e933f447db92482a6dce8489b">FactorGraphModel::delta_loss()</a> $\longleftrightarrow \Delta(\mathbf{y}_i,\mathbf{y})$ Experiment: OCR Show Data First of all, we load the OCR data from a prepared mat file. The raw data can be downloaded from <a href="http://www.seas.upenn.edu/~taskar/ocr/">http://www.seas.upenn.edu/~taskar/ocr/</a>. It has 6876 handwritten words with an average length of 8 letters from 150 different persons. Each letter is rasterized into a binary image of size 16 by 8 pixels. Thus, each $\mathbf{y}$ is a chain, and each node has 26 possible states denoting ${a,\cdots,z}$. 
End of explanation """ import matplotlib.pyplot as plt def show_word(patterns, index): """show a word with padding""" plt.rc('image', cmap='binary') letters = patterns[0,index][:128,:] n_letters = letters.shape[1] for l in range(n_letters): lett = np.transpose(np.reshape(letters[:,l], (8,16))) lett = np.hstack((np.zeros((16,1)), lett, np.zeros((16,1)))) lett = np.vstack((np.zeros((1,10)), lett, np.zeros((1,10)))) subplot(1,n_letters,l+1) imshow(lett) plt.xticks(()) plt.yticks(()) plt.tight_layout() show_word(p_tr, 174) show_word(p_tr, 471) show_word(p_tr, 57) """ Explanation: Few examples of the handwritten words are shown below. Note that the first capitalized letter has been removed. End of explanation """ from shogun import TableFactorType # unary, type_id = 0 cards_u = np.array([n_stats], np.int32) w_gt_u = np.zeros(n_stats*n_dims) fac_type_u = TableFactorType(0, cards_u, w_gt_u) # pairwise, type_id = 1 cards = np.array([n_stats,n_stats], np.int32) w_gt = np.zeros(n_stats*n_stats) fac_type = TableFactorType(1, cards, w_gt) # first bias, type_id = 2 cards_s = np.array([n_stats], np.int32) w_gt_s = np.zeros(n_stats) fac_type_s = TableFactorType(2, cards_s, w_gt_s) # last bias, type_id = 3 cards_t = np.array([n_stats], np.int32) w_gt_t = np.zeros(n_stats) fac_type_t = TableFactorType(3, cards_t, w_gt_t) # all initial parameters w_all = [w_gt_u,w_gt,w_gt_s,w_gt_t] # all factor types ftype_all = [fac_type_u,fac_type,fac_type_s,fac_type_t] """ Explanation: Define Factor Types and Build Factor Graphs Let's define 4 factor types, such that a word will be able to be modeled as a chain graph. The unary factor type will be used to define unary potentials that capture the appearance likelihoods of each letter. In our case, each letter has $16 \times 8$ pixels, thus there are $(16 \times 8 + 1) \times 26$ parameters. Here the additional bits in the parameter vector are bias terms. One for each state. 
The pairwise factor type will be used to define pairwise potentials between each pair of letters. This type in fact gives the Potts potentials. There are $26 \times 26$ parameters. The bias factor type for the first letter is a compensation factor type, since the interaction is one-sided. So there are $26$ parameters to be learned. The bias factor type for the last letter, which has the same intuition as the last item. There are also $26$ parameters. Putting all parameters together, the global parameter vector $\mathbf{w}$ has length $4082$. End of explanation """ def prepare_data(x, y, ftype, num_samples): """prepare FactorGraphFeatures and FactorGraphLabels """ from shogun import Factor, TableFactorType, FactorGraph from shogun import FactorGraphObservation, FactorGraphLabels, FactorGraphFeatures samples = FactorGraphFeatures(num_samples) labels = FactorGraphLabels(num_samples) for i in range(num_samples): n_vars = x[0,i].shape[1] data = x[0,i].astype(np.float64) vc = np.array([n_stats]*n_vars, np.int32) fg = FactorGraph(vc) # add unary factors for v in range(n_vars): datau = data[:,v] vindu = np.array([v], np.int32) facu = Factor(ftype[0], vindu, datau) fg.add_factor(facu) # add pairwise factors for e in range(n_vars-1): datap = np.array([1.0]) vindp = np.array([e,e+1], np.int32) facp = Factor(ftype[1], vindp, datap) fg.add_factor(facp) # add bias factor to first letter datas = np.array([1.0]) vinds = np.array([0], np.int32) facs = Factor(ftype[2], vinds, datas) fg.add_factor(facs) # add bias factor to last letter datat = np.array([1.0]) vindt = np.array([n_vars-1], np.int32) fact = Factor(ftype[3], vindt, datat) fg.add_factor(fact) # add factor graph samples.add_sample(fg) # add corresponding label states_gt = y[0,i].astype(np.int32) states_gt = states_gt[0,:]; # mat to vector loss_weights = np.array([1.0/n_vars]*n_vars) fg_obs = FactorGraphObservation(states_gt, loss_weights) labels.add_label(fg_obs) return samples, labels # prepare training pairs (factor 
graph, node states) n_tr_samples = 350 # choose a subset of training data to avoid time out on buildbot samples, labels = prepare_data(p_tr, l_tr, ftype_all, n_tr_samples) """ Explanation: Next, we write a function to construct the factor graphs and prepare labels for training. For each factor graph instance, the structure is a chain but the number of nodes and edges depend on the number of letters, where unary factors will be added for each letter, pairwise factors will be added for each pair of neighboring letters. Besides, the first and last letter will get an additional bias factor respectively. End of explanation """ try: import networkx as nx # pip install networkx except ImportError: import pip pip.main(['install', '--user', 'networkx']) import networkx as nx import matplotlib.pyplot as plt # create a graph G = nx.Graph() node_pos = {} # add variable nodes, assuming there are 3 letters G.add_nodes_from(['v0','v1','v2']) for i in range(3): node_pos['v%d' % i] = (2*i,1) # add factor nodes G.add_nodes_from(['F0','F1','F2','F01','F12','Fs','Ft']) for i in range(3): node_pos['F%d' % i] = (2*i,1.006) for i in range(2): node_pos['F%d%d' % (i,i+1)] = (2*i+1,1) node_pos['Fs'] = (-1,1) node_pos['Ft'] = (5,1) # add edges to connect variable nodes and factor nodes G.add_edges_from([('v%d' % i,'F%d' % i) for i in range(3)]) G.add_edges_from([('v%d' % i,'F%d%d' % (i,i+1)) for i in range(2)]) G.add_edges_from([('v%d' % (i+1),'F%d%d' % (i,i+1)) for i in range(2)]) G.add_edges_from([('v0','Fs'),('v2','Ft')]) # draw graph fig, ax = plt.subplots(figsize=(6,2)) nx.draw_networkx_nodes(G,node_pos,nodelist=['v0','v1','v2'],node_color='white',node_size=700,ax=ax) nx.draw_networkx_nodes(G,node_pos,nodelist=['F0','F1','F2'],node_color='yellow',node_shape='s',node_size=300,ax=ax) nx.draw_networkx_nodes(G,node_pos,nodelist=['F01','F12'],node_color='blue',node_shape='s',node_size=300,ax=ax) 
nx.draw_networkx_nodes(G,node_pos,nodelist=['Fs'],node_color='green',node_shape='s',node_size=300,ax=ax) nx.draw_networkx_nodes(G,node_pos,nodelist=['Ft'],node_color='purple',node_shape='s',node_size=300,ax=ax) nx.draw_networkx_edges(G,node_pos,alpha=0.7) plt.axis('off') plt.tight_layout() """ Explanation: An example of graph structure is visualized as below, from which you may have a better sense how a factor graph being built. Note that different colors are used to represent different factor types. End of explanation """ from shogun import FactorGraphModel, TREE_MAX_PROD # create model and register factor types model = FactorGraphModel(samples, labels, TREE_MAX_PROD) model.add_factor_type(ftype_all[0]) model.add_factor_type(ftype_all[1]) model.add_factor_type(ftype_all[2]) model.add_factor_type(ftype_all[3]) """ Explanation: Training Now we can create the factor graph model and start training. We will use the tree max-product belief propagation to do MAP inference. End of explanation """ from shogun import DualLibQPBMSOSVM from shogun import BmrmStatistics import pickle import time # create bundle method SOSVM, there are few variants can be chosen # BMRM, Proximal Point BMRM, Proximal Point P-BMRM, NCBM # usually the default one i.e. BMRM is good enough # lambda is set to 1e-2 bmrm = DualLibQPBMSOSVM(model, labels, 0.01) bmrm.put('m_TolAbs', 20.0) bmrm.put('verbose', True) bmrm.set_store_train_info(True) # train t0 = time.time() bmrm.train() t1 = time.time() w_bmrm = bmrm.get_real_vector('m_w') print("BMRM took", t1 - t0, "seconds.") """ Explanation: In Shogun, we implemented several batch solvers and online solvers. Let's first try to train the model using a batch solver. 
We choose the dual bundle method solver (<a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CDualLibQPBMSOSVM.html">DualLibQPBMSOSVM</a>) [2], since in practice it is slightly faster than the primal n-slack cutting plane solver (<a a href="http://www.shogun-toolbox.org/doc/en/latest/PrimalMosekSOSVM_8h.html">PrimalMosekSOSVM</a>) [3]. However, it still will take a while until convergence. Briefly, in each iteration, a gradually tighter piece-wise linear lower bound of the objective function will be constructed by adding more cutting planes (most violated constraints), then the approximate QP will be solved. Finding a cutting plane involves calling the max oracle $H_i(\mathbf{w})$ and in average $N$ calls are required in an iteration. This is basically why the training is time consuming. End of explanation """ import matplotlib.pyplot as plt fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12,4)) primal_bmrm = bmrm.get_helper().get_real_vector('primal') dual_bmrm = bmrm.get_result().get_hist_Fd_vector() len_iter = min(primal_bmrm.size, dual_bmrm.size) primal_bmrm = primal_bmrm[1:len_iter] dual_bmrm = dual_bmrm[1:len_iter] # plot duality gaps xs = range(dual_bmrm.size) axes[0].plot(xs, (primal_bmrm-dual_bmrm), label='duality gap') axes[0].set_xlabel('iteration') axes[0].set_ylabel('duality gap') axes[0].legend(loc=1) axes[0].set_title('duality gaps'); axes[0].grid(True) # plot primal and dual values xs = range(dual_bmrm.size-1) axes[1].plot(xs, primal_bmrm[1:], label='primal') axes[1].plot(xs, dual_bmrm[1:], label='dual') axes[1].set_xlabel('iteration') axes[1].set_ylabel('objective') axes[1].legend(loc=1) axes[1].set_title('primal vs dual'); axes[1].grid(True) """ Explanation: Let's check the duality gap to see if the training has converged. We aim at minimizing the primal problem while maximizing the dual problem. By the weak duality theorem, the optimal value of the primal problem is always greater than or equal to dual problem. 
Thus, we could expect the duality gap will decrease during the time. A relative small and stable duality gap may indicate the convergence. In fact, the gap doesn't have to become zero, since we know it is not far away from the local minima. End of explanation """ # statistics bmrm_stats = bmrm.get_result() nCP = bmrm_stats.nCP nzA = bmrm_stats.nzA print('number of cutting planes: %d' % nCP) print('number of active cutting planes: %d' % nzA) """ Explanation: There are other statitics may also be helpful to check if the solution is good or not, such as the number of cutting planes, from which we may have a sense how tight the piece-wise lower bound is. In general, the number of cutting planes should be much less than the dimension of the parameter vector. End of explanation """ from shogun import StochasticSOSVM # the 3rd parameter is do_weighted_averaging, by turning this on, # a possibly faster convergence rate may be achieved. # the 4th parameter controls outputs of verbose training information sgd = StochasticSOSVM(model, labels, True, True) sgd.put('num_iter', 100) sgd.put('lambda', 0.01) # train t0 = time.time() sgd.train() t1 = time.time() w_sgd = sgd.get_real_vector('m_w') print("SGD took", t1 - t0, "seconds.") """ Explanation: In our case, we have 101 active cutting planes, which is much less than 4082, i.e. the number of parameters. We could expect a good model by looking at these statistics. Now come to the online solvers. Unlike the cutting plane algorithms re-optimizes over all the previously added dual variables, an online solver will update the solution based on a single point. This difference results in a faster convergence rate, i.e. less oracle calls, please refer to Table 1 in [4] for more detail. Here, we use the stochastic subgradient descent (<a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CStochasticSOSVM.html">StochasticSOSVM</a>) to compare with the BMRM algorithm shown before. 
End of explanation """ fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12,4)) primal_sgd = sgd.get_helper().get_real_vector('primal') xs = range(dual_bmrm.size-1) axes[0].plot(xs, primal_bmrm[1:], label='BMRM') axes[0].plot(range(99), primal_sgd[1:100], label='SGD') axes[0].set_xlabel('effecitve passes') axes[0].set_ylabel('primal objective') axes[0].set_title('whole training progress') axes[0].legend(loc=1) axes[0].grid(True) axes[1].plot(range(99), primal_bmrm[1:100], label='BMRM') axes[1].plot(range(99), primal_sgd[1:100], label='SGD') axes[1].set_xlabel('effecitve passes') axes[1].set_ylabel('primal objective') axes[1].set_title('first 100 effective passes') axes[1].legend(loc=1) axes[1].grid(True) """ Explanation: We compare the SGD and BMRM in terms of the primal objectives versus effective passes. We first plot the training progress (until both algorithms converge) and then zoom in to check the first 100 passes. In order to make a fair comparison, we set the regularization constant to 1e-2 for both algorithms. End of explanation """ fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12,4)) terr_bmrm = bmrm.get_helper().get_real_vector('train_error') terr_sgd = sgd.get_helper().get_real_vector('train_error') xs = range(terr_bmrm.size-1) axes[0].plot(xs, terr_bmrm[1:], label='BMRM') axes[0].plot(range(99), terr_sgd[1:100], label='SGD') axes[0].set_xlabel('effecitve passes') axes[0].set_ylabel('training error') axes[0].set_title('whole training progress') axes[0].legend(loc=1) axes[0].grid(True) axes[1].plot(range(99), terr_bmrm[1:100], label='BMRM') axes[1].plot(range(99), terr_sgd[1:100], label='SGD') axes[1].set_xlabel('effecitve passes') axes[1].set_ylabel('training error') axes[1].set_title('first 100 effective passes') axes[1].legend(loc=1) axes[1].grid(True) """ Explanation: As is shown above, the SGD solver uses less oracle calls to get to converge. 
Note that the timing is 2 times slower than they actually need, since there are additional computations of primal objective and training error in each pass. The training errors of both algorithms for each pass are shown in below. End of explanation """ def hinton(matrix, max_weight=None, ax=None): """Draw Hinton diagram for visualizing a weight matrix.""" ax = ax if ax is not None else plt.gca() if not max_weight: max_weight = 2**np.ceil(np.log(np.abs(matrix).max())/np.log(2)) ax.patch.set_facecolor('gray') ax.set_aspect('equal', 'box') ax.xaxis.set_major_locator(plt.NullLocator()) ax.yaxis.set_major_locator(plt.NullLocator()) for (x,y),w in np.ndenumerate(matrix): color = 'white' if w > 0 else 'black' size = np.sqrt(np.abs(w)) rect = plt.Rectangle([x - size / 2, y - size / 2], size, size, facecolor=color, edgecolor=color) ax.add_patch(rect) ax.autoscale_view() ax.invert_yaxis() # get pairwise parameters, also accessible from # w[n_dims*n_stats:n_dims*n_stats+n_stats*n_stats] model.w_to_fparams(w_sgd) # update factor parameters w_p = ftype_all[1].get_w() w_p = np.reshape(w_p,(n_stats,n_stats)) hinton(w_p) """ Explanation: Interestingly, the training errors of SGD solver are lower than BMRM's in first 100 passes, but in the end the BMRM solver obtains a better training performance. A probable explanation is that BMRM uses very limited number of cutting planes at beginning, which form a poor approximation of the objective function. As the number of cutting planes increasing, we got a tighter piecewise lower bound, thus improve the performance. In addition, we would like to show the pairwise weights, which may learn important co-occurrances of letters. The hinton diagram is a wonderful tool for visualizing 2D data, in which positive and negative values are represented by white and black squares, respectively, and the size of each square represents the magnitude of each value. In our case, a smaller number i.e. 
a large black square indicates the two letters tend to coincide. End of explanation """ # get testing data samples_ts, labels_ts = prepare_data(p_ts, l_ts, ftype_all, n_ts_samples) from shogun import FactorGraphFeatures, FactorGraphObservation, TREE_MAX_PROD, MAPInference # get a factor graph instance from test data fg0 = samples_ts.get_sample(100) fg0.compute_energies() fg0.connect_components() # create a MAP inference using tree max-product infer_met = MAPInference(fg0, TREE_MAX_PROD) infer_met.inference() # get inference results y_pred = infer_met.get_structured_outputs() y_truth = FactorGraphObservation.obtain_from_generic(labels_ts.get_label(100)) print(y_pred.get_data()) print(y_truth.get_data()) """ Explanation: Inference Next, we show how to do inference with the learned model parameters for a given data point. End of explanation """ from shogun import SOSVMHelper # training error of BMRM method bmrm.put('m_w', w_bmrm) model.w_to_fparams(w_bmrm) lbs_bmrm = bmrm.apply() acc_loss = 0.0 ave_loss = 0.0 for i in range(n_tr_samples): y_pred = lbs_bmrm.get_label(i) y_truth = labels.get_label(i) acc_loss = acc_loss + model.delta_loss(y_truth, y_pred) ave_loss = acc_loss / n_tr_samples print('BMRM: Average training error is %.4f' % ave_loss) # training error of stochastic method print('SGD: Average training error is %.4f' % SOSVMHelper.average_loss(w_sgd, model)) # testing error bmrm.set_features(samples_ts) bmrm.set_labels(labels_ts) lbs_bmrm_ts = bmrm.apply() acc_loss = 0.0 ave_loss_ts = 0.0 for i in range(n_ts_samples): y_pred = lbs_bmrm_ts.get_label(i) y_truth = labels_ts.get_label(i) acc_loss = acc_loss + model.delta_loss(y_truth, y_pred) ave_loss_ts = acc_loss / n_ts_samples print('BMRM: Average testing error is %.4f' % ave_loss_ts) # testing error of stochastic method print('SGD: Average testing error is %.4f' % SOSVMHelper.average_loss(sgd.get_real_vector('m_w'), model)) """ Explanation: Evaluation In the end, we check average training error and average 
testing error. The evaluation can be done by two methods. We can either use the apply() function in the structured output machine or use the <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CSOSVMHelper.html">SOSVMHelper</a>. End of explanation """
statsmodels/statsmodels.github.io
v0.13.0/examples/notebooks/generated/robust_models_0.ipynb
bsd-3-clause
%matplotlib inline import matplotlib.pyplot as plt import numpy as np import statsmodels.api as sm """ Explanation: Robust Linear Models End of explanation """ data = sm.datasets.stackloss.load() data.exog = sm.add_constant(data.exog) """ Explanation: Estimation Load data: End of explanation """ huber_t = sm.RLM(data.endog, data.exog, M=sm.robust.norms.HuberT()) hub_results = huber_t.fit() print(hub_results.params) print(hub_results.bse) print( hub_results.summary( yname="y", xname=["var_%d" % i for i in range(len(hub_results.params))] ) ) """ Explanation: Huber's T norm with the (default) median absolute deviation scaling End of explanation """ hub_results2 = huber_t.fit(cov="H2") print(hub_results2.params) print(hub_results2.bse) """ Explanation: Huber's T norm with 'H2' covariance matrix End of explanation """ andrew_mod = sm.RLM(data.endog, data.exog, M=sm.robust.norms.AndrewWave()) andrew_results = andrew_mod.fit(scale_est=sm.robust.scale.HuberScale(), cov="H3") print("Parameters: ", andrew_results.params) """ Explanation: Andrew's Wave norm with Huber's Proposal 2 scaling and 'H3' covariance matrix End of explanation """ nsample = 50 x1 = np.linspace(0, 20, nsample) X = np.column_stack((x1, (x1 - 5) ** 2)) X = sm.add_constant(X) sig = 0.3 # smaller error variance makes OLS<->RLM contrast bigger beta = [5, 0.5, -0.0] y_true2 = np.dot(X, beta) y2 = y_true2 + sig * 1.0 * np.random.normal(size=nsample) y2[[39, 41, 43, 45, 48]] -= 5 # add some outliers (10% of nsample) """ Explanation: See help(sm.RLM.fit) for more options and module sm.robust.scale for scale options Comparing OLS and RLM Artificial data with outliers: End of explanation """ res = sm.OLS(y2, X).fit() print(res.params) print(res.bse) print(res.predict()) """ Explanation: Example 1: quadratic function with linear truth Note that the quadratic term in OLS regression will capture outlier effects. 
End of explanation """ resrlm = sm.RLM(y2, X).fit() print(resrlm.params) print(resrlm.bse) """ Explanation: Estimate RLM: End of explanation """ fig = plt.figure(figsize=(12, 8)) ax = fig.add_subplot(111) ax.plot(x1, y2, "o", label="data") ax.plot(x1, y_true2, "b-", label="True") pred_ols = res.get_prediction() iv_l = pred_ols.summary_frame()["obs_ci_lower"] iv_u = pred_ols.summary_frame()["obs_ci_upper"] ax.plot(x1, res.fittedvalues, "r-", label="OLS") ax.plot(x1, iv_u, "r--") ax.plot(x1, iv_l, "r--") ax.plot(x1, resrlm.fittedvalues, "g.-", label="RLM") ax.legend(loc="best") """ Explanation: Draw a plot to compare OLS estimates to the robust estimates: End of explanation """ X2 = X[:, [0, 1]] res2 = sm.OLS(y2, X2).fit() print(res2.params) print(res2.bse) """ Explanation: Example 2: linear function with linear truth Fit a new OLS model using only the linear term and the constant: End of explanation """ resrlm2 = sm.RLM(y2, X2).fit() print(resrlm2.params) print(resrlm2.bse) """ Explanation: Estimate RLM: End of explanation """ pred_ols = res2.get_prediction() iv_l = pred_ols.summary_frame()["obs_ci_lower"] iv_u = pred_ols.summary_frame()["obs_ci_upper"] fig, ax = plt.subplots(figsize=(8, 6)) ax.plot(x1, y2, "o", label="data") ax.plot(x1, y_true2, "b-", label="True") ax.plot(x1, res2.fittedvalues, "r-", label="OLS") ax.plot(x1, iv_u, "r--") ax.plot(x1, iv_l, "r--") ax.plot(x1, resrlm2.fittedvalues, "g.-", label="RLM") legend = ax.legend(loc="best") """ Explanation: Draw a plot to compare OLS estimates to the robust estimates: End of explanation """
lyndond/Analyzing_Neural_Time_Series
chapter33.ipynb
mit
import numpy as np import matplotlib.pyplot as plt import scipy as sp from scipy.stats import norm from scipy.signal import convolve2d import skimage.measure """ Explanation: Chapter 33. Nonparametric permutation testing End of explanation """ x = np.arange(-5,5, .01) pdf = norm.pdf(x) data = np.random.randn(1000) fig, ax = plt.subplots(1,2, sharex='all') ax[0].plot(x, pdf) ax[0].set(ylabel='PDF', xlabel='Statistical value') ax[1].hist(data, bins=50) ax[1].set(ylabel='counts') fig.tight_layout() """ Explanation: Figure 33.1 End of explanation """ print(f'p_n = {sum(data>2)/1000:.3f}') print(f'p_z = {1-norm.cdf(2):.3f}') """ Explanation: 33.3 Using the same fig/data as 33.1 End of explanation """ np.random.seed(1) # create random smoothed map xi, yi = np.meshgrid(np.arange(-10, 11), np.arange(-10, 11)) zi = xi**2 + yi**2 zi = 1 - (zi/np.max(zi)) map = convolve2d(np.random.randn(100,100), zi,'same') # threshold at arb value mapt = map.copy() mapt[(np.abs(map)<map.flatten().std()*2)] = 0 # turn binary bw_map = mapt!=0 conn_comp = skimage.measure.label(bw_map) fig, ax = plt.subplots(1,2,sharex='all',sharey='all') ax[0].imshow(mapt) ax[1].imshow(conn_comp) print(f'There are {len(np.unique(conn_comp))} unique blobs') """ Explanation: 33.5/6 These are generated in chap 34. 
33.8 End of explanation """ def max_blob_size(img): """helper function to compute max blob size""" bw_img = img != 0 blobbed = skimage.measure.label(bw_img) num_blobs = len(np.unique(blobbed)) max_size = max([np.sum(blobbed==i) for i in range(1, num_blobs)]) return max_size n_perms = 1000 max_sizes = [] for _ in range(n_perms): mapt_flat = mapt.flatten() rand_flat = np.random.permutation(mapt_flat) mapt_permuted = rand_flat.reshape(mapt.shape) max_sizes.append(max_blob_size(mapt_permuted)) plt.hist(max_sizes, label='null') plt.vlines(max_blob_size(mapt), 0, 200, label='true', color='red') plt.legend() """ Explanation: 33.9 Rather than do perm testing on the spectrogram I'll just write the code below using the data we generated above. End of explanation """
eds-uga/csci4360-fa17
workshops/w8/CSCI+6360-Data+Science-Workshops.ipynb
mit
# Autoendoer using H2o
#CSCI6360 H2O WORKSHOP
from IPython.display import Image,display
from IPython.core.display import HTML
import matplotlib.pyplot as plot
from h2o.estimators.deeplearning import H2ODeepLearningEstimator
from h2o.grid.grid_search import H2OGridSearch

#special thanks to wikipedia for the image
#Code available at https://github.com/CodeMaster001/CSCI6360
# Show reference figures of the autoencoder architecture and loss.
img = Image(url="images/autoencoder_structure.png")
display(img)
img_1 = Image(url="images/autoencoder_equation.png")  #special thanks to wikipedia for the image
display(img_1)
img_1 = Image(url="images/autoencoder_network.png")  #special thanks to ufld.stanford.edu for the image
display(img_1)
"""
Explanation: First let us discuss what exactly are AutoEncoders ? End of explanation
"""
import h2o
h2o.init() #initialize h2o cluster
#Once h2o is initialized it actually automatically sets up the spark cluster if spark is configured as a backend,
#applies same for mxnet and tensorflow
# NOTE(review): second init call — presumably re-attaches to the cluster on
# localhost:54323; confirm it does not start a second cluster.
h2o.init(ip="localhost", port=54323)
"""
Explanation: This is a workshop on H2O,a library that is used extensively in the production environment, widely used in healthcare and finance Here is its OFFICIAL WEBSITE https://www.h2o.ai First, lets create a seperate environment for h2o in Anaconda and performs a switch to that environment. conda create --name h2o-py python=3.5 h2o h2o-py As,I am currently in Mac , I would like to use a UI as I am a bit confortable with it , reducing complexity is nice !!! What is h2o? A library which is used for building machine learning models at ease on huge dataset.It supports mxnet, tensorflow and caffe. It is not an alternative for any of those, its just exends the backent (h2o.ai!!) Other examples that work like h2o is keras. We are now going to import h2o inside python Adventages of having h2o : 1.Notable adventages variation in Stochastic Gradient descent implementation H2O SGD algorithm is executed in parallel across all cores. The training set is also distributed across all nodes. At the final an average is taken of all the values. For more detials on Page 16. http://docs.h2o.ai/h2o/latest-stable/h2o-docs/booklets/DeepLearningBooklet.pdf Lets start h2o programming End of explanation
"""
h2o.cluster().show_status()
"""
Explanation: Once h2o is initialized it actually automatically sets up the spark cluster, if spark is configured as a backend, applies same for mxnet, tensorflow and theano backends. We will discuss shortly how to use spark. Please use Sparkling Water if you want to use H2O wth spark. Now lets see our cluster status info. End of explanation
"""
h2o.ls() #list files
#Now lets import a file to H2o cluster
h2o.import_file("LICENSE")
h2o.ls() #first let us see if License1 file is actually present in cluster
#h2o.remove("LICENSE1.hex") #REMOVE THE LICENSE FILE
h2o.remove("LICENSE")
h2o.ls()
help(h2o.import_file)
"""
Explanation: Lets us import a file and see if its added to cluster End of explanation
"""
train = h2o.import_file("data/ecg_discord_train.csv")
train.summary()
"""
Explanation: Lets load the ECG training dataset End of explanation
"""
test = h2o.import_file("data/ecg_discord_test.csv")

# Unsupervised autoencoder: reconstruct the input through a 32-32-32 net.
model = H2ODeepLearningEstimator(activation="RectifierWithDropout", hidden=[32,32,32],
                                 autoencoder=True,input_dropout_ratio=0.2,sparse=True,l1=1e-5,epochs=10)
model.train(x=train.names,training_frame=train,validation_frame=test)
model.predict(test)

# Supervised variant: last column is the target, the rest are features.
model = H2ODeepLearningEstimator(activation="RectifierWithDropout", hidden=[32,32,32],
                                 autoencoder=False,input_dropout_ratio=0.2,sparse=True,l1=1e-5,epochs=10)
model.train(x=train.names[:-1],y=train.names[-1],training_frame=train,validation_frame=test)
print(train.names[-1])

# Persist the trained model to disk and read it back.
model_path = h2o.save_model(model = model,force = True)
print(model_path)
saved_model = h2o.load_model(model_path)
print(saved_model)

# Grid search over the input dropout ratio only.
hyper_parameters = {'input_dropout_ratio':[0.1,0.2,0.5,0.7]}
h2o_gridSearch = H2OGridSearch(H2ODeepLearningEstimator(activation="RectifierWithDropout",
                                                        hidden=[50,40,30,20,10,5],
                                                        autoencoder=True,sparse=True,l1=1e-5,epochs=10),hyper_parameters)
h2o_gridSearch.train(x=train.names,training_frame=train,validation_frame=test)
print(h2o_gridSearch.get_grid(sort_by="mse"))
"""
Explanation: Lets load the heart disease dataset End of explanation
"""
# Second grid search: also vary the number of training epochs.
hyper_parameters = {'input_dropout_ratio':[0.1,0.2,0.5,0.7],'epochs':[10,20,30,40]}
h2o_gridSearch = H2OGridSearch(H2ODeepLearningEstimator(activation="RectifierWithDropout", hidden=[32,32,32],
                                                        autoencoder=True,sparse=True,l1=1e-5,epochs=10),hyper_parameters)
h2o_gridSearch.train(x=train.names,training_frame=train,validation_frame=test)
print(h2o_gridSearch.get_grid(sort_by="mse"))
"""
Explanation: Now tell me who is going to win ? one with greater epochs or with lower dropout or something else ? End of explanation
"""
masterfish2015/my_project
python/demo1/scipy-advanced-tutorial-master/Part2/Exercise 2.ipynb
mit
%%javascript
delete requirejs.s.contexts._.defined.CustomViewModule;
define('CustomViewModule', ['jquery', 'widgets/js/widget'], function($, widget) {
    var CustomView = widget.DOMWidgetView.extend({
    });
    return {CustomView: CustomView};
});

from IPython.html.widgets import DOMWidget
from IPython.display import display
from IPython.utils.traitlets import Unicode

class CustomWidget(DOMWidget):
    # Synced traitlets tell the notebook frontend which JS module/view
    # (defined in the %%javascript cell above) renders this widget.
    _view_module = Unicode('CustomViewModule', sync=True)
    _view_name = Unicode('CustomView', sync=True)

display(CustomWidget())

# Reveal the exercise solutions (helper defined elsewhere in the tutorial).
answer('2_1.js')
answer('2_1.py')
"""
Explanation: Using the template below, make a widget view that displays text, possibly 'Hello World'. End of explanation
"""
from IPython.html.widgets import DOMWidget
from IPython.display import display
from IPython.utils.traitlets import Unicode

class ColorWidget(DOMWidget):
    # Same pattern as CustomWidget, bound to the ColorViewModule JS view.
    _view_module = Unicode('ColorViewModule', sync=True)
    _view_name = Unicode('ColorView', sync=True)

%%javascript
delete requirejs.s.contexts._.defined.ColorViewModule;
define('ColorViewModule', ['jquery', 'widgets/js/widget'], function($, widget) {
    var ColorView = widget.DOMWidgetView.extend({
    });
    return {ColorView: ColorView};
});

answer('2_2.py')
answer('2_2_1.js')
answer('2_2_2.js')
answer('2_2.js')

# Displaying the same widget twice keeps both views synced to one model.
w = ColorWidget()
display(w)
display(w)
w.value = '#00FF00'
w.value
"""
Explanation: Using the template below, make a color picker widget. This can be done in a few steps: 1. Add a synced traitlet to the Python class. 2. Add a render method that inserts a input element, with attribute type='color'. The easiest way to do this is to use jQuery. 3. Add a method that updates the color picker's value to the model's value. Use listenTo listen to changes of the model. 4. Listen to changes of the color picker's value, and update the model accordingly. End of explanation
"""
daniel-koehn/Theory-of-seismic-waves-II
05_2D_acoustic_FD_modelling/lecture_notebooks/5_fdac2d_heterogeneous.ipynb
gpl-3.0
# Execute this cell to load the notebook's style sheet, then ignore it
from IPython.core.display import HTML
css_file = '../../style/custom.css'
# Read via a context manager so the file handle is closed deterministically
# (the original open(...).read() leaked the handle).
with open(css_file, "r") as f:
    css = f.read()
HTML(css)
"""
Explanation: Content under Creative Commons Attribution license CC-BY 4.0, code under BSD 3-Clause License © 2018 by D. Koehn, heterogeneous models are from this Jupyter notebook by Heiner Igel (@heinerigel), Florian Wölfl and Lion Krischer (@krischer) which is a supplemenatry material to the book Computational Seismology: A Practical Introduction, notebook style sheet by L.A. Barba, N.C. Clementi End of explanation
"""
# Import Libraries
# ----------------
import numpy as np
from numba import jit
import matplotlib
import matplotlib.pyplot as plt
from pylab import rcParams

# Ignore Warning Messages
# -----------------------
import warnings
warnings.filterwarnings("ignore")
from mpl_toolkits.axes_grid1 import make_axes_locatable

# Definition of initial modelling parameters
# ------------------------------------------
xmax = 2000.0  # maximum spatial extension of the 1D model in x-direction (m)
zmax = xmax    # maximum spatial extension of the 1D model in z-direction (m)
dx = 10.0      # grid point distance in x-direction (m)
dz = dx        # grid point distance in z-direction (m)
tmax = 0.75    # maximum recording time of the seismogram (s)
dt = 0.0010    # time step
vp0 = 3000.
# P-wave speed in medium (m/s)

# acquisition geometry
xsrc = 1000.0 # x-source position (m)
zsrc = xsrc # z-source position (m)
f0 = 100.0 # dominant frequency of the source (Hz)
t0 = 0.1 # source time shift (s)
isnap = 2 # snapshot interval (timesteps)

@jit(nopython=True) # use JIT for C-performance
def update_d2px_d2pz(p, dx, dz, nx, nz, d2px, d2pz):
    """Second-order central differences of p in x and z on the interior grid.

    Results are written into the preallocated d2px/d2pz arrays, which are
    also returned; boundary points are left untouched.
    """
    for i in range(1, nx - 1):
        for j in range(1, nz - 1):
            d2px[i,j] = (p[i + 1,j] - 2 * p[i,j] + p[i - 1,j]) / dx**2
            d2pz[i,j] = (p[i,j + 1] - 2 * p[i,j] + p[i,j - 1]) / dz**2
    return d2px, d2pz

# Define simple absorbing boundary frame based on wavefield damping
# according to Cerjan et al., 1985, Geophysics, 50, 705-708
def absorb(nx,nz):
    """Return an (nx, nz) array of damping coefficients.

    Coefficients decay exponentially toward 1 inside a frame of FW grid
    points along the left, right and bottom model edges; the interior is 1.
    """
    FW = 60 # thickness of absorbing frame (gridpoints)
    a = 0.0053

    coeff = np.zeros(FW)

    # define coefficients in absorbing frame
    for i in range(FW):
        coeff[i] = np.exp(-(a**2 * (FW-i)**2))

    # initialize array of absorbing coefficients
    absorb_coeff = np.ones((nx,nz))

    # compute coefficients for left grid boundaries (x-direction)
    zb=0
    for i in range(FW):
        ze = nz - i - 1
        for j in range(zb,ze):
            absorb_coeff[i,j] = coeff[i]

    # compute coefficients for right grid boundaries (x-direction)
    zb=0
    for i in range(FW):
        ii = nx - i - 1
        ze = nz - i - 1
        for j in range(zb,ze):
            absorb_coeff[ii,j] = coeff[i]

    # compute coefficients for bottom grid boundaries (z-direction)
    xb=0
    for j in range(FW):
        jj = nz - j - 1
        xb = j
        xe = nx - j
        for i in range(xb,xe):
            absorb_coeff[i,jj] = coeff[j]

    return absorb_coeff

# FD_2D_acoustic code with JIT optimization
# -----------------------------------------
def FD_2D_acoustic_JIT(dt,dx,dz,f0,xsrc,zsrc):
    """Run the 2D acoustic finite-difference simulation and animate it.

    Uses the module-level model extent (xmax, zmax, tmax, t0, isnap) and the
    model() function currently in scope to build the Vp model; returns vp.
    """
    # define model discretization
    # ---------------------------
    nx = (int)(xmax/dx) # number of grid points in x-direction
    print('nx = ',nx)

    nz = (int)(zmax/dz) # number of grid points in x-direction
    print('nz = ',nz)

    nt = (int)(tmax/dt) # maximum number of time steps
    print('nt = ',nt)

    isrc = (int)(xsrc/dx) # source location in grid in x-direction
    jsrc = (int)(zsrc/dz) # source location in grid in x-direction

    # Source time function (Gaussian)
    # -------------------------------
    src = np.zeros(nt + 1)
    time = np.linspace(0 * dt, nt * dt, nt)

    # 1st derivative of Gaussian
    src = -2. * (time - t0) * (f0 ** 2) * (np.exp(- (f0 ** 2) * (time - t0) ** 2))

    # define clip value: 0.1 * absolute maximum value of source wavelet
    clip = 0.1 * max([np.abs(src.min()), np.abs(src.max())]) / (dx*dz) * dt**2

    # Define absorbing boundary frame
    # -------------------------------
    absorb_coeff = absorb(nx,nz)

    # Define model
    # ------------
    vp = np.zeros((nx,nz))
    vp = model(nx,nz,vp,dx,dz)
    vp2 = vp**2

    # Initialize empty pressure arrays
    # --------------------------------
    p = np.zeros((nx,nz)) # p at time n (now)
    pold = np.zeros((nx,nz)) # p at time n-1 (past)
    pnew = np.zeros((nx,nz)) # p at time n+1 (present)
    d2px = np.zeros((nx,nz)) # 2nd spatial x-derivative of p
    d2pz = np.zeros((nx,nz)) # 2nd spatial z-derivative of p

    # Initalize animation of pressure wavefield
    # -----------------------------------------
    fig = plt.figure(figsize=(7,3.5)) # define figure size
    plt.tight_layout()
    extent = [0.0,xmax,zmax,0.0] # define model extension

    # Plot pressure wavefield movie
    ax1 = plt.subplot(121)
    image = plt.imshow(p.T, animated=True, cmap="RdBu", extent=extent, interpolation='nearest', vmin=-clip, vmax=clip)
    plt.title('Pressure wavefield')
    plt.xlabel('x [m]')
    plt.ylabel('z [m]')

    # Plot Vp-model
    ax2 = plt.subplot(122)
    image1 = plt.imshow((vp.T)/1000, cmap=plt.cm.viridis, interpolation='nearest', extent=extent)
    plt.title('Vp-model')
    plt.xlabel('x [m]')
    plt.setp(ax2.get_yticklabels(), visible=False)

    divider = make_axes_locatable(ax2)
    cax2 = divider.append_axes("right", size="2%", pad=0.1)
    fig.colorbar(image1, cax=cax2)
    plt.ion()
    plt.show(block=False)

    # Calculate Partial Derivatives
    # -----------------------------
    for it in range(nt):

        # FD approximation of spatial derivative by 3 point operator
        d2px, d2pz = update_d2px_d2pz(p, dx, dz, nx, nz, d2px, d2pz)

        # Time Extrapolation
        # ------------------
        pnew = 2 * p - pold + vp2 * dt**2 * (d2px + d2pz)

        # Add Source Term at isrc
        # -----------------------
        # Absolute pressure w.r.t analytical solution
        pnew[isrc,jsrc] = pnew[isrc,jsrc] + src[it] / (dx * dz) * dt ** 2

        # Apply absorbing boundary frame
        # ------------------------------
        p *= absorb_coeff
        pnew *= absorb_coeff

        # Remap Time Levels
        # -----------------
        pold, p = p, pnew

        # display pressure snapshots
        if (it % isnap) == 0:
            image.set_data(p.T)
            fig.canvas.draw()

    return vp
"""
Explanation: 2D acoustic FD modelling for heterogeneous media So far, we only compared 2D acoustic FD modelling results for homogeneous acoustic media with analytical solutions. Next, we want to model some more interesting, heterogeneous problems. End of explanation
"""
# Homogeneous model
def model(nx,nz,vp,dx,dz):
    # NOTE(review): body appears truncated in this dump — here vp is
    # returned unchanged, which is consistent with a homogeneous model.
    return vp
"""
Explanation: Problem 1: Homogeneous Model As a reference, we start with a problem, which should be quite familiar to you - the homogeneous model. I modified the FD code to define models in a separate Python function: End of explanation
"""
%matplotlib notebook
xsrc = 1000.0 # x-source position (m)
zsrc = xsrc # z-source position (m)
dx = 5.0 # grid point distance in x-direction (m)
dz = dx # grid point distance in z-direction (m)
f0 = 100.0 # centre frequency of the source wavelet (Hz)

# calculate dt according to the CFL-criterion
dt = dx / (np.sqrt(2.0) * vp0)

vp_hom = FD_2D_acoustic_JIT(dt,dx,dz,f0,xsrc,zsrc)
"""
Explanation: Time to define the modelling parameters and run the new FD code ... End of explanation
"""
# Random medium model
def model(nx,nz,vp,dx,dz):
    # NOTE(review): body appears truncated in this dump — the perturbation
    # code is missing and vp is returned unchanged.
    return vp

xsrc = 1000.0 # x-source position (m)
zsrc = xsrc # z-source position (m)
dx = 5.0 # grid point distance in x-direction (m)
dz = dx # grid point distance in z-direction (m)
f0 = 100.0 # centre frequency of the source wavelet (Hz)
vpmax = vp0

# calculate dt according to the CFL-criterion
dt = dx / (np.sqrt(2.0) * vpmax)

vp_rand = FD_2D_acoustic_JIT(dt,dx,dz,f0,xsrc,zsrc)
"""
Explanation: Problem 2: Random Medium Next, we add some random perturbations to the homogeneous Vp-model: End of explanation
"""
# Vertical fault zone model
def model(nx,nz,vp,dx,dz):
    # NOTE(review): body appears truncated in this dump — the fault-zone
    # geometry is missing and vp is returned unchanged.
    return vp

xsrc = 1000.0 # x-source position (m)
zsrc = 1000.0 # z-source position (m)
dx = 5.0 # grid point distance in x-direction (m)
dz = dx # grid point distance in z-direction (m)
f0 = 100.0 # centre frequency of the source wavelet (Hz)
vpmax = 4200.0
# vpmax = np.max(vp0)

# calculate dt according to the CFL-criterion
dt = dx / (np.sqrt(2.0) * vpmax)

vp_fault = FD_2D_acoustic_JIT(dt,dx,dz,f0,xsrc,zsrc)
"""
Explanation: Problem 3: Fault Zone In this problem, we model acoustic wave propagation in a vertical fault zone End of explanation
"""
# Simplified vulcano (Gaussian hill)
def model(nx,nz,vp,dx,dz):
    # NOTE(review): body appears truncated in this dump — the topography
    # construction is missing and vp is returned unchanged.
    return vp

xsrc = 1000.0 # x-source position (m)
zsrc = 200.0 # z-source position (m)
dx = 5.0 # grid point distance in x-direction (m)
dz = dx # grid point distance in z-direction (m)
f0 = 100.0 # centre frequency of the source wavelet (Hz)
vpmax = np.max(vp0)

# calculate dt according to the CFL-criterion
dt = dx / (np.sqrt(2.0) * vpmax)

vp_topo = FD_2D_acoustic_JIT(dt,dx,dz,f0,xsrc,zsrc)
"""
Explanation: Problem 4: Simplified Vulcano How does the surface topography of a vulcano scatter the acoustic wavefield? End of explanation
"""
BrainIntensive/OnlineBrainIntensive
resources/matplotlib/Examples/lineplots.ipynb
mit
%load_ext watermark
%watermark -u -v -d -p matplotlib,numpy
"""
Explanation: Sebastian Raschka back to the matplotlib-gallery at https://github.com/rasbt/matplotlib-gallery Link the matplotlib gallery at https://github.com/rasbt/matplotlib-gallery End of explanation
"""
%matplotlib inline
"""
Explanation: <font size="1.5em">More info about the %watermark extension</font> End of explanation
"""
import matplotlib.pyplot as plt

# Two series over the same x positions, distinguished by marker style.
x = [1, 2, 3]
y_1 = [50, 60, 70]
y_2 = [20, 30, 40]

plt.plot(x, y_1, marker='x')
plt.plot(x, y_2, marker='^')

plt.xlim([0, len(x)+1])
plt.ylim([0, max(y_1+y_2) + 10])

plt.xlabel('x-axis label')
plt.ylabel('y-axis label')
plt.title('Simple line plot')
plt.legend(['sample 1', 'sample2'], loc='upper left')

plt.show()
"""
Explanation: <br> <br> Lineplots in matplotlib Sections Simple line plot Line plot with error bars Line plot with x-axis labels and log-scale Gaussian probability density functions Cumulative Plots Cumulative Sum Absolute Count Colormaps Marker styles Line styles <br> <br> Simple line plot [back to top] End of explanation
"""
import matplotlib.pyplot as plt

x = [1, 2, 3]
y_1 = [50, 60, 70]
y_2 = [20, 30, 40]

# Symmetric per-point error bars for each series.
y_1_err = [4.3, 4.5, 2.0]
y_2_err = [2.3, 6.9, 2.1]

x_labels = ["x1", "x2", "x3"]

plt.errorbar(x, y_1, yerr=y_1_err, fmt='-x')
plt.errorbar(x, y_2, yerr=y_2_err, fmt='-^')

plt.xticks(x, x_labels)

plt.xlim([0, len(x)+1])
plt.ylim([0, max(y_1+y_2) + 10])

plt.xlabel('x-axis label')
plt.ylabel('y-axis label')
plt.title('Line plot with error bars')
plt.legend(['sample 1', 'sample2'], loc='upper left')

plt.show()
"""
Explanation: <br> <br> Line plot with error bars [back to top] End of explanation
"""
import matplotlib.pyplot as plt

x = [1, 2, 3]
y_1 = [0.5,7.0,60.0]
y_2 = [0.3,6.0,30.0]

x_labels = ["x1", "x2", "x3"]

plt.plot(x, y_1, marker='x')
plt.plot(x, y_2, marker='^')

plt.xticks(x, x_labels)
plt.xlim([0,4])

plt.xlabel('x-axis label')
plt.ylabel('y-axis label')
# log scale keeps the order-of-magnitude growth of both series readable
plt.yscale('log')
plt.title('Line plot with x-axis labels and log-scale')
plt.legend(['sample 1', 'sample2'], loc='upper left')

plt.show()
"""
Explanation: <br> <br> Line plot with x-axis labels and log-scale [back to top] End of explanation
"""
import numpy as np
from matplotlib import pyplot as plt
import math

def pdf(x, mu=0, sigma=1):
    """
    Calculates the normal distribution's probability density function (PDF).

    x: evaluation point(s), scalar or array-like.
    mu: mean of the distribution.
    sigma: standard deviation (not variance) of the distribution.
    Returns the PDF evaluated elementwise at x.
    """
    term1 = 1.0 / ( math.sqrt(2*np.pi) * sigma )
    term2 = np.exp( -0.5 * ( (x-mu)/sigma )**2 )
    return term1 * term2

x = np.arange(0, 100, 0.05)

# sigma takes a standard deviation, hence the square roots of the variances
pdf1 = pdf(x, mu=5, sigma=2.5**0.5)
pdf2 = pdf(x, mu=10, sigma=6**0.5)

plt.plot(x, pdf1)
plt.plot(x, pdf2)

plt.title('Probability Density Functions')
plt.ylabel('p(x)')
plt.xlabel('random variable x')

plt.legend(['pdf1 ~ N(5,2.5)', 'pdf2 ~ N(10,6)'], loc='upper right')

plt.ylim([0,0.5])
plt.xlim([0,20])

plt.show()
"""
Explanation: <br> <br> Gaussian probability density functions [back to top] End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt

A = np.arange(1, 11)
B = np.random.randn(10) # 10 rand. values from a std. norm. distr.
C = B.cumsum()

fig, (ax0, ax1) = plt.subplots(ncols=2, sharex=True, sharey=True, figsize=(10,5))

## A) via plt.step()
ax0.step(A, C, label='cumulative sum') # cumulative sum via numpy.cumsum()
ax0.scatter(A, B, label='actual values')
ax0.set_ylabel('Y value')
ax0.legend(loc='upper right')

## B) via plt.plot()
ax1.plot(A, C, label='cumulative sum') # cumulative sum via numpy.cumsum()
ax1.scatter(A, B, label='actual values')
ax1.legend(loc='upper right')

fig.text(0.5, 0.04, 'sample number', ha='center', va='center')
fig.text(0.5, 0.95, 'Cumulative sum of 10 samples from a random normal distribution', ha='center', va='center')

plt.show()
"""
Explanation: <br> <br> Cumulative Plots [back to top] <br> <br> Cumulative Sum [back to top] End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt

A = np.arange(1, 11)
B = np.random.randn(10) # 10 rand. values from a std. norm. distr.
plt.figure(figsize=(10,5))
plt.step(np.sort(B), A)
plt.ylabel('sample count')
plt.xlabel('x value')
plt.title('Number of samples at a certain threshold')
plt.show()
"""
Explanation: <br> <br> Absolute Count [back to top] End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt

fig, (ax0, ax1) = plt.subplots(1,2, figsize=(14, 7))
samples = range(1,16)

# Default Color Cycle
for i in samples:
    ax0.plot([0, 10], [0, i], label=i, lw=3)

# Colormap
colormap = plt.cm.Paired
# Axes.set_color_cycle was deprecated in matplotlib 1.5 and later removed;
# set_prop_cycle is the supported replacement. Target ax1 explicitly instead
# of relying on plt.gca() happening to point at the right axes.
ax1.set_prop_cycle(color=[colormap(i) for i in np.linspace(0, 0.9, len(samples))])
for i in samples:
    ax1.plot([0, 10], [0, i], label=i, lw=3)

# Annotation
ax0.set_title('Default color cycle')
ax1.set_title('plt.cm.Paired colormap')
ax0.legend(loc='upper left')
ax1.legend(loc='upper left')
plt.show()
"""
Explanation: <br> <br> Colormaps [back to top] More color maps are available at http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt

# Every single-character marker style, one diagonal line per marker.
markers = [
    '.', # point
    ',', # pixel
    'o', # circle
    'v', # triangle down
    '^', # triangle up
    '<', # triangle_left
    '>', # triangle_right
    '1', # tri_down
    '2', # tri_up
    '3', # tri_left
    '4', # tri_right
    '8', # octagon
    's', # square
    'p', # pentagon
    '*', # star
    'h', # hexagon1
    'H', # hexagon2
    '+', # plus
    'x', # x
    'D', # diamond
    'd', # thin_diamond
    '|', # vline
    ]

plt.figure(figsize=(13, 10))

samples = range(len(markers))

for i in samples:
    plt.plot([i-1, i, i+1], [i, i, i], label=markers[i], marker=markers[i], markersize=10)

# Annotation
plt.title('Matplotlib Marker styles', fontsize=20)
plt.ylim([-1, len(markers)+1])
plt.legend(loc='lower right')
plt.show()
"""
Explanation: <br> <br> Marker styles [back to top] End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt

linestyles = ['-.', '--', 'None', '-', ':']

plt.figure(figsize=(8, 5))

samples = range(len(linestyles))

for i in samples:
    plt.plot([i-1, i, i+1], [i, i, i],
             label='"%s"' %linestyles[i],
             linestyle=linestyles[i],
             lw=4
             )

# Annotation
plt.title('Matplotlib line styles', fontsize=20)
plt.ylim([-1, len(linestyles)+1])
plt.legend(loc='lower right')
plt.show()
"""
Explanation: <br> <br> Line styles [back to top] End of explanation
"""
M-R-Houghton/euroscipy_2015
scikit_image/lectures/adv3_panorama-stitching.ipynb
mit
import numpy as np import matplotlib.pyplot as plt def compare(*images, **kwargs): """ Utility function to display images side by side. Parameters ---------- image0, image1, image2, ... : ndarrray Images to display. labels : list Labels for the different images. """ f, axes = plt.subplots(1, len(images), **kwargs) axes = np.array(axes, ndmin=1) labels = kwargs.pop('labels', None) if labels is None: labels = [''] * len(images) for n, (image, label) in enumerate(zip(images, labels)): axes[n].imshow(image, interpolation='nearest', cmap='gray') axes[n].set_title(label) axes[n].axis('off') f.tight_layout() """ Explanation: scikit-image advanced panorama tutorial Enhanced from the original demo as featured in the scikit-image paper. Multiple overlapping images of the same scene, combined into a single image, can yield amazing results. This tutorial will illustrate how to accomplish panorama stitching using scikit-image, from loading the images to cleverly stitching them together. First things first Import NumPy and matplotlib, then define a utility function to compare multiple images End of explanation """ import skimage.io as io pano_imgs = io.ImageCollection('../images/pano/JDW_03*') """ Explanation: Load data The ImageCollection class provides an easy and efficient way to load and represent multiple images. Images in the ImageCollection are not only read from disk when accessed. Load a series of images into an ImageCollection with a wildcard, as they share similar names. End of explanation """ # compare(...) """ Explanation: Inspect these images using the convenience function compare() defined earlier End of explanation """ from skimage.color import rgb2gray # Make grayscale versions of the three color images in pano_imgs # named pano0, pano1, and pano2 # View the results using compare() """ Explanation: Credit: Images of Private Arch and the trail to Delicate Arch in Arches National Park, USA, taken by Joshua D. Warner.<br> License: CC-BY 4.0 0. 
Pre-processing This stage usually involves one or more of the following: * Resizing, often downscaling with fixed aspect ratio * Conversion to grayscale, as some feature descriptors are not defined for color images * Cropping to region(s) of interest For convenience our example data is already resized smaller, and we won't bother cropping. However, they are presently in color so conversion to grayscale with skimage.color.rgb2gray is appropriate. End of explanation """ from skimage.feature import ORB # Initialize ORB # This number of keypoints is large enough for robust results, # but low enough to run within a few seconds. orb = ORB(n_keypoints=800, fast_threshold=0.05) # Detect keypoints in pano0 orb.detect_and_extract(pano0) keypoints0 = orb.keypoints descriptors0 = orb.descriptors # Detect keypoints in pano1 and pano2 """ Explanation: 1. Feature detection and matching We need to estimate a projective transformation that relates these images together. The steps will be Define one image as a target or destination image, which will remain anchored while the others are warped Detect features in all three images Match features from left and right images against the features in the center, anchored image. In this three-shot series, the middle image pano1 is the logical anchor point. We detect "Oriented FAST and rotated BRIEF" (ORB) features in both images. Note: For efficiency, in this tutorial we're finding 800 keypoints. The results are good but small variations are expected. If you need a more robust estimate in practice, run multiple times and pick the best result or generate additional keypoints. End of explanation """ from skimage.feature import match_descriptors # Match descriptors between left/right images and the center matches01 = match_descriptors(descriptors0, descriptors1, cross_check=True) matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True) """ Explanation: Match features from images 0 <-> 1 and 1 <-> 2.
End of explanation """ from skimage.feature import plot_matches fig, ax = plt.subplots(1, 1, figsize=(12, 12)) # Best match subset for pano0 -> pano1 plot_matches(ax, pano0, pano1, keypoints0, keypoints1, matches01) ax.axis('off'); """ Explanation: Inspect these matched features side-by-side using the convenience function skimage.feature.plot_matches. End of explanation """ fig, ax = plt.subplots(1, 1, figsize=(12, 12)) # Best match subset for pano2 -> pano1 plot_matches(ax, pano1, pano2, keypoints1, keypoints2, matches12) ax.axis('off'); """ Explanation: Most of these line up similarly, but it isn't perfect. There are a number of obvious outliers or false matches. End of explanation """ from skimage.transform import ProjectiveTransform from skimage.measure import ransac # Select keypoints from # * source (image to be registered): pano0 # * target (reference image): pano1, our middle frame registration target src = keypoints0[matches01[:, 0]][:, ::-1] dst = keypoints1[matches01[:, 1]][:, ::-1] model_robust01, inliers01 = ransac((src, dst), ProjectiveTransform, min_samples=4, residual_threshold=1, max_trials=300) # Select keypoints from # * source (image to be registered): pano2 # * target (reference image): pano1, our middle frame registration target src = keypoints2[matches12[:, 1]][:, ::-1] dst = keypoints1[matches12[:, 0]][:, ::-1] model_robust12, inliers12 = ransac((src, dst), ProjectiveTransform, min_samples=4, residual_threshold=1, max_trials=300) """ Explanation: Similar to above, decent signal but numerous false matches. 2. Transform estimation To filter out the false matches, we apply RANdom SAmple Consensus (RANSAC), a powerful method of rejecting outliers available in skimage.transform.ransac. The transformation is estimated using an iterative process based on randomly chosen subsets, finally selecting the model which corresponds best with the majority of matches. We need to do this twice, once each for the transforms left -> center and right -> center. 
End of explanation """ # Use plot_matches as before, but select only good matches with fancy indexing # e.g., matches01[inliers01] # Use plot_matches as before, but select only good matches with fancy indexing # e.g., matches12[inliers12] """ Explanation: The inliers returned from RANSAC select the best subset of matches. How do they look? End of explanation """ from skimage.transform import SimilarityTransform # Shape of middle image, our registration target r, c = pano1.shape[:2] # Note that transformations take coordinates in (x, y) format, # not (row, column), in order to be consistent with most literature corners = np.array([[0, 0], [0, r], [c, 0], [c, r]]) # Warp the image corners to their new positions warped_corners01 = model_robust01(corners) warped_corners12 = model_robust12(corners) # Find the extents of both the reference image and the warped # target image all_corners = np.vstack((warped_corners01, warped_corners12, corners)) # The overall output shape will be max - min corner_min = np.min(all_corners, axis=0) corner_max = np.max(all_corners, axis=0) output_shape = (corner_max - corner_min) # Ensure integer shape with np.ceil and dtype conversion output_shape = np.ceil(output_shape[::-1]).astype(int) """ Explanation: Most of the false matches are rejected! 3. Warping Next, we produce the panorama itself. We must warp, or transform, two of the three images so they will properly align with the stationary image. Extent of output image The first step is to find the shape of the output image to contain all three transformed images. To do this we consider the extents of all warped images. 
End of explanation """ from skimage.transform import warp # This in-plane offset is the only necessary transformation for the middle image offset1 = SimilarityTransform(translation= -corner_min) # Translate pano1 into place pano1_warped = warp(pano1, offset1.inverse, order=3, output_shape=output_shape, cval=-1) # Acquire the image mask for later use pano1_mask = (pano1_warped != -1) # Mask == 1 inside image pano1_warped[~pano1_mask] = 0 # Return background values to 0 """ Explanation: Apply estimated transforms Warp the images with skimage.transform.warp according to the estimated models. A shift, or translation is needed to place as our middle image in the middle - it isn't truly stationary. Values outside the input images are initially set to -1 to distinguish the "background", which is identified for later use. Note: warp takes the inverse mapping as an input. End of explanation """ # Warp pano0 to pano1 transform01 = (model_robust01 + offset1).inverse pano0_warped = warp(pano0, transform01, order=3, output_shape=output_shape, cval=-1) pano0_mask = (pano0_warped != -1) # Mask == 1 inside image pano0_warped[~pano0_mask] = 0 # Return background values to 0 """ Explanation: Warp left panel into place End of explanation """ # Warp pano2 to pano1 transform12 = (model_robust12 + offset1).inverse pano2_warped = warp(pano2, transform12, order=3, output_shape=output_shape, cval=-1) pano2_mask = (pano2_warped != -1) # Mask == 1 inside image pano2_warped[~pano2_mask] = 0 # Return background values to 0 """ Explanation: Warp right panel into place End of explanation """ compare(pano0_warped, pano1_warped, pano2_warped, figsize=(12, 10)); """ Explanation: Inspect the warped images: End of explanation """ # Add the three warped images together. This could create dtype overflows! # We know they are are floating point images after warping, so it's OK. 
merged = ## Sum warped images # Track the overlap by adding the masks together overlap = ## Sum masks # Normalize through division by `overlap` - but ensure the minimum is 1 normalized = merged / ## Divisor here """ Explanation: 4. Combining images the easy (and bad) way This method simply sums the warped images tracks how many images overlapped to create each point normalizes the result. End of explanation """ fig, ax = plt.subplots(figsize=(12, 12)) ax.imshow(normalized, cmap='gray') fig.tight_layout() ax.axis('off'); """ Explanation: Finally, view the results! End of explanation """ fig, ax = plt.subplots(figsize=(12, 12)) # Generate difference image and inspect it difference_image = pano0_warped - pano1_warped ax.imshow(difference_image, cmap='gray') ax.axis('off'); """ Explanation: <div style="height: 400px;"></div> What happened?! Why are there nasty dark lines at boundaries, and why does the middle look so blurry? The lines are artifacts (boundary effect) from the warping method. When the image is warped with interpolation, edge pixels containing part image and part background combine these values. We would have bright lines if we'd chosen cval=2 in the warp calls (try it!), but regardless of choice there will always be discontinuities. ...Unless you use order=0 in warp, which is nearest neighbor. Then edges are perfect (try it!). But who wants to be limited to an inferior interpolation method? Even then, it's blurry! Is there a better way? 5. Stitching images along a minimum-cost path Let's step back a moment and consider: Is it even reasonable to blend pixels? Take a look at a difference image, which is just one image subtracted from the other. End of explanation """ ymax = output_shape[1] - 1 xmax = output_shape[0] - 1 # Start anywhere along the top and bottom, left of center. mask_pts01 = [[0, ymax // 3], [xmax, ymax // 3]] # Start anywhere along the top and bottom, right of center. 
mask_pts12 = [[0, 2*ymax // 3], [xmax, 2*ymax // 3]]
""" Explanation: The surrounding flat gray is zero. A perfect overlap would show no structure! Instead, the overlap region matches fairly well in the middle... but off to the sides where things start to look a little embossed, a simple average blurs the result. This caused the blurring in the previous, method (look again). Unfortunately, this is almost always the case for panoramas! How can we fix this? Let's attempt to find a vertical path through this difference image which stays as close to zero as possible. If we use that to build a mask, defining a transition between images, the result should appear seamless. Seamless image stitching with Minimum-Cost Paths and skimage.graph Among other things, skimage.graph allows you to * start at any point on an array * find the path to any other point in the array * the path found minimizes the sum of values on the path. The array is called a cost array, while the path found is a minimum-cost path or MCP. To accomplish this we need Starting and ending points for the path A cost array (a modified difference image) This method is so powerful that, with a carefully constructed cost array, the seed points are essentially irrelevant. It just works! Define seed points End of explanation """
from skimage.measure import label


def generate_costs(diff_image, mask, vertical=True, gradient_cutoff=2.):
    """
    Ensures equal-cost paths from edges to region of interest.

    Parameters
    ----------
    diff_image : (M, N) ndarray of floats
        Difference of two overlapping images.
    mask : (M, N) ndarray of bools
        Mask representing the region of interest in ``diff_image``.
    vertical : bool
        Control operation orientation.
    gradient_cutoff : float
        Controls how far out of parallel lines can be to edges before
        correction is terminated. The default (2.) is good for most cases.

    Returns
    -------
    costs_arr : (M, N) ndarray of floats
        Adjusted costs array, ready for use.
    """
    if vertical is not True:
        # BUGFIX: the original called the undefined name `tweak_costs` here,
        # raising NameError for horizontal use.  Recurse on the transposed
        # inputs instead, forcing vertical=True so the recursion terminates,
        # then transpose the result back.
        return generate_costs(diff_image.T, mask.T, vertical=True,
                              gradient_cutoff=gradient_cutoff).T

    # Start with a high-cost array of 1's
    costs_arr = np.ones_like(diff_image)

    # Obtain extent of overlap
    row, col = mask.nonzero()
    cmin = col.min()
    cmax = col.max()

    # Label discrete regions
    cslice = slice(cmin, cmax + 1)
    labels = label(mask[:, cslice])

    # Find distance from edge to region
    # NOTE(review): assumes label() tags the region above the mask as 0 and
    # the region below as 2 -- verify against the skimage version in use.
    upper = (labels == 0).sum(axis=0)
    lower = (labels == 2).sum(axis=0)

    # Reject areas of high change
    ugood = np.abs(np.gradient(upper)) < gradient_cutoff
    lgood = np.abs(np.gradient(lower)) < gradient_cutoff

    # Give areas slightly farther from edge a cost break
    costs_upper = np.ones_like(upper, dtype=np.float64)
    costs_lower = np.ones_like(lower, dtype=np.float64)
    costs_upper[ugood] = upper.min() / np.maximum(upper[ugood], 1)
    costs_lower[lgood] = lower.min() / np.maximum(lower[lgood], 1)

    # Expand from 1d back to 2d
    vdist = mask.shape[0]
    costs_upper = costs_upper[np.newaxis, :].repeat(vdist, axis=0)
    costs_lower = costs_lower[np.newaxis, :].repeat(vdist, axis=0)

    # Place these in output array
    costs_arr[:, cslice] = costs_upper * (labels == 0)
    costs_arr[:, cslice] += costs_lower * (labels == 2)

    # Finally, place the difference image
    costs_arr[mask] = diff_image[mask]

    return costs_arr
""" Explanation: Construct cost array This utility function exists to give a "cost break" for paths from the edge to the overlap region. We will visually explore the results shortly. Examine the code later - for now, just use it. End of explanation """
# Start with the absolute value of the difference image.
# np.abs necessary because we don't want negative costs!
costs01 = generate_costs(np.abs(pano0_warped - pano1_warped),
                         pano0_mask & pano1_mask)
""" Explanation: Use this function to generate the cost array. End of explanation """
# Set top and bottom edges to zero in `costs01`
# Remember (row, col) indexing!
costs01[0, :] = 0 costs01[-1, :] = 0 """ Explanation: Allow the path to "slide" along top and bottom edges to the optimal horizontal position by setting top and bottom edges to zero cost. End of explanation """ fig, ax = plt.subplots(figsize=(15, 12)) ax.imshow(costs01, cmap='gray', interpolation='none') ax.axis('off'); """ Explanation: Our cost array now looks like this End of explanation """ from skimage.graph import route_through_array # Arguments are: # cost array # start pt # end pt # can it traverse diagonally pts, _ = route_through_array(costs01, mask_pts01[0], mask_pts01[1], fully_connected=True) # Convert list of lists to 2d coordinate array for easier indexing pts = np.array(pts) """ Explanation: The tweak we made with generate_costs is subtle but important. Can you see it? Find the minimum-cost path (MCP) Use skimage.graph.route_through_array to find an optimal path through the cost array End of explanation """ fig, ax = plt.subplots(figsize=(12, 12)) # Plot the difference image ax.imshow(pano0_warped - pano1_warped, cmap='gray') # Overlay the minimum-cost path ax.plot(pts[:, 1], pts[:, 0]) plt.tight_layout() ax.axis('off'); """ Explanation: Did it work? End of explanation """ # Start with an array of zeros and place the path mask0 = np.zeros_like(pano0_warped, dtype=np.uint8) mask0[pts[:, 0], pts[:, 1]] = 1 """ Explanation: That looks like a great seam to stitch these images together - the path looks very close to zero. Irregularities Due to the random element in the RANSAC transform estimation, everyone will have a slightly different blue path. Your path will look different from mine, and different from your neighbor's. That's expected! The awesome thing about MCP is that everyone just calculated the best possible path to stitch together their unique transforms! Filling the mask Turn that path into a mask, which will be 1 where we want the left image to show through and zero elsewhere. 
We need to fill the left side of the mask with ones over to our path. Note: This is the inverse of NumPy masked array conventions (numpy.ma), which specify a negative mask (mask == bad/missing) rather than a positive mask as used here (mask == good/selected). Place the path into a new, empty array. End of explanation """ fig, ax = plt.subplots(figsize=(12, 12)) # View the path in black and white ax.imshow(mask0, cmap='gray') ax.axis('off'); """ Explanation: Ensure the path appears as expected End of explanation """ from skimage.measure import label # Labeling starts with zero at point (0, 0) mask0[label(mask0, connectivity=1) == 0] = 1 # The result plt.imshow(mask0, cmap='gray'); """ Explanation: Label the various contiguous regions in the image using skimage.measure.label End of explanation """ # Start with the absolute value of the difference image. # np.abs is necessary because we don't want negative costs! costs12 = generate_costs(np.abs(pano1_warped - pano2_warped), pano1_mask & pano2_mask) # Allow the path to "slide" along top and bottom edges to the optimal # horizontal position by setting top and bottom edges to zero cost costs12[0, :] = 0 costs12[-1, :] = 0 """ Explanation: Looks great! Apply the same principles to images 1 and 2: first, build the cost array End of explanation """ costs12[mask0 > 0] = 1 """ Explanation: Add an additional constraint this time, to prevent this path crossing the prior one! End of explanation """ fig, ax = plt.subplots(figsize=(8, 8)) ax.imshow(costs12, cmap='gray'); """ Explanation: Check the result End of explanation """ # Arguments are: # cost array # start pt # end pt # can it traverse diagonally pts, _ = route_through_array(costs12, mask_pts12[0], mask_pts12[1], fully_connected=True) # Convert list of lists to 2d coordinate array for easier indexing pts = np.array(pts) """ Explanation: Your results may look slightly different. 
Compute the minimal cost path End of explanation """ fig, ax = plt.subplots(figsize=(12, 12)) # Plot the difference image ax.imshow(pano1_warped - pano2_warped, cmap='gray') # Overlay the minimum-cost path ax.plot(pts[:, 1], pts[:, 0]); ax.axis('off'); """ Explanation: Verify a reasonable result End of explanation """ mask2 = np.zeros_like(pano0_warped, dtype=np.uint8) mask2[pts[:, 0], pts[:, 1]] = 1 """ Explanation: Initialize the mask by placing the path in a new array End of explanation """ mask2[label(mask2, connectivity=1) == 2] = 1 # The result plt.imshow(mask2, cmap='gray'); """ Explanation: Fill the right side this time, again using skimage.measure.label - the label of interest is 2 End of explanation """ mask1 = ~(mask0 | mask2).astype(bool) """ Explanation: Final mask The last mask for the middle image is one of exclusion - it will be displayed everywhere mask0 and mask2 are not. End of explanation """ def add_alpha(img, mask=None): """ Adds a masked alpha channel to an image. Parameters ---------- img : (M, N[, 3]) ndarray Image data, should be rank-2 or rank-3 with RGB channels mask : (M, N[, 3]) ndarray, optional Mask to be applied. If None, the alpha channel is added with full opacity assumed (1) at all locations. """ from skimage.color import gray2rgb if mask is None: mask = np.ones_like(img) if img.ndim == 2: img = gray2rgb(img) return np.dstack((img, mask)) """ Explanation: Define a convenience function to place masks in alpha channels End of explanation """ pano0_final = add_alpha(pano0_warped, mask0) pano1_final = add_alpha(pano1_warped, mask1) pano2_final = add_alpha(pano2_warped, mask2) compare(pano0_final, pano1_final, pano2_final, figsize=(15, 15)) """ Explanation: Obtain final, alpha blended individual images and inspect them End of explanation """ fig, ax = plt.subplots(figsize=(12, 12)) # This is a perfect combination, but matplotlib's interpolation # makes it appear to have gaps. So we turn it off. 
ax.imshow(pano0_final, interpolation='none') ax.imshow(pano1_final, interpolation='none') ax.imshow(pano2_final, interpolation='none') fig.tight_layout() ax.axis('off'); """ Explanation: What we have here is the world's most complicated and precisely-fitting jigsaw puzzle... Plot all three together and view the results! End of explanation """ # Identical transforms as before, except # * Operating on original color images # * filling with cval=0 as we know the masks pano0_color = warp(pano_imgs[0], (model_robust01 + offset1).inverse, order=3, output_shape=output_shape, cval=0) pano1_color = warp(pano_imgs[1], offset1.inverse, order=3, output_shape=output_shape, cval=0) pano2_color = warp(pano_imgs[2], (model_robust12 + offset1).inverse, order=3, output_shape=output_shape, cval=0) """ Explanation: Fantastic! Without the black borders, you'd never know this was composed of separate images! Bonus round: now, in color! We converted to grayscale for ORB feature detection, back in the initial preprocessing steps. Since we stored our transforms and masks, adding color is straightforward! Transform the colored images End of explanation """ pano0_final = add_alpha(pano0_color, mask0) pano1_final = add_alpha(pano1_color, mask1) pano2_final = add_alpha(pano2_color, mask2) """ Explanation: Apply the custom alpha channel masks End of explanation """ fig, ax = plt.subplots(figsize=(12, 12)) # Turn off matplotlib's interpolation ax.imshow(pano0_final, interpolation='none') ax.imshow(pano1_final, interpolation='none') ax.imshow(pano2_final, interpolation='none') fig.tight_layout() ax.axis('off'); """ Explanation: View the result! 
End of explanation """ from skimage.color import gray2rgb # Start with empty image pano_combined = np.zeros_like(pano0_color) # Place the masked portion of each image into the array # masks are 2d, they need to be (M, N, 3) to match the color images pano_combined += pano0_color * gray2rgb(mask0) pano_combined += pano1_color * gray2rgb(mask1) pano_combined += pano2_color * gray2rgb(mask2) # Save the output - precision loss warning is expected # moving from floating point -> uint8 io.imsave('./pano-advanced-output.png', pano_combined) """ Explanation: Save the combined, color panorama locally as './pano-advanced-output.png' End of explanation """ %reload_ext load_style %load_style ../themes/tutorial.css """ Explanation: <div style="height: 400px;"></div> <div style="height: 400px;"></div> Once more, from the top I hear what you're saying. "But Josh, those were too easy! The panoramas had too much overlap! Does this still work in the real world?" Go back to the top. Under "Load Data" replace the string 'data/JDW_03*' with 'data/JDW_9*', and re-run all of the cells in order. <div style="height: 400px;"></div> End of explanation """
jamesjia94/BIDMach
tutorials/BIDMat_intro.ipynb
bsd-3-clause
import BIDMat.{CMat,CSMat,DMat,Dict,IDict,FMat,GMat,GIMat,GSMat,GSDMat,HMat,IMat,Image,LMat,Mat,SMat,SBMat,SDMat} import BIDMat.MatFunctions._ import BIDMat.SciFunctions._ import BIDMat.Solvers._ import BIDMat.JPlotting._ Mat.checkMKL Mat.checkCUDA Mat.setInline if (Mat.hasCUDA > 0) GPUmem """ Explanation: Introduction to BIDMat BIDMat is a multi-platform matrix library similar to Matlab, Julia or Numpy/Scipy. Its intended primarily for machine learning, but is has a broad set of operations and datatypes and should be suitable for many other applications. BIDMat is probably unique in its integration of CPU and GPU data types. Other features include: * Interactivity. Thanks to the Scala language, BIDMat is interactive and scriptable. * Performance, thanks to CPU and GPU native code, and to Scala's speed on memory-bound operations. * Parallelism, thanks to Scala's actor framework and parallel collection classes. * Rich, open syntax of math operators, +,-,*,/,⊗,∙,∘ * Runs on JVM, extremely portable, leverages Hadoop, Yarn, Spark etc. BIDMat is a library that is loaded by a startup script, and a set of imports that include the default classes and functions. We include them explicitly in this notebook. End of explanation """ val a = ones(4,4) // ones creates a 4x4 Float matrix (FMat). FMat is the default. // you use prefixes to get other types. e.g. iones gives an integer matrix. %type a """ Explanation: These calls check that CPU and GPU native libs loaded correctly, and what GPUs are accessible. If you have a GPU and CUDA installed, GPUmem will printout the fraction of free memory, the absolute free memory and the total memory for the default GPU. Basic Matrix Algebra From this cell onward, we are in the BIDMat environment. Let define some matrices and basic algebra on them. BIDMat has Float, Double, Int and Complex matrix types. We'll start with integer matrices. 
To create an array of ones, do End of explanation """ row(2,2,4,4,5,5) col(5,4,3,2) """ Explanation: You can create row and column matrices (FMat) by listing their elements: End of explanation """ val r = irow(0 until 10) // until gives a range excluding the last element %type r icol(0 to 2) // to gives a range including the last element """ Explanation: You can also create integer row or column matrices (IMat) with irow and icol. These functions accept lists of values, or ranges: End of explanation """ val b = izeros(4,4) // An integer matrix this time, filled with zeros b(?) = icol(0 until 16) // Now treat as a 1d array, fill with a range b """ Explanation: You can create a matrix of sequential integers like this End of explanation """ val bt = b.t """ Explanation: The questionmark ? is BIDMat's wildcard character. Even though b is two-dimensional, b(?) linearizes its contents into a 16-element column and puts the RHS into it. The RHS should be another 16x1 integer matrix (IMat), but when supplied with a range (0 until 16), BIDMat automatically casts the range to an IMat. This is called an implicit conversion in Scala. From the order of elments in the array after the assignment, you can see that BIDMat uses Column-major-order. This is similar to Matlab, Fortran and Julia, but different from C and Python which are row-major. Transpose is implemented with a "t" operator: End of explanation """ val c = a + b %type c // We added an integer matrix (IMat) and a float matrix (FMat), so what type is the result? """ Explanation: Basic Math Operators Math operators have their expected results: End of explanation """ b - a a * b // Matrix multiply (not element-wise multiply) b / a // This is element-wise division, some toolkits instead multiply by the inverse of a. """ Explanation: BIDMat implicitly casts IMats to FMats to perform algebraic operations. 
End of explanation """ b ∘ a b ∙ a b ∙→ a b ⊗ a """ Explanation: Advanced Math Operators As well as these operators, BIDMat includes several other important operators with their standard unicode representation. They have an ASCII alias in case unicode input is difficult. Here they are: <pre> Unicode operator ASCII alias Operation ================ =========== ========= ∘ *@ Element-wise (Hadamard) product ∙ dot Column-wise dot product ∙→ dotr Row-wise dot product ⊗ kron Kronecker (Cartesian) product </pre> End of explanation """ a ^* b a.t * b b *^ a b * a.t """ Explanation: TODO: using the operators above, construct a 5x5 matrix such that every element is one greater than the element to the left, and the element above. Transposed Multiplies Matrix multiply is the most expensive step in many calculations, and often involves transposed matrices. To speed up those calcualtions, we expose two operators that combine the transpose and multiply operations: <pre> ^&ast; - transpose the first argument, so a ^&ast; b is equivalent to a.t &ast; b &ast;^ - transpose the second argument, so a &ast;^ b is equivalent to a &ast; b.t </pre> these operators are implemented natively, i.e. they do not actually perform transposes, but implement the effective calculation. This is particulary important for sparse matrices since transpose would involve an index sort. End of explanation """ val v = col(1,2,3,4) v ∘ a """ Explanation: Edge Operators Most operators support scalar arguments as in the last section. There are also many situations where its helpful to apply an operation with an "edge" argument, that is a vector whose long dimension matches the matrix. This is similar to the "broadcast" feature in Numpy. For example, we can define a vector v as: End of explanation """ v.t ∘ a """ Explanation: The elementwise multiply by v is applied to every column of a. 
We could also apply v.t to the rows of a: End of explanation """ sum(c) """ Explanation: Edge operators arise in several context, e.g. normalizing a matrix along rows or columns. The sum() function computes the sums of columns of b and returns a row vector: End of explanation """ val d = c / sum(c) sum(d) """ Explanation: We can use this to normalize b along its columns: End of explanation """ val e = c / sum(c,2) sum(e,2) """ Explanation: or rows End of explanation """ //val cn = //cn ∙ cn """ Explanation: TODO: Using the sqrt() function on matrices, normalize the columns of the matrix c so that their L2 norm (or equivalently the dot product of the column with itself) is 1. End of explanation """ a + 1 // Add an integer, result is still an FMat val aa = 3.5 * a // Multiply by a double, result is still an FMat %type aa """ Explanation: Scalars Scalars (primitive numerical values) can be used in most expressions: End of explanation """ a + 2.7f """ Explanation: Note In Scala, floating point numbers like 3.5 have double precision by default. Many languages would cast the last result to a double matrix since double is the smallest container for both Float and Double data. We argue that when someone writes 3.5 x a, they mean to scale the matrix a by that factor and preserve its type, not to cause a type conversion. Single-precision constants in Scala need an "f" suffix, i.e. End of explanation """ val c = FMat(b) """ Explanation: In case you encounter double matrices in a calculation without meaning to, it may because of operations with double-precision constants. Use the floating point notation above for scalars to minimize the chances of unintentional up-conversion to double matrices. For the next step, we will need a floating point version of the matrix b, which we can construct like this: End of explanation """ val x= val y= plot(x,y) """ Explanation: TODO: Create a float vector of values from -10 to 10 spaced by 0.1. 
Then apply the logistic function 1/(1+exp(-c)) to and call the plot() function on the results End of explanation """ a(1,1) = 2 a a(0,3) """ Explanation: Indexing and Slicing You access invidual array elements using parenthesis (unlike [] in Python) End of explanation """ b b(?,1) b(2,?) """ Explanation: You can use the wildcard ? to access rows and columns: End of explanation """ b(1->3, 1->3) """ Explanation: Ranges work as expected: End of explanation """ b(icol(0,1), icol(0,1,3)) """ Explanation: And you can use arbitrary integer vectors to access submatrices: End of explanation """ val ii = 0\1\3 %type ii """ Explanation: Another shorthand constructor for integer matrices is the backslash operator: End of explanation """ b(0\1, 0\1\3) """ Explanation: And this syntax is handy for indexing expressions End of explanation """ b(1, 0\1\3) = 0\0\0 b """ Explanation: Slices can be used for assignment: End of explanation """ b(0\1, 0\1\3) = -1 b """ Explanation: and you can use scalars on the RHS to simplify bulk assignments End of explanation """ b(7) """ Explanation: Matrices also accept single indices that reference elements in column-major order: End of explanation """ b(0->16) = (16 to 1 by -1) """ Explanation: You can also use vectors of indices or ranges to assign arbitrary elements of a matrix, or all of them: End of explanation """ val m = rand(4,4) // val ii = // m(ii) """ Explanation: TODO: Define a set of indices ii such that for any 4x4 matrix m, m(ii) = m.t. HINT: you already computed it! End of explanation """ val x = rand(4,5) val xm = maxi(x) val xm2 = maxi(x,2) """ Explanation: Reducers BIDMach has several "reducers" that aggregate along rows or columns. We already saw one of these, which was the <code>sum()</code> function. Two other important ones are maxi() and mini(). These both compute the max or min respectively, along columns (default) or rows. e.g. 
End of explanation """ val (vmax, imax) = maxi2(x) vmax imax """ Explanation: Its often very useful to know not only what element was the max or min, but also its index. The functions <code>maxi2</code> and <code>mini2</code> do this. They have a "2" suffix to indicate that they return 2 values. The first is the max or min value, the second is the max or min index: End of explanation """ val (vmin, imin) = mini2(x,2) vmin imin """ Explanation: The first 3 means that the max element of the first column was in row number 3, etc. We can similarly compute the min along rows: End of explanation """ val inds = 0\0 on 0\1 on 2\3 on 0\1 on 3\3 val vals = col(1f, 2f, 3f, 4f, 5f) accum(inds, vals, 4, 5) """ Explanation: A last important reducer is <code>accum</code> which is similar to Matlab's <code>accumarray</code>, or numpy's <code>accum</code>. It allows you to tally some values into specific positions in an output array. The format is: <pre> accum(inds, vals, nrows, ncols) </pre> where inds is an nx2 matrix (IMat) of row,column indices, vals are the values to sum there, and nrows and ncols are the matrix dimensions. Its easiest to see with an example: End of explanation """ val rr = rand(4,5) // deconstruct rr // val rr2 = // Rebuild it """ Explanation: You can see that each of the <code>vals</code> was saved in the position specified by the corresponding <code>inds</code>. Most of the locations to save were distinct, except for the second and fourth rows, which specified the same location. Those two values (2f and 4f) were summed in that location. Find Functions The <code>find</code> function is similar to Matlab's find, and Numpy's <code>nonzero</code> function. 
It comes in several flavors, depending on how many values are returned: <pre> val ii = find(m) // find and return the single indices of non-zero elements of m val (ii, jj) = find2(m) // find and return the (row, column) indices of non-zeros in m val (ii, jj, vv) = find3(m) // find an return the (row, column) indices and values of non-zeros of m </pre> TODO: For the matrix below, use find3 to deconstruct it into row, column and value matrices. Then use accum to build it up again. You can use the \ operator to horizontally concatenate two matrices. End of explanation """ 0 to 5 0 until 5 """ Explanation: For and While Loops We used ranges before. There are two flavors, closed or "to" ranges, and open or "until" ranges. End of explanation """ for (i <- 0 until 5) { println("run number %d" format i) } """ Explanation: For loops use ranges in a natural way End of explanation """ var i = 6 while (i > 0) { println("counting down %d" format i) i -= 1 } """ Explanation: While loops provide no special loop variable management, simply a test End of explanation """ import java.util.Random val randgen = new Random tic var sum = 0f for (i <- 0 until 1000000) { sum += randgen.nextFloat - 0.5f } val t = toc (sum, t) tic var sum = 0f var i = 0 while (i < 1000000*100) { sum += randgen.nextFloat - 0.5f i += 1 } val t = toc (sum, t) """ Explanation: Warning: Performance Sinkhole! For loops are much more complex that while loops. They apparently create a local evaluation context for each iteration, and the overhead is several times higher than for while loops. Lets measure this: End of explanation """ val n = 8192 val a = rand(n,n) // a random dense matrix (CPU) val b = powrand(n,n,100) // a random power-law sparse matrix with 100 elements per column (CPU) val ga = GMat(a) // a GPU version of a val gb = GSMat(b) // a GPU version of b """ Explanation: For loops (and Scala's other functional tools like sequence classes) are extremely powerful for e.g. 
multi-threading and multi-GPU computing. But they're not suitable for lightweight iteration over elements. Functional Programming in BIDMat Functional programming in BIDMat (or Numpy or Matlab) avoids explicit iteration over the elements of matrices and concentrates instead on whole-array operations and (if irregular access is needed) on manipulation of index matrices. Its not unlike the use of global operations on DataFrames. This approach allows highly-parallelized code to be used to implement these routines. It often makes for more succinct code and (with some practice), greater readability. We'll concentrate on applying those ideas in the next part of the Lab. I Feel the Need for Speed !! Yes, its not only fun to have a fast toolkit, but it really matters for performance. Not just runtime, but most algorithms can trade off time for precision by simply training more thoroughly, or training a richer model. BIDMat/BIDMach is currently the only system which fully integrates GPU computing with CPU computing. Its only the only system to have fully rooflined sparse matrix primitives. This is very important, since these are the bottleneck for machine learning on the most common types of data (text, web, clickthrough etc). Let's measure exactly how much difference this makes. First, we'll define a few matrices, both on the CPU and on a GPU (assuming you have one). End of explanation """ var ma:Mat = a // create a generic Mat variable and bind it to a var mb:Mat = b // create a generic Mat variable and bind it to b var mc:Mat = null // we'll use this to hold results """ Explanation: Now we could just go ahead and do our calculations on a,b,ga,gb directly. This is a common scenario. But we would also like to illustrate BIDMat/BIDMach's support for generics. So instead will create variables of type "Mat" to hold those variables, and perform arithmetic on those instead. 
End of explanation """ flip mc = ma * ma gflop """ Explanation: Now we'll benchmark both dense and sparse matrix multiply. Dense CPU multiply first. End of explanation """ flip mc = ma * mb gflop """ Explanation: The "flip" function starts a timer and reset the flop count. "gflop" returns two values: the gigaflop count, and the time since the last "flip". End of explanation """ ma = ga mb = gb """ Explanation: Now lets bind those variables to GPU matrices instead: End of explanation """ flip mc = ma * ma gflop """ Explanation: and run exactly the same code: End of explanation """ flip mc = ma * mb gflop """ Explanation: You'll probably see a good order-of-magnitude speedup over the CPU calculation. This shouldnt be surprising. GPUs have a well-earned reputation for dense-matrix performance. What's less well-known is their sparse matrix performance, which yields roughly order-of-magnitude gains as well. End of explanation """ val a = rand(4,4) val b = rand(4,4) (a.GUID, b.GUID) """ Explanation: You should see performance in the 20-50 gflops range, which is near the roofline for sparse operations on the current generation of Nvidia GPUs. This is very important, because that operation (and two other variants that have similar performance) is the dominant step in most common machine learning algorithms. With careful design of the entire learning pipeline, you can translate that advantage into a end-to-end speedup by the same factor. Furthermore, by writing generic code (using the Mat class as above) you can hide the details of implementation (CPU vs GPU) and run your algorithm in either environment. You can also support either sparse or dense matrices, and many of BIDMach's learning algorithms will work with either. TODO: try the transpose operator &ast;^ in the cells above. Matrix Caching One of the challenges of working with GPUs is the current lack of memory management (i.e. a garbage collector). 
The very high streaming speed of GPU memory makes it very difficult to do memory management without significant slowdowns. BIDMach includes a matrix caching scheme which allows re-use of matrix storage. It works particularly well in BIDMach's minibatch algorithms, which process same-sized blocks of data many times. To understand caching, lets first notice that every matrix has a unique long id or guid: End of explanation """ val c = a * b val d = a + b (c.GUID, d.GUID) """ Explanation: Normally, when you do calculations with matrices, new containers are created to hold the results: End of explanation """ Mat.useCache = true val c = a + b val d = a + b val e = a * b (c.GUID, d.GUID, e.GUID) """ Explanation: But with caching enabled, the same expression will yield the same container (different expressions with the same arguments will still yield different containers): End of explanation """ Mat.useCache = false """ Explanation: Although this approach causes aliasing (c and d now point to the same container), in a functional programming language the same expressions should always hold the same value. Arrays are mutable objects, so are not guaranteed to hold the same value. Nevertheless if you program in functional style, the same expressions should hold the same value and caching is a safe operation. With that caveat, caching is a very helpful performance optimization. The learners in BIDMach automatically turn caching on and off when you run a learning algorithm and in this way are able to eliminate matrix allocation after the first iteration. Its a necessary feature to be able to use a GPU on large datasets, and it often accelerates calculations on the CPU by removing memory allocation and garbage collection overhead. End of explanation """
quantopian/research_public
case_studies/traditional_value/traditional_value_notebook.ipynb
apache-2.0
import numpy as np import pandas as pd import matplotlib.pyplot as plt from quantopian.pipeline import Pipeline from quantopian.pipeline.data.builtin import USEquityPricing from quantopian.research import run_pipeline from quantopian.pipeline.data import morningstar from quantopian.pipeline.factors import CustomFactor """ Explanation: Traditional Value Factor Algorithm By Gil Wassermann Strategy taken from "130/30: The New Long-Only" by Andrew Lo and Pankaj Patel Part of the Quantopian Lecture Series: * www.quantopian.com/lectures * github.com/quantopian/research_public Notebook released under the Creative Commons Attribution 4.0 License. Please do not remove this attribution. Before the crisis of 2007, 130/30 funds were all the rage. The idea of a 130/30 fund is simple: take a long position of 130% and a short position of 30%; this combination gives a net exposure of 100% (the same as a long-only fund) as well as the added benefit of the ability to short stocks. The ability to short in a trading strategy is crucial as it allows a fund manager to capitalize on a security's poor performance, which is impossible in a traditional, long-only strategy. This notebook, using factors outlined by Andrew Lo and Pankaj Patel in "130/30: The New Long Only", will demonstrate how to create an algorithmic 130/30 strategy. It will also highlight Quantopian's Pipeline API which is a powerful tool for developing factor trading strategies. First, let us import all necessary libraries and functions for this algorithm End of explanation """ # Custom Factor 1 : Dividend Yield class Div_Yield(CustomFactor): inputs = [morningstar.valuation_ratios.dividend_yield] window_length = 1 def compute(self, today, assets, out, d_y): out[:] = d_y[-1] """ Explanation: Traditional Value In this notebook, we will develop a strategy based on the "traditional value" metrics described in the Lo/Patel whitepaper. 
The factors employed in this strategy designate stocks as either cheap or expensive using classic fundamental analysis. The factors that Lo/Patel used are: Dividend Yield Price to Book Value Price to Trailing 12-Month Sales Price to Trainling 12-Month Cash Flows Dividend Yield Dividend yield is calculated as: $$Dividend\;Yield = \frac{Annual\;Dividends\;per\;share}{Price\;per\;share}$$ When a company makes profit, it faces a choice. It could either reinvest those profits in the company with an eye to increase efficiency, purchase new technology, etc. or it could pay dividends to its equity holders. While reinvestment may increase a company's future share price and thereby reward investors, the most concrete way equity holders are rewarded is through dividends. An equity with a high dividend yield is particularly attractive as the quantity of dividends paid to investors represent a larger proportion of the share price itself. Now we shall create a Dividend Yield factor using the Pipeline API framework and Morningstar's list of fundamental values. End of explanation """ # create the pipeline temp_pipe_1 = Pipeline() # add the factor to the pipeline temp_pipe_1.add(Div_Yield(), 'Dividend Yield') # run the pipeline and get data for first 5 equities run_pipeline(temp_pipe_1, start_date='2015-11-11', end_date='2015-11-11').head() """ Explanation: While this factor could be calculated using other fundamental metrics, Morningstar removes the need for any calculation. It is good practice to check the list of fundamentals (https://www.quantopian.com/help/fundamentals) before creating a custom factor from scratch. We will initialize a temporary Pipeline to get a sense of the values. 
End of explanation """ # Custom Factor 2 : P/B Ratio class Price_to_Book(CustomFactor): inputs = [morningstar.valuation_ratios.pb_ratio] window_length = 1 def compute(self, today, assets, out, pbr): out[:] = pbr[-1] # create the Pipeline temp_pipe_2 = Pipeline() # add the factor to the Pipeline temp_pipe_2.add(Price_to_Book(), 'P/B Ratio') # run the Pipeline and get data for first 5 equities run_pipeline(temp_pipe_2, start_date='2015-11-11', end_date='2015-11-11').head() """ Explanation: Price to Book Value Price to Book Value (a.k.a Price to Book Ratio) is calculated as: $$P/B\;Ratio = \frac{Price\;per\;share}{Net\;Asset\;Value\;per\;share}$$ Net Asset Value per share can be thought of (very roughly) as a company's total assets less its total liabilities, all divided by the number of shares outstanding. The P/B Ratio gives a sense of a stock being either over- or undervalued. A high P/B ratio suggests that a stock's price is overvalued, and should therefore be shorted, whereas a low P/B ratio is attractive as the stock gained by purchasing the equity is hypothetically "worth more" than the price paid for it. We will now create a P/B Ratio custom factor and look at some of the results. End of explanation """ # Custom Factor 3 : Price to Trailing 12 Month Sales class Price_to_TTM_Sales(CustomFactor): inputs = [morningstar.valuation_ratios.ps_ratio] window_length = 1 def compute(self, today, assets, out, ps): out[:] = -ps[-1] # create the pipeline temp_pipe_3 = Pipeline() # add the factor to the pipeline temp_pipe_3.add(Price_to_TTM_Sales(), 'Price / TTM Sales') # run the pipeline and get data for first 5 equities run_pipeline(temp_pipe_3, start_date='2015-11-11', end_date='2015-11-11').head() """ Explanation: There are two points to make about this data series. Firstly, AA_PR's P/B Ratio is given as NaN by Pipeline. NaN stands for "not a number" and occurs when a value can not be fetched by Pipeline. 
Eventually, we will remove these NaN values from the dataset as they often lead to confusing errors when manipulating the data. Secondly, a low P/B Ratio and a high Dividend Yield are attractive for investors, whereas a a high P/B Ratio and a low Dividend Yield are unattractive. Therefore, we will "invert" the P/B ratio by making each value negative in the factor output so that, when the data is aggregated later in the algorithm, the maxima and minima have the same underlying "meaning". Price to Trailing 12-Month Sales This is calculated as a simple ratio between price per share and trailing 12-month (TTM) sales. TTM is a transformation rather than a metric and effectively calculates improvement or deterioration of a fundamental value from a particular quarter one year previously. For example, if one wanted to calculate today's TTM Sales for company XYZ, one would take the most recent quarter's revenue and divide it by the difference between this quarter's revenue and this quarter's revenue last year added to the revenue as given by the company's most recent fiscal year-end filing. To calculate the exact TTM of a security is indeed possible using Pipeline; however, the code required is slow. Luckily, this value can be well approximated by the built-in Morningstar valuation ratios, which use annual sales to calculate the Price to Sales fundamental value. This slight change boosts the code's speed enormously yet has very little impact on the results of the strategy itself. Price to TTM Sales is similar to the P/B Ratio in terms of function. The major difference in these two ratios is the fact that inclusion of TTM means that seasonal fluctuations are minimized, as previous data is used to smooth the value. In our case, annualized values accomplish this same smoothing. Also, note that the values produced are negative; this factor requires the same inversion as the P/B Ratio. 
End of explanation """ # Custom Factor 4 : Price to Trailing 12 Month Cashflow class Price_to_TTM_Cashflows(CustomFactor): inputs = [morningstar.valuation_ratios.pcf_ratio] window_length = 1 def compute(self, today, assets, out, pcf): out[:] = -pcf[-1] # create the pipeline temp_pipe_4 = Pipeline() # add the factor to the pipeline temp_pipe_4.add(Price_to_TTM_Cashflows(), 'Price / TTM Cashflows') # run the pipeline and get data for first 5 equities run_pipeline(temp_pipe_4, start_date='2015-11-11', end_date='2015-11-11').head() """ Explanation: Price to Trailing 12-Month Cashflows This is calculated as a simple ratio between price per share and TTM free cashflow (here using the built-in Morningstar valuation ratio as an approximaton). This ratio serves a similar function to the previous two. A future notebook will explore the subtle differences in these metrics, but they largely serve the same purpose. Once again, low values are attractive and high values are unattractive, so the metric must be inverted. 
End of explanation """ # This factor creates the synthetic S&P500 class SPY_proxy(CustomFactor): inputs = [morningstar.valuation.market_cap] window_length = 1 def compute(self, today, assets, out, mc): out[:] = mc[-1] # Custom Factor 2 : P/B Ratio class Price_to_Book(CustomFactor): inputs = [morningstar.valuation_ratios.pb_ratio] window_length = 1 def compute(self, today, assets, out, pbr): out[:] = -pbr[-1] def Data_Pull(): # create the piepline for the data pull Data_Pipe = Pipeline() # create SPY proxy Data_Pipe.add(SPY_proxy(), 'SPY Proxy') # Div Yield Data_Pipe.add(Div_Yield(), 'Dividend Yield') # Price to Book Data_Pipe.add(Price_to_Book(), 'Price to Book') # Price / TTM Sales Data_Pipe.add(Price_to_TTM_Sales(), 'Price / TTM Sales') # Price / TTM Cashflows Data_Pipe.add(Price_to_TTM_Cashflows(), 'Price / TTM Cashflow') return Data_Pipe # NB: Data pull is a function that returns a Pipeline object, so need () results = run_pipeline(Data_Pull(), start_date='2015-11-11', end_date='2015-11-11') results.head() """ Explanation: The Full Pipeline Now that each individual factor has been added, it is now time to get all the necessary data at once. In the algorithm, this will take place once every day. Later in the process, we will need a factor in order to create an approximate S&P500, so we will also include another factor called SPY_proxy (SPY is an ETF that tracks the S&P500). The S&P500 is a collection of 500 of the largest companies traded on the stock market. Our interpretation of the S&P500 is a group of 500 companies with the greatest market capitalizations; however, the actual S&P500 will be slightly different as Standard and Poors, who create the index, have a more nuanced algorithm for calculation. We will also alter our P/B Ratio factor in order to account for the inversion. 
End of explanation """ # limit effect of outliers def filter_fn(x): if x <= -10: x = -10.0 elif x >= 10: x = 10.0 return x # standardize using mean and sd of S&P500 def standard_frame_compute(df): # basic clean of dataset to remove infinite values df = df.replace([np.inf, -np.inf], np.nan) df = df.dropna() # need standardization params from synthetic S&P500 df_SPY = df.sort(columns='SPY Proxy', ascending=False) # create separate dataframe for SPY # to store standardization values df_SPY = df_SPY.head(500) # get dataframes into numpy array df_SPY = df_SPY.as_matrix() # store index values index = df.index.values df = df.as_matrix() df_standard = np.empty(df.shape[0]) for col_SPY, col_full in zip(df_SPY.T, df.T): # summary stats for S&P500 mu = np.mean(col_SPY) sigma = np.std(col_SPY) col_standard = np.array(((col_full - mu) / sigma)) # create vectorized function (lambda equivalent) fltr = np.vectorize(filter_fn) col_standard = (fltr(col_standard)) # make range between -10 and 10 col_standard = (col_standard / df.shape[1]) # attach calculated values as new row in df_standard df_standard = np.vstack((df_standard, col_standard)) # get rid of first entry (empty scores) df_standard = np.delete(df_standard,0,0) return (df_standard, index) # Sum up and sort data def composite_score(df, index): # sum up transformed data df_composite = df.sum(axis=0) # put into a pandas dataframe and connect numbers # to equities via reindexing df_composite = pd.Series(data=df_composite,index=index) # sort descending df_composite.sort(ascending=False) return df_composite # compute the standardized values results_standard, index = standard_frame_compute(results) # aggregate the scores ranked_scores = composite_score(results_standard, index) # print the final rankings ranked_scores """ Explanation: Aggregation Now that we have all our data, we need to manipulate this in order to create a single ranking of the securities. 
Lo/Patel recommend the following algorithm: Extract the S&P500 from the set of equities and find the mean and standard deviation of each factor for this dataset (standard_frame_compute) Use these computed values to standardize each factor (standard_frame_compute) Replace values that are greater that 10 or less that -10 with 10 and -10 respectively in order to limit the effect of outliers (filter_fn) Sum these values for each equity and divide by the number of factors in order to give a value between -10 and 10 (composite score) The code for this is shown below. End of explanation """ # create histogram of scores ranked_scores.hist() # make scores into list for ease of manipulation ranked_scores_list = ranked_scores.tolist() # add labels to axes plt.xlabel('Standardized Scores') plt.ylabel('Quantity in Basket') # show long bucket plt.axvline(x=ranked_scores_list[25], linewidth=1, color='r') # show short bucket plt.axvline(x=ranked_scores_list[-6], linewidth=1, color='r'); """ Explanation: Stock Choice Now that we have ranked our securities, we need to choose a long basket and a short basket. Since we need to keep the ratio 130/30 between longs and shorts, why not have 26 longs and 6 shorts (in the algorithm we will weigh each of these equally, giving us our desired leverage and exposure). On the graph below, we plot a histogram of the securities to get a sense of the distribution of scores. The red lines represent the cutoff points for the long and short buckets. One thing to notice is that the vast majority of equities are ranked near the middle of the histogram, whereas the tails are quite thin. This would suggest that there is something special about the securities chosen to be in these baskets, and -hopefully- these special qualities will yield positive alpha for the strategy. End of explanation """
tensorflow/workshops
extras/archive/01_linear_regression_low_level.ipynb
apache-2.0
# The next three imports help with compatability between # Python 2 and 3 from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import numpy as np import pylab import tensorflow as tf # A special command for IPython Notebooks that # intructs Matplotlib to display plots in the notebook %matplotlib inline # This is a directory we'll use to store information # about the graph to later visualize in TensorBoard. # By default, it will be created in the same directory # as this notebook. # Be sure to delete the contents of this directory before # running the script. LOGDIR = './graphs' tf.reset_default_graph() sess = tf.Session() """ Explanation: This notebook demonstrates linear regression in TensorFlow, and visualizes the graph and summary data for the variables in TensorBoard. Uncomment code where instructed to learn more as you go along. End of explanation """ def make_noisy_data(m=0.1, b=0.3, n=100): x = np.random.rand(n).astype(np.float32) noise = np.random.normal(scale=0.01, size=len(x)) y = m * x + b + noise return x, y """ Explanation: This function will create a noisy dataset that's roughly linear, according to the equation y = mx + b + noise End of explanation """ x_train, y_train = make_noisy_data() x_test, y_test = make_noisy_data() """ Explanation: Step 1) Create training and testing data. End of explanation """ # Uncomment the following lines to plot the data. # pylab.plot(x_train, y_train, 'b.') # pylab.plot(x_test, y_test, 'g.') """ Explanation: Plot our training and testing data End of explanation """ # tf.name_scope is used to make a graph legible in the TensorBoard graph explorer # shape=[None] means x_placeholder is a one dimensional array of any length. # name='x' gives TensorBoard a display name for this node. 
with tf.name_scope('input'):
    # Placeholders are filled at session.run time via feed_dict.
    x_placeholder = tf.placeholder(shape=[None], dtype=tf.float32, name='x-input')
    y_placeholder = tf.placeholder(shape=[None], dtype=tf.float32, name='y-input')

# Uncomment the following lines to see what x_placeholder and y_placeholder are.
# print(x_placeholder)
# print(y_placeholder)
"""
Explanation: Step 2) Define placeholders for data we'll later input to the graph.
End of explanation
"""

with tf.name_scope('model'):
    # m (slope) and b (intercept) are the two trainable parameters,
    # each initialized from a random normal distribution.
    m = tf.Variable(tf.random_normal([1]), name='m')
    b = tf.Variable(tf.random_normal([1]), name='b')
    # This is the same as y = tf.add(tf.mul(m, x_placeholder), b), but looks nicer
    y = m * x_placeholder + b

# Uncomment the following lines to see what m, b, and y are
# print(m)
# print(b)
# print(y)
"""
Explanation: Step 3) Define our model. Here, we'll use a linear model: y = mx + b
End of explanation
"""

LEARNING_RATE = 0.5

with tf.name_scope('training'):
    with tf.name_scope('loss'):
        # Mean squared error between predictions and labels.
        loss = tf.reduce_mean(tf.square(y - y_placeholder))
    with tf.name_scope('optimizer'):
        optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE)
        train = optimizer.minimize(loss)

# Uncomment the following 3 lines to see what 'loss', 'optimizer' and 'train' are.
# print("loss:", loss)
# print("optimizer:", optimizer)
# print("train_step:", train)
"""
Explanation: Step 4) Define a loss function (here, squared error) and an optimizer (here, gradient descent).
End of explanation
"""

# Write the graph
writer = tf.summary.FileWriter(LOGDIR)
writer.add_graph(sess.graph)

# Attach summaries to Tensors (for TensorBoard visualization)
tf.summary.histogram('m', m)
tf.summary.histogram('b', b)
tf.summary.scalar('loss', loss)

# This op will calculate our summary data when run
summary_op = tf.summary.merge_all()
"""
Explanation: Step 5) Set up TensorBoard
End of explanation
"""

sess.run(tf.global_variables_initializer())
"""
Explanation: Step 6) Initialize variables
At this point, our graph is complete.
We're now ready to initialize variables, then begin training.
End of explanation
"""

TRAIN_STEPS = 201

for step in range(TRAIN_STEPS):
    # Session will run two ops:
    # - summary_op prepares summary data we'll write to disk in a moment
    # - train will use the optimizer to adjust our variables to reduce loss
    summary_result, _ = sess.run([summary_op, train],
                                 feed_dict={x_placeholder: x_train,
                                            y_placeholder: y_train})
    # write the summary data to disk
    writer.add_summary(summary_result, step)

    # Uncomment the following two lines to watch training happen real time.
    if step % 20 == 0:
        print(step, sess.run([m, b]))

# close the writer when we're finished using it
writer.close()
"""
Explanation: Step 7) Training
End of explanation
"""

print ("m: %f, b: %f" % (sess.run(m), sess.run(b)))
"""
Explanation: Step 8) See the trained values for m and b
End of explanation
"""

# Use the trained model to make a prediction!
# Remember that x_placeholder must be a vector, hence [2] not just 2.
# We expect the result to be (about): 2 * 0.1 + 0.3 + noise ~= 0.5
sess.run(y, feed_dict={x_placeholder: [2]})
"""
Explanation: Step 9) Use the trained model to make a prediction
End of explanation
"""

# To start TensorBoard, run this command from a terminal
# Note: you should run this from the same directory that contains this notebook
# or, provide absolute path to the 'graphs' directory
# If you are running in a virtualenv, be sure to start it
# if you open a new terminal window.
# $ tensorboard --logdir=graphs
# To open TensorBoard, point your browser to http://localhost:6006
# Then, click on the tabs for 'scalars', 'distributions', 'histograms', and 'graphs'
# to learn more.
# If you run into trouble, delete LOGDIR (to clear information from previous runs),
# then re-run this script, and restart TensorBoard.
"""
Explanation: Step 10) Start TensorBoard
End of explanation
"""
tpin3694/tpin3694.github.io
blog/aisle_seat_probabilities.ipynb
mit
# Import required modules import pandas as pd import numpy as np # Set plots to display in the iPython notebook %matplotlib inline """ Explanation: Title: What Is The Probability An Economy Class Seat Is An Aisle Seat? Slug: aisle_seat_probabilities Summary: What Is The Probability An Economy Class Seat Is An Aisle Seat? Date: 2016-05-01 12:00 Category: Blog Tags: Authors: Chris Albon There are two types of people in the world, aisle seaters and window seaters. I am an aisle seater, nothing is worse than limited bathroom access on a long flight. The first thing I do when I get my ticket is check to see if I have a window seat. If not, I immediately head over to the airline counter and try to get one. Last flight, on Turkish Airlines, I ran into a curious situation. I recieved my boarding pass with my seat number, 18C, but the ticket did not specify if C was an aisle seat or not. Making matters worse, the airline counter was swamped with a few dozen people. So I asked myself: given only the seat letter, C, what is the probability that it is an aisle seat? Later, on the flight, I decided to find out. Preliminaries End of explanation """ # An aircraft with two seats per row rows2 = [1,1, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan] # An aircraft with three seats per row rows3 = [1,1,0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,] # An aircraft with four seats per row rows4 = [0,1,1,0, np.nan, np.nan, np.nan, np.nan, np.nan] # An aircraft with five seats per row rows5 = [0,1,1,0,0, np.nan, np.nan,np.nan, np.nan] # An aircraft with six seats per row rows6 = [0,1,1,1,1,0, np.nan, np.nan, np.nan] # An aircraft with seven seats per row rows7 = [0,1,1,0,1,1,0, np.nan, np.nan] # An aircraft with eight seats per row rows8 = [0,0,1,1,1,1,0,0, np.nan] # An aircraft with nine seats per row rows9 = [0,0,1,1,0,1,1,0,0] """ Explanation: Setup possible seat configurations I am a pretty frequently flyer on a variety of airlines and aircraft. 
There are a variety of seating configurations out there, but typically they follow some basic rules: No window cluster of seats has more than three seats. On small flights with three seats, the single seat is on the left side. No flight has more than nine rows. Based on these rules, here are the "typical" seating configurations from aircraft with between two and nine seats per row. A '1' codifies that a seat is an aisle seat, a '0' codifies that it is a non-aisle seat (i.e. window or middle), and 'np.nan' denotes that the aircraft has less than nine seats (this is so all the list lengths are the same). End of explanation """ # Create a list variable of all possible aircraft configurations seating_map = [rows2, rows3, rows4, rows5, rows6, rows7, rows8, rows9] # Create a dataframe from the seating_map variable df = pd.DataFrame(seating_map, columns=['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'], index=['rows2', 'rows3', 'rows4', 'rows5', 'rows6', 'rows7', 'rows8', 'rows9']) """ Explanation: For example, in an aircraft with five seats per row, rows5, the seating arrangement would be: window aisle aisle middle window no seat no seat no seat no seat Next, I'm take advantage of pandas row summation options, but to do this I need to wrangle the data into a pandas dataframe. Essentially I am using the pandas dataframe as a matrix. End of explanation """ # View the dataframe df """ Explanation: Here is all the data we need to construct our probabilities. The columns represent individual seat letters (A, B, etc.) while the rows represent the number of seats-per-row in the aircraft. End of explanation """ # Create a list wherein each element is the mean value of a column aisle_probability = [df['A'].mean(), df['B'].mean(), df['C'].mean(), df['D'].mean(), df['E'].mean(), df['F'].mean(), df['G'].mean(), df['H'].mean(), df['I'].mean()] # Display the variable aisle_probability """ Explanation: Calculate aisle probability Because each aircraft seats-per-row configuration (i.e. 
row) is binary (1 if aisle, 0 if non-aisle), the probability that a seat is an aisle is simply the mean value of each seat letter (i.e. column).
End of explanation
"""

# Create a list of strings to use as the x-axis labels
seats = ['Seat A', 'Seat B', 'Seat C', 'Seat D', 'Seat E', 'Seat F', 'Seat G', 'Seat H', 'Seat I']

# Plot the probabilities, using 'seats' as the index as a bar chart
pd.Series(aisle_probability, index=seats).plot(kind='bar',
                                               # set y to range between 0 and 1
                                               ylim=[0,1],
                                               # set the figure size
                                               figsize=[10,6],
                                               # set the figure title
                                               # (note: the 'Probabilty' misspelling below is a
                                               # runtime string and is deliberately left untouched)
                                               title='Probabilty of being an Aisle Seat in Economy Class')
"""
Explanation: So there you have it, the probability that each seat letter is an aisle. However, we can make the presentation a little more intuitive.
Visualize seat letter probabilities
The most obvious visualization to convey the probabilities would be seat letters on the x-axis and probabilities on the y-axis. Panda's plot function makes that easy.
End of explanation
"""
sdpython/ensae_teaching_cs
_doc/notebooks/td2a/td2a_correction_session_5_donnees_non_structurees_et_programmation_fonctionnelle_corrige.ipynb
mit
from jyquickhelper import add_notebook_menu add_notebook_menu() """ Explanation: 2A.i - Données non structurées, programmation fonctionnelle - correction Calculs de moyennes et autres statistiques sur une base twitter au format JSON avec de la programmation fonctionnelle (module cytoolz). End of explanation """ import pyensae.datasource pyensae.datasource.download_data("twitter_for_network_100000.db.zip") import cytoolz as ct import cytoolz.curried as ctc import sqlite3 import pprint try: import ujson as json except: import json conn_sqlite = sqlite3.connect("twitter_for_network_100000.db") cursor_sqlite = conn_sqlite.cursor() """ Explanation: Commencez par télécharger la base de donnée : twitter_for_network_100000.db https://drive.google.com/file/d/0B6jkqYitZ0uTWjFjd3lpREpFYVE/view?usp=sharing Vous pourrez éventuellement télécharger la base complète (3,4 millions d'utilisateurs, plutôt que 100000) ultérieurement si vous souhaitez tester vos fonctions. Ne perdez pas de temps avec ceci dans ce tp. 
twitter_for_network_full.db https://drive.google.com/file/d/0B6jkqYitZ0uTWkR6cDZQUTlVSWM/view?usp=sharing Vous pouvez consulter l'aide de pytoolz (même interface que cytoolz) ici : http://toolz.readthedocs.org/en/latest/ La section sur l'API est particulièrement utile car elle résume bien les différentes fonctions : http://toolz.readthedocs.org/en/latest/api.html Ensuite exécutez la cellule suivante : Liens alternatifs : twitter_for_network_100000.db.zip twitter_for_network_full.db.zip End of explanation """ # cursor_sqlite.execute( "SELECT user_id, content FROM tw_followers_id") cursor_sqlite.execute( "SELECT id, content, screen_name FROM tw_users") # cursor_sqlite.execute( "SELECT id, content, user_id FROM tw_status") for it_elt in cursor_sqlite: ## do something here pass # ou, pour accéder à un élément : cursor_sqlite.execute( "SELECT id, content, screen_name FROM tw_users") cursor_sqlite.fetchone() """ Explanation: Description de la base de donnée Nous nous intéresserons à 3 tables : tw_users, tw_status et tw_followers_id. La première (tw_users) contient des profils utilisateurs tels que retournés par l'api twitter (à noter que les profils ont été "épurés" d'informations jugées inutiles pour limiter la taille de la base de donnée). La deuxième (tw_status) contient des status twitter (tweet, retweet, ou réponse à un tweet), complets, issus d'une certaine catégorie d'utilisateurs (les tweets sont tous issus d'environ 70 profils). La troisième (tw_followers_id) contient des listes d'id d'users, qui suivent les utilisateurs référencés par la colonne user_id. Là encore ce ne sont les followers que de environ 70 profils. Chaque entrée contient au plus 5000 id de followers (il s'agit d'une limitation de twitter). Elles ont les structures suivantes : Les trois possèdent un champ content, de type json, qui sera celui qui nous interessera le plus. 
Vous pouvez accédez aux données dans les tables avec les syntaxes suivantes (vous pouvez commenter/décommenter les différentes requêtes). End of explanation """ cursor_sqlite.execute( "SELECT user_id, content FROM tw_followers_id") print( ct.count( cursor_sqlite ) ) cursor_sqlite.execute( "SELECT id, content, user_id FROM tw_status") print( ct.count( cursor_sqlite ) ) cursor_sqlite.execute( "SELECT id, content, screen_name FROM tw_users") print( ct.count( cursor_sqlite ) ) """ Explanation: Toutefois les curseurs de base de donnée en python se comportent comme des "iterables" (i.e. comme une liste ou une séquence, mais sans nécessairement charger toutes les données en mémoire). On peut donc les passer directement en argument aux fonctions de cytoolz. End of explanation """ cursor_sqlite.execute( "SELECT user_id, content FROM tw_followers_id") print( ct.count( cursor_sqlite ) ) print( ct.count( cursor_sqlite ) ) """ Explanation: Attention au fait que le curseur garde un état. Par exemple exécutez le code suivant : End of explanation """ cursor_sqlite.execute( "SELECT user_id, content FROM tw_followers_id") print( ct.count( cursor_sqlite ) ) cursor_sqlite.execute( "SELECT user_id, content FROM tw_followers_id") print( ct.count( cursor_sqlite ) ) """ Explanation: Le deuxième count renvoit 0 car le curseur se rappelle qu'il est déjà arrivé à la fin des données qu'il devait parcourir. 
Il faut donc réinitialiser le curseur : End of explanation """ def get_tw_followers_id(): return cursor_sqlite.execute( "SELECT user_id, content FROM tw_followers_id") print( ct.count( get_tw_followers_id() ) ) print( ct.count( get_tw_followers_id() ) ) """ Explanation: On peut également mettre la commande execute à l'intérieur d'une fonction, que l'on appelle ensuite : End of explanation """ import cytoolz as ct import cytoolz.curried as ctc list( ct.unique( cursor_sqlite.execute( "SELECT user_id FROM tw_followers_id") ) ) """ Explanation: La commande exécute en elle-même ne prend pas du tout de temps, car elle ne fait que préparer la requête, n'hésitez donc pas à en mettre systématiquement dans vos cellules, plutôt que de risquer d'avoir un curseur dont vous ne vous souvenez plus de l'état. Partie 1 - description de la base de donnée Question 1 - éléments unique d'une table Trouvez la liste des user_id différents dans la table tw_followers_id, en utilisant les fonctions cytoolz. La fonction qui pourra vous être utiles ici : ct.unique(seq) => à partir d'une séquence, renvoit une séquence où tous les doublons ont été supprimés Vous vous rappelez sans doute que nous utilisions systématiquement pluck et map pour les exemples du cours, ceux-ci ne sont pas nécessaires ici. A noter qu'il faudra sans doute utilisez la fonction list( ... ), ou une boucle for pour forcer l'évaluation des fonctions cytoolz. End of explanation """ import cytoolz as ct import cytoolz.curried as ctc ct.count( ct.unique( cursor_sqlite.execute( "SELECT user_id FROM tw_followers_id") ) ) """ Explanation: A noter que si vous voyez apparaître vos résultats sous la forme (79145543,), c'est normal, le curseur sqlite renvoit toujours ces résultats sous forme de tuple : (colonne1, colonne2, colonne3, ...) et ce même si il n'y a qu'une seule colonne dans la requête. Nous utiliserons pluck pour extraire le premier élément du tuple. 
Question 2 - nombre d'élements unique d'une table Trouvez le nombre de user_id différents dans la table tw_followers_id, en utilisant les fonctions cytoolz. Les fonctions qui pourront vous êtres utiles ici : ct.count(seq) => compte le nombre d'éléments d'une séquence ct.unique(seq) => à partir d'une séquence, renvoit une séquence où tous les doublons ont été supprimés Vous vous rappelez sans doute que nous utilisions systématiquement pluck et map pour les exemples du cours, ceux-ci ne sont pas nécessaires, ici. End of explanation """ import cytoolz as ct import cytoolz.curried as ctc comptez_unique = ct.compose( ct.count, ct.unique ) ## Pour tester votre code, cette ligne doit renvoyer le même nombre qu'à la question 2 comptez_unique( cursor_sqlite.execute( "SELECT user_id FROM tw_followers_id") ) """ Explanation: Question 3 : création d'une fonction comptez_unique A l'aide de ct.compose, créez une fonction comptez_unique qui effectue directement cette opération. Pour rappel, ct.compose( f, g, h, ...) renvoit une fonction qui appelée sur x exécute (f(g(h(x))). ct.compose prend un nombre d'arguments quelconque. A noter que les fonctions données en argument doivent ne prendre qu'un seul argument, ce qui est le cas ici. Pensez bien que comme vous manipulez ici les fonctions elle-même, il ne faut pas mettre de parenthèses après End of explanation """ import cytoolz as ct cursor_sqlite.execute( "SELECT content FROM tw_users") comptez_unique( ct.pluck("location", ct.map(json.loads, ct.pluck(0, cursor_sqlite)))) # Le résultat attendu est 13730 """ Explanation: Question 4 : compte du nombre de valeurs de "location" différentes dans la table tw_users Nous allons utiliser la fonction comptez_unique définie précédemment pour comptez le nombre de "location" différentes dans la table tw_users. 
Pour cela il faudra faire appel à deux fonctions : ct.pluck pour extraire une valeur de tous les éléments d'une séquence ct.map pour appliquer une fonction (ici json.loads pour transformer une chaîne de caractère au format json en objet python). Il faudra sans doute appliquer ct.pluck deux fois, une fois pour extraire la colonne content du résultat de la requête (même si celle-ci ne comprend qu'une colonne) et une fois pour extraire le champ "location" du json. Les syntaxes de ces fonctions sont les suivantes : ct.pluck( 0, seq ) (cas d'une séquence de liste ou de tuple) ou ct.pluck( key, seq ) (cas d'une séquence de dictionnaire). ct.map( f, seq ) où f est la fonction que l'on souhaite appliquer (ne mettez pas les parenthèses après le f, ici vous faites références à la fonction, pas son résultat) Astuce : dans le cas improbable où vous auriez un ordinateur sensiblement plus lent que le rédacteur du tp, rajoutez LIMIT 10000 à la fin des requêtes End of explanation """ pluck_loc = ctc.pluck("location") map_loads = ctc.map(json.loads) pluck_0 = ctc.pluck(0) """ Explanation: Question 5 : curly fonctions Comme on risque de beaucoup utiliser les fonctions ct.map et ct.pluck, on veut se simplifier la vie en utilisant la notation suivante : End of explanation """ import cytoolz as ct cursor_sqlite.execute( "SELECT content FROM tw_users") comptez_unique( pluck_loc( map_loads( pluck_0(cursor_sqlite)))) # Le résultat attendu est 13730 """ Explanation: Notez bien que nous utilisons ctc.pluck et non pas ct.pluck, car le package cytoolz.curry (ici importé en temps que ctc) contient les versions de ces fonctions qui supportent l'évaluation partielle. Les objets pluck_loc, map_loads, pluck_0 sont donc des fonctions à un argument, construites à partir de fonctions à deux arguments. 
Utilisez ces 3 fonctions pour simplifier l'écriture de la question 4 End of explanation """ import cytoolz as ct cursor_sqlite.execute( "SELECT content FROM tw_users") get_json_seq = ct.compose( map_loads, pluck_0 ) comptez_unique( pluck_loc( get_json_seq(cursor_sqlite))) """ Explanation: Question 6 : fonction get_json_seq A partir des fonctions précédentes et de la fonction compose, créez une fonction get_json_seq, qui à partir d'un curseur d'une requête dont la colonne content est en première position, renvoit une séquence des objets json loadés. Vous devez pouvoir l'utiliser pour réécrire le code de la question précédente ainsi : End of explanation """ def contains_paris(loc): return "paris" in loc.lower() """ Explanation: Question 7 : liste des localisations avec Paris On peut vérifier si une localisation contient le mot "Paris", avec toutes ces variations de casse possible avec la fonction suivante : End of explanation """ ## Réponse 7.1 import cytoolz as ct cursor_sqlite.execute( "SELECT content FROM tw_users") ct.count( ct.filter( contains_paris, pluck_loc( get_json_seq(cursor_sqlite)))) ## le résultat attendu est 5470 ## Réponse 7.2 import cytoolz as ct cursor_sqlite.execute( "SELECT content FROM tw_users") list(ct.unique( ct.filter( contains_paris, pluck_loc( get_json_seq(cursor_sqlite))))) ## la liste doit contenir 977 éléments """ Explanation: En utilisant cette fonction et la fonction ct.filter, trouvez : le nombre d'utilisateur dont la location contient Paris sous une forme ou une autre (question 7.1) tous les variantes de location contenant Paris (pour info il y en a 977) ct.filter s'utilise avec la syntaxe ct.filter( f, seq ) et renvoit une séquence de tous les éléments de la séquence en entrée pour lesquels f renvoit true. Vous aurez besoin des fonctions ct.unique et ct.count. Si vous avez une sortie du type &lt;cytoolz.itertoolz._unique_identity at 0x7f3e7f3d6d30&gt;, rajouter la fonction list( ... ) autour pour forcer l'évaluation. 
End of explanation """ ## Réponse 8 import cytoolz as ct # solution 1 def contains_paris_json(loc): return "paris" in loc["location"].lower() cursor_sqlite.execute( "SELECT content FROM tw_users") # Ici j'utilise pipe, car les fonctions sont appelées dans l'ordre indiqué, cela est plus pratique # que compose print( ct.pipe( cursor_sqlite, get_json_seq, ctc.filter(contains_paris_json), ctc.pluck("statuses_count"), sum ) ) # Solution 2 - déconseillée cursor_sqlite.execute( "SELECT content FROM tw_users") # Ici j'utilise pipe, car les fonctions sont appelées dans l'ordre indiqué, cela est plus pratique # que compose print( ct.pipe(cursor_sqlite, ## on récupère les objets json get_json_seq, ## on regroupe par localisation ctc.groupby("location"), ## on ne garde que les entrées dont la clé contient paris ctc.keyfilter(contains_paris), # Pour les valeurs, ont fait la somme des statuses_count ctc.valmap( ctc.compose( sum, ctc.pluck("statuses_count") )), lambda x:x.values(), sum )) # Solution 4 - valable également cursor_sqlite.execute( "SELECT content FROM tw_users") print( ct.pipe(cursor_sqlite, ## on récupère les objets json get_json_seq, ## on garde la location et les nombres de status ctc.pluck(["location","statuses_count"]), ## on applique le filtre, avec contains_paris sur le premier élément ctc.filter( ctc.compose( contains_paris, ctc.get(0)) ), ## une fois le filtre appliqué, on ne garde que les statuses_count ctc.pluck(1), # on fait la somme sum)) """ Explanation: Question 8 : somme des tweets de tous les utilisateurs dont la location contient Paris Calculez le nombre de tweets total par les utilisateurs dont la "location" contient Paris. Dans le json de twitter, la clé pour cela est "statuses_count" Pour cela plusieurs possibilités : la plus simple est de redéfinir une fonction contains_paris, qui prenne en entrée un user json groupby("location", seq) vous renvoit les réponses groupées par location. 
Cette méthode possède l'inconvénient de charger toutes les données en mémoire reduceby("location", lambda x,y: x + y["statuses_count"], seq, 0) vous renvoit la somme par location, il ne reste plus qu'à filtrer et additionner pluck(["location", "statuses_count"], seq) vous permet de garder les deux informations. Il faudra changer la fonction contains paris pour celle suivante (contains_paris_tuple) Réponse attendue : 9811612 End of explanation """ try: cursor_sqlite.execute("CREATE UNIQUE INDEX tw_users_id_index ON tw_users(id)") print("Index created") except sqlite3.OperationalError as e: if( "index tw_users_id_index already exists" in str(e)): print("Ok, index already exists") else: raise e """ Explanation: Question 9 : comparaison des followers d'homme politique On va maintenant s'intéresser à la proximité / corrélation entre les hommes politiques, que l'on mesurera à partir de la formule : $\frac{1}{2}*( \frac{nbFollowersCommun}{nbFollowersHommePolitique_1} + \frac{nbFollowersCommun}{nbFollowersHommePolitique_2}$) On prend donc la moyenne des ratios des followers de chaque homme politique suivant l'autre (cette formule semble s'accommoder assez bien des différences du nombre de followers entre homme politiques) On s'intéressera notamment aux hommes politiques suivants : benoithamon | 14389177 montebourg | 69255422 alainjuppe | 258345629 De fait vous pouvez prendre n'importe quel homme ou femme politique, les résultats de cette méthode sont assez probants malgré sa rusticité. 
Important : pensez à appliquer la cellule ci-dessous End of explanation """ part_taken = 2 get_all_followers_set = ctc.compose( ctc.reduce(set.union), ctc.map(set), get_json_seq, ctc.take_nth(part_taken), ct.curry(cursor_sqlite.execute,"SELECT content FROM tw_followers_id WHERE user_id = ?"), lambda x:(x,) ) def proximite(a,b): c = a & b return min(1, 0.5*len(c)*(1/len(a)+1/len(b))*part_taken) users_id_list = [ 14389177, 69255422, 258345629 ] users_id_f_set = list( ct.map( get_all_followers_set, users_id_list ) ) for it_l in users_id_f_set: print( ";".join("{0:.2f}".format(proximite(it,it_l)) for it in users_id_f_set) ) """ Explanation: La façon la plus simple est de charger les listes d'id de followers en mémoire, dans des objets de type set, et de les comparer avec les opérateurs & (intersection) - (différences). On peut aussi chercher une méthode approchée, en comparant de façon aléatoire les listes contenues dans tw_follower_id. End of explanation """ import dask """ Explanation: Partie 2 : avec dask Essayez d'exécuter le code suivant End of explanation """ from pyensae.sql import Database dby = Database("twitter_for_network_100000.db") dby.connect() dby.get_table_list() dby.close() """ Explanation: On affiche la liste des tables de la base sqlite: End of explanation """ import cytoolz as ct # import groupby, valmap, compose import cytoolz.curried as ctc ## pipe, map, filter, get import sqlite3 import pprint try: import ujson as json except: import json conn_sqlite_f = sqlite3.connect("twitter_for_network_100000.db") cursor_sqlite_f = conn_sqlite_f.cursor() cursor_sqlite_f.execute("SELECT content FROM tw_users") for it in range(100): with open( "tw_users_split_{0:02d}.json".format(it), 'w') as f: for it_index, it_json in enumerate( cursor_sqlite_f ): f.write( it_json[0] ) f.write("\n") if it_index == 100000: break else: break """ Explanation: dask peut vous permettre de paralléliser de façon efficace votre code entre plusieurs processeurs. 
Utilisez le code suivant pour splitter la base 'twitter_for_network_full.db' en plusieurs fichiers plats (NB: pensez à nettoyer votre disque dur après ce tp). End of explanation """ ## Code commun nécessaire import dask.bag as dbag try: import ujson as json except: print("ujson unavailable") import json from operator import add a = dbag.read_text('tw_users_split*.json') # Le nombre total de status a.map(json.loads).pluck("statuses_count").fold(add).compute() # Le nombre moyen de tweet par location. import cytoolz # Solution def mean(l): # la parallélisation n'est pas effectuée de la même manière # sur linux et Windows # dans un process différent, les import # faits au début du programme ne font pas partie # du code parallélisé, c'est pourquoi il faut les ajouter # dans le code de la fonction import cytoolz.curried as ctc return sum( ctc.pluck(1, l) ) / len(l) def function_mapped(*args): # Example of args: # (('Lille', [['Lille', 483]]),) if len(args) == 2: x, y = args else: x, y = args[0] me = mean(y) return x, me result = a.map(json.loads).pluck(["location","statuses_count"]) \ .groupby(0).map(function_mapped).compute() result[:10] # La distribution du nombre de followers par puissance de 10 import math a.map(json.loads) \ .pluck("followers_count") \ .map(lambda x,math=math: int(math.log10(x+1)) ) \ .frequencies() \ .compute() """ Explanation: Calculez maintenant, en utilisant dask.bag : le nombre total de status le nombre de status moyen par location la distribution du nombre de followers par puissance de 10 sur l'ensemble des users End of explanation """
hankcs/HanLP
plugins/hanlp_demo/hanlp_demo/zh/srl_stl.ipynb
apache-2.0
!pip install hanlp -U """ Explanation: <h2 align="center">点击下列图标在线运行HanLP</h2> <div align="center"> <a href="https://colab.research.google.com/github/hankcs/HanLP/blob/doc-zh/plugins/hanlp_demo/hanlp_demo/zh/srl_mtl.ipynb" target="_blank"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> <a href="https://mybinder.org/v2/gh/hankcs/HanLP/doc-zh?filepath=plugins%2Fhanlp_demo%2Fhanlp_demo%2Fzh%2Fsrl_mtl.ipynb" target="_blank"><img src="https://mybinder.org/badge_logo.svg" alt="Open In Binder"/></a> </div> 安装 无论是Windows、Linux还是macOS,HanLP的安装只需一句话搞定: End of explanation """ import hanlp hanlp.pretrained.srl.ALL # 语种见名称最后一个字段或相应语料库 """ Explanation: 加载模型 HanLP的工作流程是先加载模型,模型的标示符存储在hanlp.pretrained这个包中,按照NLP任务归类。 End of explanation """ srl = hanlp.load('CPB3_SRL_ELECTRA_SMALL') """ Explanation: 调用hanlp.load进行加载,模型会自动下载到本地缓存: End of explanation """ srl(['2021年', 'HanLPv2.1', '为', '生产', '环境', '带来', '次', '世代', '最', '先进', '的', '多', '语种', 'NLP', '技术', '。']) """ Explanation: 语义角色分析 为已分词的句子执行语义角色分析: End of explanation """ for i, pas in enumerate(srl(['2021年', 'HanLPv2.1', '为', '生产', '环境', '带来', '次', '世代', '最', '先进', '的', '多', '语种', 'NLP', '技术', '。'])): print(f'第{i+1}个谓词论元结构:') for form, role, begin, end in pas: print(f'{form} = {role} at [{begin}, {end}]') """ Explanation: 语义角色标注结果中每个四元组的格式为[论元或谓词, 语义角色标签, 起始下标, 终止下标]。其中,谓词的语义角色标签为PRED,起止下标对应单词数组。 遍历谓词论元结构: End of explanation """
jart/tensorflow
tensorflow/contrib/eager/python/examples/generative_examples/text_generation.ipynb
apache-2.0
!pip install unidecode """ Explanation: Copyright 2018 The TensorFlow Authors. Licensed under the Apache License, Version 2.0 (the "License"). Text Generation using a RNN <table class="tfo-notebook-buttons" align="left"><td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/generative_examples/text_generation.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td><td> <a target="_blank" href="https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/examples/generative_examples/text_generation.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on Github</a></td></table> This notebook demonstrates how to generate text using an RNN using tf.keras and eager execution. If you like, you can write a similar model using less code. Here, we show a lower-level impementation that's useful to understand as prework before diving in to deeper examples in a similar, like Neural Machine Translation with Attention. This notebook is an end-to-end example. When you run it, it will download a dataset of Shakespeare's writing. We'll use a collection of plays, borrowed from Andrej Karpathy's excellent The Unreasonable Effectiveness of Recurrent Neural Networks. The notebook will train a model, and use it to generate sample output. Here is the output(with start string='w') after training a single layer GRU for 30 epochs with the default settings below: ``` were to the death of him And nothing of the field in the view of hell, When I said, banish him, I will not burn thee that would live. HENRY BOLINGBROKE: My gracious uncle-- DUKE OF YORK: As much disgraced to the court, the gods them speak, And now in peace himself excuse thee in the world. 
HORTENSIO: Madam, 'tis not the cause of the counterfeit of the earth, And leave me to the sun that set them on the earth And leave the world and are revenged for thee. GLOUCESTER: I would they were talking with the very name of means To make a puppet of a guest, and therefore, good Grumio, Nor arm'd to prison, o' the clouds, of the whole field, With the admire With the feeding of thy chair, and we have heard it so, I thank you, sir, he is a visor friendship with your silly your bed. SAMPSON: I do desire to live, I pray: some stand of the minds, make thee remedies With the enemies of my soul. MENENIUS: I'll keep the cause of my mistress. POLIXENES: My brother Marcius! Second Servant: Will't ple ``` Of course, while some of the sentences are grammatical, most do not make sense. But, consider: Our model is character based (when we began training, it did not yet know how to spell a valid English word, or that words were even a unit of text). The structure of the output resembles a play (blocks begin with a speaker name, in all caps similar to the original text). Sentences generally end with a period. If you look at the text from a distance (or don't read the invididual words too closely, it appears as if it's an excerpt from a play). As a next step, you can experiment training the model on a different dataset - any large text file(ASCII) will do, and you can modify a single line of code below to make that change. Have fun! Install unidecode library A helpful library to convert unicode to ASCII. End of explanation """ # Import TensorFlow >= 1.9 and enable eager execution import tensorflow as tf # Note: Once you enable eager execution, it cannot be disabled. tf.enable_eager_execution() import numpy as np import re import random import unidecode import time """ Explanation: Import tensorflow and enable eager execution. 
End of explanation """ path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/yashkatariya/shakespeare.txt') """ Explanation: Download the dataset In this example, we will use the shakespeare dataset. You can use any other dataset that you like. End of explanation """ text = unidecode.unidecode(open(path_to_file).read()) # length of text is the number of characters in it print (len(text)) """ Explanation: Read the dataset End of explanation """ # unique contains all the unique characters in the file unique = sorted(set(text)) # creating a mapping from unique characters to indices char2idx = {u:i for i, u in enumerate(unique)} idx2char = {i:u for i, u in enumerate(unique)} # setting the maximum length sentence we want for a single input in characters max_length = 100 # length of the vocabulary in chars vocab_size = len(unique) # the embedding dimension embedding_dim = 256 # number of RNN (here GRU) units units = 1024 # batch size BATCH_SIZE = 64 # buffer size to shuffle our dataset BUFFER_SIZE = 10000 """ Explanation: Creating dictionaries to map from characters to their indices and vice-versa, which will be used to vectorize the inputs End of explanation """ input_text = [] target_text = [] for f in range(0, len(text)-max_length, max_length): inps = text[f:f+max_length] targ = text[f+1:f+1+max_length] input_text.append([char2idx[i] for i in inps]) target_text.append([char2idx[t] for t in targ]) print (np.array(input_text).shape) print (np.array(target_text).shape) """ Explanation: Creating the input and output tensors Vectorizing the input and the target text because our model cannot understand strings only numbers. But first, we need to create the input and output vectors. Remember the max_length we set above, we will use it here. We are creating max_length chunks of input, where each input vector is all the characters in that chunk except the last and the target vector is all the characters in that chunk except the first. 
For example, consider that the string = 'tensorflow' and the max_length is 9 So, the input = 'tensorflo' and output = 'ensorflow' After creating the vectors, we convert each character into numbers using the char2idx dictionary we created above. End of explanation """ dataset = tf.data.Dataset.from_tensor_slices((input_text, target_text)).shuffle(BUFFER_SIZE) dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(BATCH_SIZE)) """ Explanation: Creating batches and shuffling them using tf.data End of explanation """ class Model(tf.keras.Model): def __init__(self, vocab_size, embedding_dim, units, batch_size): super(Model, self).__init__() self.units = units self.batch_sz = batch_size self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim) if tf.test.is_gpu_available(): self.gru = tf.keras.layers.CuDNNGRU(self.units, return_sequences=True, return_state=True, recurrent_initializer='glorot_uniform') else: self.gru = tf.keras.layers.GRU(self.units, return_sequences=True, return_state=True, recurrent_activation='sigmoid', recurrent_initializer='glorot_uniform') self.fc = tf.keras.layers.Dense(vocab_size) def call(self, x, hidden): x = self.embedding(x) # output shape == (batch_size, max_length, hidden_size) # states shape == (batch_size, hidden_size) # states variable to preserve the state of the model # this will be used to pass at every step to the model while training output, states = self.gru(x, initial_state=hidden) # reshaping the output so that we can pass it to the Dense layer # after reshaping the shape is (batch_size * max_length, hidden_size) output = tf.reshape(output, (-1, output.shape[2])) # The dense layer will output predictions for every time_steps(max_length) # output shape after the dense layer == (max_length * batch_size, vocab_size) x = self.fc(output) return x, states """ Explanation: Creating the model We use the Model Subclassing API which gives us full flexibility to create the model and change it however we like. 
We use 3 layers to define our model. Embedding layer GRU layer (you can use an LSTM layer here) Fully connected layer End of explanation """ model = Model(vocab_size, embedding_dim, units, BATCH_SIZE) optimizer = tf.train.AdamOptimizer() # using sparse_softmax_cross_entropy so that we don't have to create one-hot vectors def loss_function(real, preds): return tf.losses.sparse_softmax_cross_entropy(labels=real, logits=preds) """ Explanation: Call the model and set the optimizer and the loss function End of explanation """ # Training step EPOCHS = 30 for epoch in range(EPOCHS): start = time.time() # initializing the hidden state at the start of every epoch hidden = model.reset_states() for (batch, (inp, target)) in enumerate(dataset): with tf.GradientTape() as tape: # feeding the hidden state back into the model # This is the interesting step predictions, hidden = model(inp, hidden) # reshaping the target because that's how the # loss function expects it target = tf.reshape(target, (-1,)) loss = loss_function(target, predictions) grads = tape.gradient(loss, model.variables) optimizer.apply_gradients(zip(grads, model.variables), global_step=tf.train.get_or_create_global_step()) if batch % 100 == 0: print ('Epoch {} Batch {} Loss {:.4f}'.format(epoch+1, batch, loss)) print ('Epoch {} Loss {:.4f}'.format(epoch+1, loss)) print('Time taken for 1 epoch {} sec\n'.format(time.time() - start)) """ Explanation: Train the model Here we will use a custom training loop with the help of GradientTape() We initialize the hidden state of the model with zeros and shape == (batch_size, number of rnn units). We do this by calling the function defined while creating the model. Next, we iterate over the dataset(batch by batch) and calculate the predictions and the hidden states associated with that input. There are a lot of interesting things happening here. The model gets hidden state(initialized with 0), lets call that H0 and the first batch of input, lets call that I0. 
The model then returns the predictions P1 and H1. For the next batch of input, the model receives I1 and H1. The interesting thing here is that we pass H1 to the model with I1 which is how the model learns. The context learned from batch to batch is contained in the hidden state. We continue doing this until the dataset is exhausted and then we start a new epoch and repeat this. After calculating the predictions, we calculate the loss using the loss function defined above. Then we calculate the gradients of the loss with respect to the model variables(input) Finally, we take a step in that direction with the help of the optimizer using the apply_gradients function. Note:- If you are running this notebook in Colab which has a Tesla K80 GPU it takes about 23 seconds per epoch. End of explanation """ # Evaluation step(generating text using the model learned) # number of characters to generate num_generate = 1000 # You can change the start string to experiment start_string = 'Q' # converting our start string to numbers(vectorizing!) input_eval = [char2idx[s] for s in start_string] input_eval = tf.expand_dims(input_eval, 0) # empty string to store our results text_generated = '' # low temperatures results in more predictable text. 
# higher temperatures results in more surprising text # experiment to find the best setting temperature = 1.0 # hidden state shape == (batch_size, number of rnn units); here batch size == 1 hidden = [tf.zeros((1, units))] for i in range(num_generate): predictions, hidden = model(input_eval, hidden) # using a multinomial distribution to predict the word returned by the model predictions = predictions / temperature predicted_id = tf.multinomial(tf.exp(predictions), num_samples=1)[0][0].numpy() # We pass the predicted word as the next input to the model # along with the previous hidden state input_eval = tf.expand_dims([predicted_id], 0) text_generated += idx2char[predicted_id] print (start_string + text_generated) """ Explanation: Predicting using our trained model The below code block is used to generated the text We start by choosing a start string and initializing the hidden state and setting the number of characters we want to generate. We get predictions using the start_string and the hidden state Then we use a multinomial distribution to calculate the index of the predicted word. We use this predicted word as our next input to the model The hidden state returned by the model is fed back into the model so that it now has more context rather than just one word. After we predict the next word, the modified hidden states are again fed back into the model, which is how it learns as it gets more context from the previously predicted words. If you see the predictions, the model knows when to capitalize, make paragraphs and the text follows a shakespeare style of writing which is pretty awesome! End of explanation """
a301-teaching/a301_code
notebooks/layertops_demo_solution.ipynb
mit
import glob import h5py import numpy as np import glob from a301lib.cloudsat import get_geo from a301utils.a301_readfile import download from matplotlib import pyplot as plt lidar_name='2006303212128_02702_CS_2B-GEOPROF-LIDAR_GRANULE_P2_R04_E02.h5' download(lidar_name) """ Explanation: Reading the Lidar LayerTops variable 1) First, grab the lidar file End of explanation """ the_file=glob.glob('2006*LIDAR*h5')[0] print(the_file) """ Explanation: 2) use glob.glob wildcards to read the filename from disk without having to get the name exactly right End of explanation """ with h5py.File(the_file,'r') as in_file: layer_tops=in_file['2B-GEOPROF-LIDAR']['Data Fields']['LayerTop'][...] factor=in_file['2B-GEOPROF-LIDAR']['Data Fields']['LayerTop'].attrs['factor'] offset=in_file['2B-GEOPROF-LIDAR']['Data Fields']['LayerTop'].attrs['offset'] units=in_file['2B-GEOPROF-LIDAR']['Data Fields']['LayerTop'].attrs['units'] missing = in_file['2B-GEOPROF-LIDAR']['Data Fields']['LayerTop'].attrs['missing'] # # the next line turns the numpy bytes (b'm') object returned by h5py into a unicode string # for printing # units=units.decode('utf-8') print('missing value, factor, offset and units: {} {} {} {}' .format(missing,factor,offset,units)) """ Explanation: 3) Use hdfview to figure out the path to the LayerTop variable, and to get the factor and offset needed to turn the 16 bit interger data into science values as described on the cloudsat web page End of explanation """ lat,lon,date_times,prof_times,dem_elevation=get_geo(the_file) time_minutes = prof_times/60. """ Explanation: 4) get the time values (in seconds) for the orbit and convert to decimal minutes for plotting End of explanation """ hit = layer_tops < 10 #http://cswww.cira.colostate.edu/dataSpecs.php layer_tops = (layer_tops - offset)/factor layer_tops[hit] = np.nan """ Explanation: 5) turn the missing values (-99) into np.nan ("not a number") so they will be dropped from our plot. 
Count any cloud height below 10 meters as noise and assign it as missing. End of explanation """ num_times,num_layers = layer_tops.shape text=""" layer number: {0:} cloud fraction is {1:4.2f}% mean height is {2:6.1f} meters """ for the_layer in range(num_layers): missing = np.isnan(layer_tops[:,the_layer]) present = np.logical_not(missing) num_present = np.sum(present) percent_present=num_present/num_times*100. mean_height = np.nanmean(layer_tops[:,the_layer]) print(text.format(the_layer,percent_present,mean_height)) """ Explanation: 6) go through each of the layers and find the mean height (not counting the nan values) and the number of timesteps where there was cloud detected End of explanation """ layer1= layer_tops[:,0] layer2= layer_tops[:,1] layer1_count=0 overlap_count=0 for index,layer1_height in enumerate(layer1): if not np.isnan(layer1_height): layer1_count += 1 if not np.isnan(layer2[index]): overlap_count += 1 overlap_freq=100.*overlap_count/layer1_count print(('when there was cloud in layer 1, there was also cloud in layer2 {:6.2f} ' 'percent of the time') .format(overlap_freq)) """ Explanation: 7) What fraction of the time is there a layer 2 cloud above layer 1 cloud? End of explanation """ %matplotlib inline from IPython.display import display plt.close('all') meters2km=1.e3 seconds2mins=60. 
def plot_layers(time_secs,layer_tops,ax): ntimes,nlayers=layer_tops.shape time_mins=time_secs/seconds2mins for i in range(nlayers): label='layer {}'.format(i) ax.plot(time_mins,layer_tops[:,i]/meters2km,label=label) ax.legend() return ax fig, ax = plt.subplots(1,1,figsize=(12,4)) ax=plot_layers(prof_times,layer_tops,ax) ax.set(title='Cloudsat Orbit -- lidar/radar cloud tops', xlabel='time (minutes)',ylabel='height (km)'); # # expand to view the 60-70 minute time inteval # hit=np.logical_and(prof_times > 60*60,prof_times < 70*60) fig, ax = plt.subplots(1,1,figsize=(12,3)) ax=plot_layers(prof_times[hit],layer_tops[hit,:],ax) ax.set(title='Cloudsat Orbit -- lidar/radar cloud tops -- zoomed', xlabel='time (minutes)',ylabel='height (km)'); """ Explanation: solution: plot the layers with a legend End of explanation """
BadWizard/Inflation
Disaggregated-Data/weather-like-plot-HICP-by-country.ipynb
mit
df_infl_ctry['min'] = df_infl_ctry.apply(min,axis=1) df_infl_ctry['max'] = df_infl_ctry.apply(max,axis=1) df_infl_ctry['mean'] = df_infl_ctry.apply(np.mean,axis=1) df_infl_ctry['mode'] = df_infl_ctry.quantile(q=0.5, axis=1) df_infl_ctry['10th'] = df_infl_ctry.quantile(q=0.10, axis=1) df_infl_ctry['90th'] = df_infl_ctry.quantile(q=0.90, axis=1) df_infl_ctry['25th'] = df_infl_ctry.quantile(q=0.25, axis=1) df_infl_ctry['75th'] = df_infl_ctry.quantile(q=0.75, axis=1) df_infl_ctry.head() """ Explanation: df_infl_ctry.rename(columns = dic) tt = df_infl_ctry.copy() tt['month'] = tt.index.month tt['year'] = tt.index.year melted_df = pd.melt(tt,id_vars=['month','year']) melted_df.head() End of explanation """ df_infl_ctry.tail() print(df_infl_ctry.describe()) """ Explanation: df_infl_ctry['month'] = df_infl_ctry.index.month df_infl_ctry['year'] = df_infl_ctry.index.year End of explanation """ len(df_infl_ctry) df_infl_ctry.columns df_infl_ctry['month_order'] = range(len(df_infl_ctry)) month_order = df_infl_ctry['month_order'] max_infl = df_infl_ctry['max'].values min_infl = df_infl_ctry['min'].values mean_infl = df_infl_ctry['mean'].values mode_infl = df_infl_ctry['mode'].values p25th = df_infl_ctry['25th'].values p75th = df_infl_ctry['75th'].values p10th = df_infl_ctry['10th'].values p90th = df_infl_ctry['90th'].values inflEA = df_infl_ctry['76451'].values year_begin_df = df_infl_ctry[df_infl_ctry.index.month == 1] year_begin_df; year_beginning_indeces = list(year_begin_df['month_order'].values) year_beginning_indeces year_beginning_names = list(year_begin_df.index.year) year_beginning_names month_order #import seaborn as sns fig, ax1 = plt.subplots(figsize=(15, 7)) # Create the bars showing highs and lows #plt.bar(month_order, max_infl - min_infl, bottom=min_infl, # edgecolor='none', color='#C3BBA4', width=1) plt.bar(month_order, p90th - p10th, bottom=p10th, edgecolor='none', color='#C3BBA4', width=1) # Create the bars showing average highs and lows plt.bar(month_order, 
p75th - p25th, bottom=p25th, edgecolor='none', color='#9A9180', width=1); #annotations={month_order[50]:'Dividends'} plt.plot(month_order, inflEA, color='#5A3B49',linewidth=2 ); plt.plot(month_order, mode_infl, color='wheat',linewidth=2,alpha=.3); plt.xticks(year_beginning_indeces, year_beginning_names, fontsize=10) #ax2 = ax1.twiny() plt.xticks(year_beginning_indeces, year_beginning_names, fontsize=10); plt.xlim(-5,200) plt.grid(False) ##ax2 = ax1.twiny() plt.ylim(-5, 14) #ax3 = ax1.twinx() plt.yticks(range(-4, 15, 2), [r'{}'.format(x) for x in range(-4, 15, 2)], fontsize=10); plt.grid(axis='both', color='wheat', linewidth=1.5, alpha = .5) plt.title('HICP innflation, annual rate of change, Jan 2000 - March 2016\n\n', fontsize=20); """ Explanation: Generate a bunch of histograms of the data to make sure that all of the data is in an expected range. with plt.style.context('https://gist.githubusercontent.com/rhiever/d0a7332fe0beebfdc3d5/raw/223d70799b48131d5ce2723cd5784f39d7a3a653/tableau10.mplstyle'): for column in df_infl_ctry.columns[:-2]: #if column in ['date']: # continue plt.figure() plt.hist(df_infl_ctry[column].values) plt.title(column) #plt.savefig('{}.png'.format(column)) End of explanation """
ntftrader/ntfdl
examples/notebooks/Historical data.ipynb
mit
%matplotlib inline %pylab inline --no-import-all pylab.rcParams['figure.figsize'] = (18, 10) from ntfdl import Dl stl = Dl('STL', exchange='OSE', download=False) history = stl.get_history() history.tail() fig, ax = plt.subplots() ax.tick_params(labeltop=False, labelright=True) history.close.plot() plt.grid() # Annotate last quote xmin, xmax = ax.get_xlim() plt.annotate(history.iloc[-1].close, xy=(1.005, history.iloc[-1].close), xytext=(0, 0), \ xycoords=('axes fraction', 'data'), textcoords='offset points', backgroundcolor='k', color='w') """ Explanation: Access historical data End of explanation """ history_ma = stl.get_history(mas=[10,20,50,100,200]) history_ma.tail(5) fig, ax = plt.subplots() ax.tick_params(labeltop=False, labelright=True) history_ma[['close','ma10','ma20','ma50','ma100','ma200']].plot(ax=ax) plt.grid() """ Explanation: Including moving averages Moving averages are calculated using pandas rolling.mean() and adds a column for each window size given in the list where each columns is prefixed 'ma' followed by window size. End of explanation """ fig, ax = plt.subplots() ax.tick_params(labeltop=False, labelright=True) history_ma['2008-01-01':'2010-01-01'][['close','ma10','ma20','ma50','ma100','ma200']].plot(ax=ax) plt.grid() history.turnover.plot() """ Explanation: Busy chart, let's instead slice the pandas with the [from:to] syntax looking at some days of horror between 2008 and 2009. End of explanation """
bourneli/deep-learning-notes
DAT236x Deep Learning Explained/.ipynb_checkpoints/Lab6_TextClassification_with_LSTM-checkpoint.ipynb
mit
from __future__ import print_function # Use a function definition from future version (say 3.x from 2.7 interpreter) import requests import os def download(url, filename): """ utility function to download a file """ response = requests.get(url, stream=True) with open(filename, "wb") as handle: for data in response.iter_content(): handle.write(data) locations = ['Tutorials/SLUHandsOn', 'Examples/LanguageUnderstanding/ATIS/BrainScript'] data = { 'train': { 'file': 'atis.train.ctf', 'location': 0 }, 'test': { 'file': 'atis.test.ctf', 'location': 0 }, 'query': { 'file': 'query.wl', 'location': 1 }, 'slots': { 'file': 'slots.wl', 'location': 1 } } for item in data.values(): location = locations[item['location']] path = os.path.join('..', location, item['file']) if os.path.exists(path): print("Reusing locally cached:", item['file']) # Update path item['file'] = path elif os.path.exists(item['file']): print("Reusing locally cached:", item['file']) else: print("Starting download:", item['file']) url = "https://github.com/Microsoft/CNTK/blob/v2.0/%s/%s?raw=true"%(location, item['file']) download(url, item['file']) print("Download completed") """ Explanation: Lab 6: Text Classification with LSTM This lab corresponds to Module 6 of the "Deep Learning Explained" course. This lab shows how to implement a recurrent network to process text, for the Air Travel Information Services (ATIS) task of slot tagging (tag individual words to their respective classes, where the classes are provided as labels in the training data set). Our model will start with a straight-forward (linear) embedding of the words followed by a recurrent LSTM. This will then be extended to include neighboring words and run bidirectionally. Lastly, we will turn this system into an intent classifier. 
The techniques you will practice are: * model building by composing layer blocks, a convenient way to compose networks/models without requiring the need to write formulas, * creating your own layer block * variables with different sequence lengths in the same network * training the network We assume that you are familiar with basics of deep learning, and these specific concepts: * recurrent networks (Wikipedia page) * text embedding (Wikipedia page) Prerequisites We assume that you have already installed CNTK. This tutorial requires CNTK V2. We strongly recommend to run this tutorial on a machine with a capable CUDA-compatible GPU. Deep learning without GPUs is not fun. Downloading the data In this tutorial we are going to use a (lightly preprocessed) version of the ATIS dataset. You can download the data automatically by running the cells below or by executing the manual instructions. Fallback manual instructions Download the ATIS training and test files and put them at the same folder as this notebook. If you want to see how the model is predicting on new sentences you will also need the vocabulary files for queries and slots End of explanation """ import math import numpy as np import cntk as C """ Explanation: Importing CNTK and other useful libraries CNTK's Python module contains several submodules like io, learner, and layers. We also use NumPy in some cases since the results returned by CNTK work like NumPy arrays. End of explanation """ # Select the right target device when this notebook is being tested: if 'TEST_DEVICE' in os.environ: if os.environ['TEST_DEVICE'] == 'cpu': C.device.try_set_default_device(C.device.cpu()) else: C.device.try_set_default_device(C.device.gpu(0)) # Test for CNTK version if not C.__version__ == "2.0": raise Exception("this notebook was designed to work with 2.0. 
Current Version: " + C.__version__) """ Explanation: In the block below, we check if we are running this notebook in the CNTK internal test machines by looking for environment variables defined there. We then select the right target device (GPU vs CPU) to test this notebook. In other cases, we use CNTK's default policy to use the best available device (GPU, if available, else CPU). End of explanation """ # setting seed np.random.seed(0) C.cntk_py.set_fixed_random_seed(1) C.cntk_py.force_deterministic_algorithms() # number of words in vocab, slot labels, and intent labels vocab_size = 943 ; num_labels = 129 ; num_intents = 26 # model dimensions input_dim = vocab_size label_dim = num_labels emb_dim = 150 # error 2.25% # emb_dim = 50 # error 2.71% #emb_dim = 300 # error 2.20% #hidden_dim = 100 # error 2.44% hidden_dim = 300 # error 2.25% #hidden_dim = 500 # error 2.09% # Create the containers for input feature (x) and the label (y) x = C.sequence.input_variable(vocab_size) y = C.sequence.input_variable(num_labels) def create_model(): with C.layers.default_options(initial_state=0.1): return C.layers.Sequential([ C.layers.Embedding(emb_dim, name='embed'), C.layers.Recurrence(C.layers.LSTM(hidden_dim), go_backwards=False), C.layers.Dense(num_labels, name='classify') ]) """ Explanation: Task and Model Structure The task we want to approach in this tutorial is slot tagging. We use the ATIS corpus. ATIS contains human-computer queries from the domain of Air Travel Information Services, and our task will be to annotate (tag) each word of a query with the specific item of information (slot) it belongs to, if any. The data in your working folder has already been converted into a CTF (CNTK Text Format) file. 
Let us look at an example from the test-set file atis.test.ctf: 19 |S0 178:1 |# BOS |S1 14:1 |# flight |S2 128:1 |# O 19 |S0 770:1 |# show |S2 128:1 |# O 19 |S0 429:1 |# flights |S2 128:1 |# O 19 |S0 444:1 |# from |S2 128:1 |# O 19 |S0 272:1 |# burbank |S2 48:1 |# B-fromloc.city_name 19 |S0 851:1 |# to |S2 128:1 |# O 19 |S0 789:1 |# st. |S2 78:1 |# B-toloc.city_name 19 |S0 564:1 |# louis |S2 125:1 |# I-toloc.city_name 19 |S0 654:1 |# on |S2 128:1 |# O 19 |S0 601:1 |# monday |S2 26:1 |# B-depart_date.day_name 19 |S0 179:1 |# EOS |S2 128:1 |# O This file has 5-7 columns per line (each separated by the "|" character): a sequence id (19). There are 11 entries with this sequence id. This means that sequence 19 consists of 11 tokens; column S0, which contains numeric word indices; the input data is encoded in one-hot vectors. There are 943 words in the vocabulary, so each word is a 943 element vector of all 0 with a 1 at a vector index chosen to represent that word. For example the word "from" is represented with a 1 at index 444 and zero everywhere else in the vector. The word "monday" is represented with a 1 at index 601 and zero everywhere else in the vector. a comment column denoted by #, to allow a human reader to know what the numeric word index stands for; Comment columns are ignored by the system. BOS and EOS are special words to denote beginning and end of sentence, respectively; column S1 is an intent label, which we will only use in the last part of the tutorial; another comment column that shows the human-readable label of the numeric intent index; column S2 is the slot label, represented as a numeric index; and another comment column that shows the human-readable label of the numeric label index. The task of the neural network is to look at the query (column S0) and predict the slot label (column S2). 
As you can see, each word in the input gets assigned either an empty label O or a slot label that begins with B- for the first word, and with I- for any additional consecutive word that belongs to the same slot. The model we will use is a recurrent model consisting of an embedding layer, a recurrent LSTM cell, and a dense layer to compute the posterior probabilities: slot label "O" "O" "O" "O" "B-fromloc.city_name" ^ ^ ^ ^ ^ | | | | | +-------+ +-------+ +-------+ +-------+ +-------+ | Dense | | Dense | | Dense | | Dense | | Dense | ... +-------+ +-------+ +-------+ +-------+ +-------+ ^ ^ ^ ^ ^ | | | | | +------+ +------+ +------+ +------+ +------+ 0 --&gt;| LSTM |--&gt;| LSTM |--&gt;| LSTM |--&gt;| LSTM |--&gt;| LSTM |--&gt;... +------+ +------+ +------+ +------+ +------+ ^ ^ ^ ^ ^ | | | | | +-------+ +-------+ +-------+ +-------+ +-------+ | Embed | | Embed | | Embed | | Embed | | Embed | ... +-------+ +-------+ +-------+ +-------+ +-------+ ^ ^ ^ ^ ^ | | | | | w ------&gt;+---------&gt;+---------&gt;+---------&gt;+---------&gt;+------... BOS "show" "flights" "from" "burbank" Descriptions of the above Layer functions can be found at: the CNTK Layers Reference Documentation. Below, we build the CNTK model for this network. Please have a quick look and match it with the description above. End of explanation """ # peek z = create_model() print(z.embed.E.shape) print(z.classify.b.value) """ Explanation: Now we are ready to create a model and inspect it. Once a model is constructed, its attributes are fully accessible from Python. The first layer named embed is an Embedding layer. Here we use the CNTK default, which is linear embedding. It is a simple matrix with dimension (input word encoding x output projected dimension). You can access its parameter E (where the embeddings are stored) like any other attribute of a Python object. 
Its shape contains a -1 which indicates that this parameter (with input dimension) is not fully specified yet, while the output dimension is set to emb_dim ( = 150 in this tutorial). Additionally we also inspect the value of the bias vector in the Dense layer named classify. The Dense layer is a fundamental compositional unit of a Multi-Layer Perceptron (as introduced in Lab 3). Each Dense layer has both weight and bias parameters. Bias terms are by default initialized to 0 (but there is a way to change that if you need). As you create the model, one can name the layer component and then access the parameters as shown here. Suggested Exploration: What should be the expected dimension of the weight matrix from the layer named classify. Try printing the weight matrix of the classify layer. Does it match with your expected size? End of explanation """ # Pass an input and check the dimension z = create_model() print(z(x).embed.E.shape) """ Explanation: Our input text words will be encoded as one-hot vectors of length 943 and the output dimension of our model emb_dim is set to 150. In the code below we pass the input variable x to our model z. This binds the model with input data of known shape. In this case, the input shape will be the size of the input vocabulary. With this modification, the parameter returned by the embed layer is completely specified (943, 150). Note: As an alternative to our approach here, you can initialize the Embedding matrix with pre-computed vectors using Word2Vec or GloVe. 
End of explanation """ def create_reader(path, is_training): return C.io.MinibatchSource(C.io.CTFDeserializer(path, C.io.StreamDefs( query = C.io.StreamDef(field='S0', shape=vocab_size, is_sparse=True), intent_unused = C.io.StreamDef(field='S1', shape=num_intents, is_sparse=True), slot_labels = C.io.StreamDef(field='S2', shape=num_labels, is_sparse=True) )), randomize=is_training, max_sweeps = C.io.INFINITELY_REPEAT if is_training else 1) # peek reader = create_reader(data['train']['file'], is_training=True) reader.streams.keys() """ Explanation: A Brief Look at Data and Data Reading For reading text, this tutorial uses the CNTKTextFormatReader. It expects the input data to be in the CTF format, as described here. But how do you generate this format? For this tutorial, we created the CTF file for you, but it might be helpful to explain how this was accomplished. The data was created in two steps: * convert the raw data into a plain text file that contains of TAB-separated columns of space-separated text. For example: BOS show flights from burbank to st. louis on monday EOS (TAB) flight (TAB) O O O O B-fromloc.city_name O B-toloc.city_name I-toloc.city_name O B-depart_date.day_name O This is meant to be compatible with the output of the paste command. convert it to CNTK Text Format (CTF) with the following command: python [CNTK root]/Scripts/txt2ctf.py --map query.wl intent.wl slots.wl --annotated True --input atis.test.txt --output atis.test.ctf where the three .wl files give the vocabulary as plain text files, one word per line. In these CTF files, our columns are labeled S0, S1, and S2. 
These are connected to the actual network inputs by the corresponding lines in the reader definition: End of explanation """ def create_criterion_function(model): labels = C.placeholder(name='labels') ce = C.cross_entropy_with_softmax(model, labels) errs = C.classification_error (model, labels) return C.combine ([ce, errs]) # (features, labels) -> (loss, metric) criterion = create_criterion_function(create_model()) criterion.replace_placeholders({criterion.placeholders[0]: C.sequence.input_variable(num_labels)}) """ Explanation: Trainer We also must define the training criterion (loss function), and also an error metric to track the progress of our model's performance. In most tutorials, we know the input dimensions and the corresponding labels. We directly create the loss and the error functions. In this tutorial we will do the same. However, we take a brief detour and learn about placeholders. This concept would be useful for Task 3. Learning note: Introduction to placeholder: Remember that the code we have been writing is not actually executing any heavy computation it is just specifying the function we want to compute on data during training/testing. And in the same way that it is convenient to have names for arguments when you write a regular function in a programming language, it is convenient to have placeholders that refer to arguments (or local computations that need to be reused). Eventually, some other code will replace these placeholders with other known quantities in the same way that in a programming language the function will be called with concrete values bound to its arguments. Specifically, the input variables you have created above x = C.sequence.input_variable(vocab_size) holds data pre-defined by vocab_size. In the case where such instantiations are challenging or not possible, using placeholder is a logical choice. Having the placeholder only allows you to defer the specification of the argument at a later time when you may have the data. 
Here is an example below that illustrates the use of placeholder. End of explanation """ def create_criterion_function_preferred(model, labels): ce = C.cross_entropy_with_softmax(model, labels) errs = C.classification_error (model, labels) return ce, errs # (model, labels) -> (loss, error metric) """ Explanation: While the cell above works well when one has input parameters defined at network creation, it compromises readability. Hence we prefer creating functions as shown below End of explanation """ def train_test(train_reader, test_reader, model_func, max_epochs=10): # Instantiate the model function; x is the input (feature) variable model = model_func(x) # Instantiate the loss and error function loss, label_error = create_criterion_function_preferred(model, y) # training config epoch_size = 18000 # 18000 samples is half the dataset size minibatch_size = 70 # LR schedule over epochs # In CNTK, an epoch is how often we get out of the minibatch loop to # do other stuff (e.g. checkpointing, adjust learning rate, etc.) 
# (we don't run this many epochs, but if we did, these are good values) lr_per_sample = [0.003]*4+[0.0015]*24+[0.0003] lr_per_minibatch = [lr * minibatch_size for lr in lr_per_sample] lr_schedule = C.learning_rate_schedule(lr_per_minibatch, C.UnitType.minibatch, epoch_size) # Momentum schedule momentum_as_time_constant = C.momentum_as_time_constant_schedule(700) # adam error 2.25% # sgd error 10% # fsadagrad 3.16% # We use a the Adam optimizer which is known to work well on this dataset # Feel free to try other optimizers from # https://www.cntk.ai/pythondocs/cntk.learner.html#module-cntk.learner learner = C.adam(parameters=model.parameters, lr=lr_schedule, momentum=momentum_as_time_constant, gradient_clipping_threshold_per_sample=15, gradient_clipping_with_truncation=True) # learning_rate = 0.2 # lr_schedule = C.learning_rate_schedule(learning_rate, C.UnitType.minibatch) # learner = C.sgd(z.parameters, lr_schedule) # learning_rate = 0.2 # lr_schedule = C.learning_rate_schedule(learning_rate, C.UnitType.minibatch) # TIMESTEPS = 14 # BATCH_SIZE = TIMESTEPS * 10 # momentum_time_constant = C.momentum_as_time_constant_schedule(BATCH_SIZE / -math.log(0.9)) # learner = C.fsadagrad(z.parameters, # lr = lr_schedule, # momentum = momentum_time_constant) # Setup the progress updater progress_printer = C.logging.ProgressPrinter(tag='Training', num_epochs=max_epochs) # Uncomment below for more detailed logging #progress_printer = ProgressPrinter(freq=100, first=10, tag='Training', num_epochs=max_epochs) # Instantiate the trainer trainer = C.Trainer(model, (loss, label_error), learner, progress_printer) # process minibatches and perform model training C.logging.log_number_of_parameters(model) t = 0 for epoch in range(max_epochs): # loop over epochs epoch_end = (epoch+1) * epoch_size while t < epoch_end: # loop over minibatches on the epoch data = train_reader.next_minibatch(minibatch_size, input_map={ # fetch minibatch x: train_reader.streams.query, y: 
train_reader.streams.slot_labels }) trainer.train_minibatch(data) # update model with it t += data[y].num_samples # samples so far trainer.summarize_training_progress() while True: minibatch_size = 500 data = test_reader.next_minibatch(minibatch_size, input_map={ # fetch minibatch x: test_reader.streams.query, y: test_reader.streams.slot_labels }) if not data: # until we hit the end break trainer.test_minibatch(data) trainer.summarize_test_progress() def do_train_test(): global z z = create_model() train_reader = create_reader(data['train']['file'], is_training=True) test_reader = create_reader(data['test']['file'], is_training=False) train_test(train_reader, test_reader, z) do_train_test() """ Explanation: Training the model We are using the Progress Printer to display the training loss and classification error throughout training epochs. The training should take less than 2 minutes on a Titan-X or a Surface Book. Once the training completed, you will see an output like this Finished Epoch [10]: [Training] loss = 0.033263 * 18039, metric = 0.9% * 18039 which is the loss (cross entropy) and the metric (classification error) averaged over the final epoch. On a CPU-only machine, it can be 4 or more times slower. You can try setting python emb_dim = 50 hidden_dim = 100 to reduce the time it takes to run on a CPU, but the model will not fit as well as when the hidden and embedding dimension are larger. Testing the model We also use the Progress Printer to display the accuracy on a test set by computing the error over multiple minibatches of test data. For evaluating on a small sample read from a file, you can set a minibatch size reflecting the sample size and run the test_minibatch on that instance of data. To see how to evaluate a single sequence, we provide an instance later in the tutorial. End of explanation """ z.classify.b.value """ Explanation: This shows how learning proceeds over epochs (passes through the data). 
For example, after four epochs, the loss, which is the cross-entropy criterion, has reached 0.11 as measured on the ~18000 samples of this epoch, and that the error rate is 2.6% on those same 18000 training samples. The epoch size is the number of samples--counted as word tokens, not sentences--to process between model checkpoints. Once the training has completed (a little less than 2 minutes on a Titan-X or a Surface Book), you will see an output like this Finished Epoch [10]: [Training] loss = 0.033263 * 18039, metric = 0.9% * 18039 which is the loss (cross entropy) and the metric (classification error) averaged over the final epoch. On a CPU-only machine, it can be 4 or more times slower. You can try setting python emb_dim = 50 hidden_dim = 100 to reduce the time it takes to run on a CPU, but the model will not fit as well as when the hidden and embedding dimension are larger. End of explanation """ # load dictionaries query_wl = [line.rstrip('\n') for line in open(data['query']['file'])] slots_wl = [line.rstrip('\n') for line in open(data['slots']['file'])] query_dict = {query_wl[i]:i for i in range(len(query_wl))} slots_dict = {slots_wl[i]:i for i in range(len(slots_wl))} # let's run a sequence through seq = 'BOS flights from new york to seattle EOS' w = [query_dict[w] for w in seq.split()] # convert to word indices print(w) onehot = np.zeros([len(w),len(query_dict)], np.float32) for t in range(len(w)): onehot[t,w[t]] = 1 #x = C.sequence.input_variable(vocab_size) pred = z(x).eval({x:[onehot]})[0] print(pred.shape) best = np.argmax(pred,axis=1) print(best) list(zip(seq.split(),[slots_wl[s] for s in best])) """ Explanation: The following block of code illustrates how to evaluate a single sequence. Additionally we show how one can pass in the information using NumPy arrays. 
End of explanation """ # Your task: Add lookahead def create_model(): with C.layers.default_options(initial_state=0.1): return C.layers.Sequential([ C.layers.Embedding(emb_dim), C.layers.Recurrence(C.layers.LSTM(hidden_dim), go_backwards=False), C.layers.Dense(num_labels) ]) # Enable these when done: #z = create_model() #do_train_test() """ Explanation: A Word About Sequential() Before jumping to the tasks, let's have a look again at the model we just ran. The model is described in what we call function-composition style. python Sequential([ Embedding(emb_dim), Recurrence(LSTM(hidden_dim), go_backwards=False), Dense(num_labels) ]) You may be familiar with the "sequential" notation from other neural-network toolkits. If not, Sequential() is a powerful operation that, in a nutshell, allows to compactly express a very common situation in neural networks where an input is processed by propagating it through a progression of layers. Sequential() takes an list of functions as its argument, and returns a new function that invokes these functions in order, each time passing the output of one to the next. For example, python FGH = Sequential ([F,G,H]) y = FGH (x) means the same as y = H(G(F(x))) This is known as "function composition", and is especially convenient for expressing neural networks, which often have this form: +-------+ +-------+ +-------+ x --&gt;| F |--&gt;| G |--&gt;| H |--&gt; y +-------+ +-------+ +-------+ Coming back to our model at hand, the Sequential expression simply says that our model has this form: +-----------+ +----------------+ +------------+ x --&gt;| Embedding |--&gt;| Recurrent LSTM |--&gt;| DenseLayer |--&gt; y +-----------+ +----------------+ +------------+ Task 1: Add a Lookahead Our recurrent model suffers from a structural deficit: Since the recurrence runs from left to right, the decision for a slot label has no information about upcoming words. The model is a bit lopsided. 
Your task will be to modify the model such that the input to the recurrence consists not only of the current word, but also of the next one (lookahead). Your solution should be in function-composition style. Hence, you will need to write a Python function that does the following: takes no input arguments creates a placeholder (sequence) variable computes the "next value" in this sequence using the sequence.future_value() operation and concatenates the current and the next value into a vector of twice the embedding dimension using splice() and then insert this function into Sequential()'s list right after the embedding layer. End of explanation """ # Your task: Add bidirectional recurrence def create_model(): with C.layers.default_options(initial_state=0.1): return C.layers.Sequential([ C.layers.Embedding(emb_dim), C.layers.Recurrence(C.layers.LSTM(hidden_dim), go_backwards=False), C.layers.Dense(num_labels) ]) # Enable these when done: #z = create_model() #do_train_test() """ Explanation: Task 2: Bidirectional Recurrent Model Aha, knowledge of future words help. So instead of a one-word lookahead, why not look ahead until all the way to the end of the sentence, through a backward recurrence? Let us create a bidirectional model! Your task is to implement a new layer that performs both a forward and a backward recursion over the data, and concatenates the output vectors. Note, however, that this differs from the previous task in that the bidirectional layer contains learnable model parameters. In function-composition style, the pattern to implement a layer with model parameters is to write a factory function that creates a function object. A function object, also known as functor, is an object that is both a function and an object. Which means nothing else that it contains data yet still can be invoked as if it was a function. 
For example, Dense(outDim) is a factory function that returns a function object that contains a weight matrix W, a bias b, and another function to compute input @ W + b. (This is using Python 3.5 notation for matrix multiplication. In Numpy syntax it is input.dot(W) + b). E.g. saying Dense(1024) will create this function object, which can then be used like any other function, also immediately: Dense(1024)(x). Let's look at an example for further clarity: Let us implement a new layer that combines a linear layer with a subsequent batch normalization. To allow function composition, the layer needs to be realized as a factory function, which could look like this: python def DenseLayerWithBN(dim): F = Dense(dim) G = BatchNormalization() x = placeholder() apply_x = G(F(x)) return apply_x Invoking this factory function will create F, G, x, and apply_x. In this example, F and G are function objects themselves, and apply_x is the function to be applied to the data. Thus, e.g. calling DenseLayerWithBN(1024) will create an object containing a linear-layer function object called F, a batch-normalization function object G, and apply_x which is the function that implements the actual operation of this layer using F and G. It will then return apply_x. To the outside, apply_x looks and behaves like a function. Under the hood, however, apply_x retains access to its specific instances of F and G. Now back to our task at hand. You will now need to create a factory function, very much like the example above. You shall create a factory function that creates two recurrent layer instances (one forward, one backward), and then defines an apply_x function which applies both layer instances to the same x and concatenate the two results. Alright, give it a try! To know how to realize a backward recursion in CNTK, please take a hint from how the forward recursion is done. 
Please also do the following: * remove the one-word lookahead you added in the previous task, which we aim to replace; and * make sure each LSTM is using hidden_dim//2 outputs to keep the total number of model parameters limited. End of explanation """
tommyogden/maxwellbloch
docs/examples/mbs-two-weak-square-decay.ipynb
mit
mb_solve_json = """ { "atom": { "decays": [ { "channels": [[0, 1]], "rate": 1.0 } ], "energies": [], "fields": [ { "coupled_levels": [[0, 1]], "detuning": 0.0, "rabi_freq": 1.0e-3, "rabi_freq_t_args": { "ampl": 1.0, "on": -0.5, "off": 0.5 }, "rabi_freq_t_func": "square" } ], "num_states": 2 }, "t_min": -2.0, "t_max": 10.0, "t_steps": 100, "z_min": -0.2, "z_max": 1.2, "z_steps": 20, "interaction_strengths": [ 1.0 ] } """ from maxwellbloch import mb_solve mbs = mb_solve.MBSolve().from_json_str(mb_solve_json) Omegas_zt, states_zt = mbs.mbsolve() import matplotlib.pyplot as plt %matplotlib inline import seaborn as sns import numpy as np sns.set_style('darkgrid') """ Explanation: Two-Level: Weak Square Pulse with Decay Define and Solve End of explanation """ from scipy import interpolate plt.plot(mbs.tlist, Omegas_zt[0,0].real/(2*np.pi)) half_max = np.max(Omegas_zt[0,0].real/(2*np.pi))/2 spline = interpolate.UnivariateSpline(mbs.tlist, (Omegas_zt[0,0].real/(2*np.pi)-half_max), s=0) r1, r2 = spline.roots() # draw line at FWHM plt.hlines(y=half_max, xmin=r1, xmax=r2, linestyle='dotted') plt.annotate('FWHM: ' + '%0.2f'%(r2 - r1), xy=((r2+r1)/2, half_max), xycoords='data', xytext=(25, 0), textcoords='offset points'); """ Explanation: Check the Input Pulse Profile We'll just confirm that the input pulse has the profile that we want: a Gaussian with an amplitude of $1.0 \Gamma$ and a full-width at half maximum (FWHM) of $1.0 \tau$. End of explanation """ fig = plt.figure(1, figsize=(16, 6)) ax = fig.add_subplot(111) cmap_range = np.linspace(0.0, 1.0e-3, 11) cf = ax.contourf(mbs.tlist, mbs.zlist, np.abs(mbs.Omegas_zt[0]/(2*np.pi)), cmap_range, cmap=plt.cm.Blues) ax.set_title('Rabi Frequency ($\Gamma / 2\pi $)') ax.set_xlabel('Time ($1/\Gamma$)') ax.set_ylabel('Distance ($L$)') for y in [0.0, 1.0]: ax.axhline(y, c='grey', lw=1.0, ls='dotted') plt.colorbar(cf); """ Explanation: Field Output End of explanation """
sthuggins/phys202-2015-work
assignments/assignment03/.ipynb_checkpoints/NumpyEx01-checkpoint.ipynb
mit
import numpy as np %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns import antipackage import github.ellisonbg.misc.vizarray as va """ Explanation: Numpy Exercise 1 Imports End of explanation """ def checkerboard(size): """Return a 2d checkboard of 0.0 and 1.0 as a NumPy array""" # YOUR CODE HERE raise NotImplementedError() a = checkerboard(4) assert a[0,0]==1.0 assert a.sum()==8.0 assert a.dtype==np.dtype(float) assert np.all(a[0,0:5:2]==1.0) assert np.all(a[1,0:5:2]==0.0) b = checkerboard(5) assert b[0,0]==1.0 assert b.sum()==13.0 assert np.all(b.ravel()[0:26:2]==1.0) assert np.all(b.ravel()[1:25:2]==0.0) """ Explanation: Checkerboard Write a Python function that creates a square (size,size) 2d Numpy array with the values 0.0 and 1.0: Your function should work for both odd and even size. The 0,0 element should be 1.0. The dtype should be float. End of explanation """ # YOUR CODE HERE raise NotImplementedError() assert True """ Explanation: Use vizarray to visualize a checkerboard of size=20 with a block size of 10px. End of explanation """ # YOUR CODE HERE raise NotImplementedError() assert True """ Explanation: Use vizarray to visualize a checkerboard of size=27 with a block size of 5px. End of explanation """
ingmarschuster/distributions
Transforms_demo.ipynb
lgpl-3.0
from __future__ import division, print_function, absolute_import import numpy as np import scipy as sp import scipy.stats as stats from numpy import exp, log, sqrt from scipy.misc import logsumexp import distributions as dist, distributions.transform as tr import matplotlib.pyplot as plt def apply_to_mg(func, *mg): #apply a function to points on a meshgrid x = np.vstack([e.flat for e in mg]).T return np.array([func(i) for i in x]).reshape(mg[0].shape) def cont(f, coord, grid_density=100): fig = plt.figure() xx = np.linspace(coord[0][0], coord[0][1], grid_density) yy = np.linspace(coord[1][0], coord[1][1], grid_density) X, Y = np.meshgrid(xx,yy) Z = apply_to_mg(f, X, Y) plt.contour(X,Y,exp(Z)) def visualize(f, xin, yin, coord): fig = plt.figure() # plt.scatter(xin, yin) xx = np.linspace(coord[0][0], coord[0][1],100) yy = np.linspace(coord[1][0], coord[1][1],100) X, Y = np.meshgrid(xx,yy) Z = apply_to_mg(f, X, Y) plt.contour(X,Y,exp(Z)) def vis_dist(d, nsamps, coord): samps = d.rvs(nsamps).T visualize(d.logpdf, samps[0], samps[1], coord) try: for x in [np.array((0,8)), np.array((-1,10))]: g = d.logpdf_grad(x) print('d',g) g = x+g plt.arrow(x[0],x[1], g[0], g[1], head_width=0.05, head_length=0.1, fc='k', ec='k') except: pass plt.show() """ Explanation: Ingmars change of variables code First of, some plotting code End of explanation """ vis_dist(tr.Separate(dist.mvnorm(np.zeros(2), np.eye(2)), [0, ], 0., 0.3), 1000, [[-2,2], [-2,2]]) vis_dist(tr.Separate(dist.mvnorm(np.zeros(2), np.eye(2)), [0, 1], 0., 0.3), 1000, [[-2,2], [-2,2]]) """ Explanation: Now lets separate a 2D Gaussian into 2 or 4 modes End of explanation """ vis_dist(tr.Separate(dist.mvnorm(np.zeros(2), np.eye(2)), [0, 1], 0., 0.3), 1000, [[-2,2], [-2,2]]) vis_dist(tr.Separate(dist.mvnorm(np.zeros(2), np.eye(2)), [0, 1], 0., 0.5), 1000, [[-2,2], [-2,2]]) """ Explanation: Playing around with the power parameter of the Separate transform increases and decreases separation End of explanation """ 
np.random.seed(1) plt.scatter(*dist.mvnorm(np.zeros(2), np.eye(2)).rvs(100).T) np.random.seed(1) vis_dist(tr.Separate(tr.Separate(dist.mvnorm(np.zeros(2), np.eye(2)), [0, 1], 0., 0.3), [0, 1], 0., 1./0.3), 100, [[-2,2], [-2,2]]) np.random.seed() """ Explanation: And the transformation is fully invertible: we can get back to the gaussian we started with (because of a bug in the implementation, the contours are messed up for the second plot) End of explanation """ vis_dist(tr.Softplus(dist.mvnorm(np.zeros(2), np.eye(2)), [0, 1]), 1000, [[-2,2], [-2,2]]) vis_dist(tr.Power(dist.mvnorm(np.zeros(2), np.eye(2)), np.array([1]), 1.5, 0, 2), 1000, [[-7,7], [-5,10]]) vis_dist(tr.TimesFirst(dist.mvnorm(np.zeros(2), np.eye(2)), [ 1]), 1000, [[-4,4], [-2,2]]) #vis_dist(tr.DivByFirst(dist.mvnorm(np.zeros(2), np.eye(2)), [ 1]), 1000, [[-2,2], [-2,2]]) """ Explanation: Demonstration of some of the other implemented transforms (Softplus, power, multiplication of differnt dimensions) End of explanation """
dmitrip/PML
performance_tests/num_clumps.ipynb
apache-2.0
S = 10_000 # support set size p = np.ones(S)/S # distribution # iterate over S unknown, known S_known_list = [False,True] # make sample size list n_list n_min = np.sqrt(S) n_max = 100*S num_n_points = 21 n_list = np.logspace(np.log10(n_min), np.log10(n_max), num_n_points).astype(int) num_trials = 100 # number of trials for each value of n record_num_clumps = {S_known: [[] for n in n_list] for S_known in S_known_list} # num clumps by trial, by n record_inferred_S = {S_known: [[] for n in n_list] for S_known in S_known_list} # inferred support set size by trial, by n record_has_continuous_part = {S_known: [[] for n in n_list] for S_known in S_known_list} # true iff inferred distribution has continuous part for S_known in S_known_list: print('S known:',S_known) for (i,n) in enumerate(n_list): print('n:',n) for trial_num in range(num_trials): # draw empirical histogram hist = util.draw_histogram_from_multinomial(p, n) S_empirical = np.count_nonzero(hist) # get approximate PML if S_known: (p_pml, F0, _, _) = pml.PML_distribution_approximate(hist, S, warn_on_continuous_part=False) else: (p_pml, F0, _, _) = pml.PML_distribution_approximate(hist, warn_on_continuous_part=False) # record num clumps record_num_clumps[S_known][i].append(np.unique(p_pml).size) # record estimated support set size if not np.isinf(F0): record_inferred_S[S_known][i].append(S_empirical + F0) # record whether has continuous part record_has_continuous_part[S_known][i].append(np.isinf(F0)) """ Explanation: test 1 distribution $p$ uniform on ${1,...,S}$ vary $n$ (sample size) between $\sqrt{S}$ and $100S$ $S$ known case, $S$ unknown case plot vs. 
$n$: number of clumps in approximate PML distribution inferred support set size (for $S$ unknown case) probability to have "continuous" part in inferred distribution get data End of explanation """ colors_by_num_clumps = {1:[0,0,0], 2:[0.6,0,0], 3:[1,0,0]} for S_known in S_known_list: fig = plt.figure(figsize=(15,5)) # plot probability to have certain number of clumps # case (1,2,>=3) for (num_clumps, strict_equal) in zip([1,2,3],[True,True,False]): if strict_equal: y = np.array([np.sum(np.array(record_num_clumps[S_known][i])==num_clumps)/num_trials for i in range(n_list.size)]) label = 'num clumps == {}'.format(num_clumps) else: y = np.array([np.sum(np.array(record_num_clumps[S_known][i])>=num_clumps)/num_trials for i in range(n_list.size)]) label = 'num clumps >= {}'.format(num_clumps) yerr = 2*np.sqrt(y*(1-y)/num_trials) plt.errorbar(n_list,y,yerr=yerr,fmt='o-',color=colors_by_num_clumps[num_clumps], label=label) if not S_known: # plot fraction of trials that result in approx. PML distribution having continuous part y = np.array([np.count_nonzero(np.array(record_has_continuous_part[S_known][i]))/num_trials for i in range(n_list.size)]) yerr = 2*np.sqrt(y*(1-y)/num_trials) plt.errorbar(n_list,y,yerr=yerr,fmt='x:',color=[0,0,1], label='has continuous part') plt.plot([S, S], plt.gca().get_ylim(), '--', label='S = {}'.format(S), color=[0,0.5,0]) plt.plot(plt.gca().get_xlim(),[1,1],'-',linewidth=1, color=[0.5,0.5,0.5]) plt.plot(plt.gca().get_xlim(),[0,0],'-',linewidth=1, color=[0.5,0.5,0.5]) plt.gca().set_xscale('log') plt.legend(fontsize=20) plt.title('$S$ {}known ($S$ = {})\nFraction of trials vs. 
n'.format({True:'',False:'un'}[S_known], S),fontsize=20) plt.xlabel('$n$',fontsize=20) plt.show() fig = plt.figure(figsize=(15,5)) # plot mean, standard error of inferred support set size y = np.array([np.mean(record_inferred_S[False][i]) for i in range(n_list.size)]) yerr = np.array([2*np.std(record_inferred_S[False][i]) for i in range(n_list.size)]) plt.errorbar(n_list, y, yerr=yerr, fmt='ro', label='mean$\pm 2 \sigma$') plt.plot(plt.gca().get_xlim(),[S,S],'--', label='$S$={}'.format(S),color=[0,0.5,0]) plt.gca().set_xscale('log') plt.legend(fontsize=20) plt.title('Inferred $S$ distribution vs. $n$\n($S$ = {})'.format(S),fontsize=20) plt.xlabel('$n$',fontsize=20) plt.show() """ Explanation: plot End of explanation """ S = 10_000 # support set size p = np.append(np.ones(S)/S, np.zeros(S)) # distribution # make sample size list n_list n_min = np.sqrt(S) n_max = 100*S num_n_points = 21 n_list = np.logspace(np.log10(n_min), np.log10(n_max), num_n_points).astype(int) num_trials = 100 # number of trials for each value of n record_num_clumps = [[] for n in n_list] # num clumps by trial, by n for (i,n) in enumerate(n_list): print('n:',n) for trial_num in range(num_trials): # draw empirical histogram hist = util.draw_histogram_from_multinomial(p, n) S_empirical = np.count_nonzero(hist) # get approximate PML (p_pml, F0, _, _) = pml.PML_distribution_approximate(hist, 2*S, warn_on_continuous_part=False) # record num clumps record_num_clumps[i].append(np.unique(p_pml).size) """ Explanation: test 2 distribution $p = (1/S,\ldots,1/S,0,\ldots,0)$, alphabet size $2S$ vary $n$ (sample size) between $\sqrt{S}$ and $100S$ plot vs. 
$n$: number of clumps in approximate PML distribution get data End of explanation """ colors_by_num_clumps = {1:[0,0,0], 2:[0.6,0,0], 3:[1,0,0]} fig = plt.figure(figsize=(15,5)) # plot probability to have certain number of clumps # case (1,2,>=3) for (num_clumps, strict_equal) in zip([1,2,3],[True,True,False]): if strict_equal: y = np.array([np.sum(np.array(record_num_clumps[i])==num_clumps)/num_trials for i in range(n_list.size)]) label = 'num clumps == {}'.format(num_clumps) else: y = np.array([np.sum(np.array(record_num_clumps[i])>=num_clumps)/num_trials for i in range(n_list.size)]) label = 'num clumps >= {}'.format(num_clumps) yerr = 2*np.sqrt(y*(1-y)/num_trials) plt.errorbar(n_list,y,yerr=yerr,fmt='o-',color=colors_by_num_clumps[num_clumps], label=label) plt.plot([S, S], plt.gca().get_ylim(), '--', label='S = {}'.format(S), color=[0,0.5,0]) plt.plot(plt.gca().get_xlim(),[1,1],'-',linewidth=1, color=[0.5,0.5,0.5]) plt.plot(plt.gca().get_xlim(),[0,0],'-',linewidth=1, color=[0.5,0.5,0.5]) plt.gca().set_xscale('log') plt.legend(fontsize=20) plt.title('Fraction of trials vs. n',fontsize=20) plt.xlabel('$n$',fontsize=20) plt.show() """ Explanation: plot End of explanation """
MingChen0919/learning-apache-spark
notebooks/01-data-strcture/1.1-rdd.ipynb
mit
# from a list rdd = sc.parallelize([1,2,3]) rdd.collect() # from a tuple rdd = sc.parallelize(('cat', 'dog', 'fish')) rdd.collect() # from a list of tuple list_t = [('cat', 'dog', 'fish'), ('orange', 'apple')] rdd = sc.parallelize(list_t) rdd.collect() # from a set s = {'cat', 'dog', 'fish', 'cat', 'dog', 'dog'} rdd = sc.parallelize(s) rdd.collect() """ Explanation: RDD object The class pyspark.SparkContext creates a client which connects to a Spark cluster. This client can be used to create an RDD object. There are two methods from this class for directly creating RDD objects: * parallelize() * textFile() parallelize() parallelize() distribute a local python collection to form an RDD. Common built-in python collections include dist, list, tuple or set. Examples: End of explanation """ # from a dict d = { 'a': 100, 'b': 200, 'c': 300 } rdd = sc.parallelize(d) rdd.collect() """ Explanation: When it is a dict, only the keys are used to form the RDD. End of explanation """ # read a csv file rdd = sc.textFile('../../data/mtcars.csv') rdd.take(5) # read a txt file rdd = sc.textFile('../../data/twitter.txt') rdd.take(5) """ Explanation: textFile() The textFile() function reads a text file and returns it as an RDD of strings. Usually, you will need to apply some map functions to transform each elements of the RDD to some data structure/type that is suitable for data analysis. When using textFile(), each line of the text file becomes an element in the resulting RDD. Examples: End of explanation """
mne-tools/mne-tools.github.io
0.13/_downloads/plot_eeg_erp.ipynb
bsd-3-clause
import mne
from mne.datasets import sample
""" Explanation: EEG processing and Event Related Potentials (ERPs) For a generic introduction to the computation of ERP and ERF see tut_epoching_and_averaging. Here we cover the specifics of EEG, namely: - setting the reference - using standard montages :func:`mne.channels.Montage` - Evoked arithmetic (e.g. differences) End of explanation """
# Paths to the filtered sample recording and its events file.
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
raw = mne.io.read_raw_fif(raw_fname, add_eeg_ref=False, preload=True)
raw.set_eeg_reference()  # set EEG average reference
""" Explanation: Setup for reading the raw data End of explanation """
raw.pick_types(meg=False, eeg=True, eog=True)
""" Explanation: Let's restrict the data to the EEG channels End of explanation """
print(raw.info)
""" Explanation: By looking at the measurement info you will see that we have now 59 EEG channels and 1 EOG channel End of explanation """
# Treat the EOG channel as an EEG channel by changing its channel type.
raw.set_channel_types(mapping={'EOG 061': 'eeg'})
print(raw.info)
""" Explanation: In practice it's quite common to have some EEG channels that are actually EOG channels. To change a channel type you can use the :func:mne.io.Raw.set_channel_types method. For example to treat an EOG channel as EEG you can change its type using End of explanation """
raw.rename_channels(mapping={'EOG 061': 'EOG'})
""" Explanation: And to change the name of the EOG channel End of explanation """
# Restore the channel's original EOG type.
raw.set_channel_types(mapping={'EOG': 'eog'})
""" Explanation: Let's reset the EOG channel back to EOG type. End of explanation """
# Each channel's location is stored in the 'loc' entry of its info dict.
print(raw.info['chs'][0]['loc'])
""" Explanation: The EEG channels in the sample dataset already have locations. These locations are available in the 'loc' of each channel description. 
For the first channel we get End of explanation """ raw.plot_sensors() raw.plot_sensors('3d') # in 3D """ Explanation: And it's actually possible to plot the channel locations using the :func:mne.io.Raw.plot_sensors method End of explanation """ montage = mne.channels.read_montage('standard_1020') print(montage) """ Explanation: Setting EEG montage In the case where your data don't have locations you can set them using a :func:mne.channels.Montage. MNE comes with a set of default montages. To read one of them do: End of explanation """ raw_no_ref, _ = mne.io.set_eeg_reference(raw, []) """ Explanation: To apply a montage on your data use the :func:mne.io.set_montage function. Here don't actually call this function as our demo dataset already contains good EEG channel locations. Next we'll explore the definition of the reference. Setting EEG reference Let's first remove the reference from our Raw object. This explicitly prevents MNE from adding a default EEG average reference required for source localization. End of explanation """ reject = dict(eeg=180e-6, eog=150e-6) event_id, tmin, tmax = {'left/auditory': 1}, -0.2, 0.5 events = mne.read_events(event_fname) epochs_params = dict(events=events, event_id=event_id, tmin=tmin, tmax=tmax, reject=reject, add_eeg_ref=False) evoked_no_ref = mne.Epochs(raw_no_ref, **epochs_params).average() del raw_no_ref # save memory title = 'EEG Original reference' evoked_no_ref.plot(titles=dict(eeg=title)) evoked_no_ref.plot_topomap(times=[0.1], size=3., title=title) """ Explanation: We next define Epochs and compute an ERP for the left auditory condition. End of explanation """ raw_car, _ = mne.io.set_eeg_reference(raw) evoked_car = mne.Epochs(raw_car, **epochs_params).average() del raw_car # save memory title = 'EEG Average reference' evoked_car.plot(titles=dict(eeg=title)) evoked_car.plot_topomap(times=[0.1], size=3., title=title) """ Explanation: Average reference: This is normally added by default, but can also be added explicitly. 
End of explanation """ raw_custom, _ = mne.io.set_eeg_reference(raw, ['EEG 001', 'EEG 002']) evoked_custom = mne.Epochs(raw_custom, **epochs_params).average() del raw_custom # save memory title = 'EEG Custom reference' evoked_custom.plot(titles=dict(eeg=title)) evoked_custom.plot_topomap(times=[0.1], size=3., title=title) """ Explanation: Custom reference: Use the mean of channels EEG 001 and EEG 002 as a reference End of explanation """ event_id = {'left/auditory': 1, 'right/auditory': 2, 'left/visual': 3, 'right/visual': 4} epochs_params = dict(events=events, event_id=event_id, tmin=tmin, tmax=tmax, reject=reject) epochs = mne.Epochs(raw, **epochs_params) print(epochs) """ Explanation: Evoked arithmetics Trial subsets from Epochs can be selected using 'tags' separated by '/'. Evoked objects support basic arithmetic. First, we create an Epochs object containing 4 conditions. End of explanation """ left, right = epochs["left"].average(), epochs["right"].average() # create and plot difference ERP mne.combine_evoked([left, -right], weights='equal').plot_joint() """ Explanation: Next, we create averages of stimulation-left vs stimulation-right trials. We can use basic arithmetic to, for example, construct and plot difference ERPs. End of explanation """ aud_l = epochs["auditory", "left"].average() aud_r = epochs["auditory", "right"].average() vis_l = epochs["visual", "left"].average() vis_r = epochs["visual", "right"].average() all_evokeds = [aud_l, aud_r, vis_l, vis_r] print(all_evokeds) """ Explanation: This is an equal-weighting difference. If you have imbalanced trial numbers, you could also consider either equalizing the number of events per condition (using :meth:epochs.equalize_epochs_counts &lt;mne.Epochs.equalize_event_counts). As an example, first, we create individual ERPs for each condition. 
End of explanation """ all_evokeds = [epochs[cond].average() for cond in sorted(event_id.keys())] print(all_evokeds) # Then, we construct and plot an unweighted average of left vs. right trials # this way, too: mne.combine_evoked(all_evokeds, weights=(0.25, -0.25, 0.25, -0.25)).plot_joint() """ Explanation: This can be simplified with a Python list comprehension: End of explanation """ # If they are stored in a list, they can be easily averaged, for example, # for a grand average across subjects (or conditions). grand_average = mne.grand_average(all_evokeds) mne.write_evokeds('/tmp/tmp-ave.fif', all_evokeds) # If Evokeds objects are stored in a dictionary, they can be retrieved by name. all_evokeds = dict((cond, epochs[cond].average()) for cond in event_id) print(all_evokeds['left/auditory']) # Besides for explicit access, this can be used for example to set titles. for cond in all_evokeds: all_evokeds[cond].plot_joint(title=cond) """ Explanation: Often, it makes sense to store Evoked objects in a dictionary or a list - either different conditions, or different subjects. End of explanation """
aleereza/twitterbot
twitterbot.ipynb
apache-2.0
import tweepy
import time
import sys
import pickle
import datetime
""" Explanation: Twitterbot Here I am going to create a Twitter Bot step by step. First I should create a Twitter application on https://dev.twitter.com/ Install tweepy: #pip install tweepy End of explanation """
# Load the pickled Twitter application credentials.
path = "./data/"
filename = "auth_data"
# Use a context manager so the file is always closed, even if unpickling fails
# (the original opened/closed the file by hand).
with open(path + filename, "rb") as fileobject:
    auth_data = pickle.load(fileobject)

# BUG FIX: the credentials were unpickled into `auth_data`, but the lookups
# below referenced the stale name `auth_data1`, which raised a NameError.
#account=auth_data['account']
CONSUMER_KEY = auth_data['CONSUMER_KEY']
CONSUMER_SECRET = auth_data['CONSUMER_SECRET']
ACCESS_KEY = auth_data['ACCESS_KEY']
ACCESS_SECRET = auth_data['ACCESS_SECRET']

# Authenticate against the Twitter API via OAuth.
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth)

#Error handling (best-effort sanity check that a client object was built)
if (not api):
    print("Problem connecting to API")
""" Explanation: Twitter application information: End of explanation """
now = datetime.datetime.now()
#time1=datetime.datetime(2017,6,14,0)
#time2=datetime.datetime(2017,6,15,0)
# `users` is assumed to be an iterable of Twitter user ids defined in an
# earlier cell -- TODO confirm.
for uid in users:
    user = api.get_user(uid)
    #requestnum=requestnum+1
    #print(t.user.screen_name,uid)
    # Only scan public accounts that are older than 30 days.
    if (not user.protected and (now - user.created_at).days > 30):
        #print("not protected and older than 30 days")
        for t in tweepy.Cursor(api.user_timeline, user_id=uid, include_entities=True).items(20):
            if (now - t.created_at).days > 1:
                # Timeline is newest-first: stop once tweets are older than a day.
                break
            elif (t.favorite_count > 1000):
                # Report popular (>1000 favorites) tweets from the last day.
                print(t.user.screen_name)
                print(t.text)
                print("https://twitter.com/statuses/" + str(t.id))

a = datetime.datetime(2017, 6, 14, 0)
print(a)
""" Explanation: Search Parameters End of explanation """
GoogleCloudPlatform/training-data-analyst
courses/machine_learning/deepdive2/machine_learning_in_the_enterprise/solutions/gapic-vizier-multi-objective-optimization.ipynb
apache-2.0
# Setup your dependencies import os # The Google Cloud Notebook product has specific requirements IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version") # Google Cloud Notebook requires dependencies to be installed with '--user' USER_FLAG = "" if IS_GOOGLE_CLOUD_NOTEBOOK: USER_FLAG = "--user" # Upgrade the specified package to the newest available version ! pip install {USER_FLAG} --upgrade google-cloud-aiplatform import os if not os.getenv("IS_TESTING"): # Restart the kernel after pip installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) """ Explanation: Using Vertex Vizier to Optimize Multiple Objectives Overview In this lab, you will use Vertex Vizier to perform multi-objective optimization. Multi-objective optimization is concerned with mathematical optimization problems involving more than one objective function to be optimized simultaneously Objective The goal is to minimize the objective metric: y1 = r*sin(theta) and simultaneously maximize the objective metric: y2 = r*cos(theta) that you will evaluate over the parameter space: r in [0,1], theta in [0, pi/2] Introduction In this notebook, you will use Vertex Vizier multi-objective optimization. Multi-objective optimization is concerned with mathematical optimization problems involving more than one objective function to be optimized simultaneously. Each learning objective will correspond to a #TODO in this student lab notebook -- try to complete this notebook first and then review the solution notebook. Make sure to enable the Vertex AI API Install Vertex AI library Download and install Vertex AI library. 
End of explanation """ # Import necessary libraries import datetime import json from google.cloud import aiplatform_v1beta1 """ Explanation: Import libraries and define constants End of explanation """ # Fill in your project ID and region REGION = "us-central1" # @param {type:"string"} PROJECT_ID = "qwiklabs-gcp-00-866bdf7714fe" # @param {type:"string"} # These will be automatically filled in. STUDY_DISPLAY_NAME = "{}_study_{}".format( PROJECT_ID.replace("-", ""), datetime.datetime.now().strftime("%Y%m%d_%H%M%S") ) # @param {type: 'string'} ENDPOINT = REGION + "-aiplatform.googleapis.com" PARENT = "projects/{}/locations/{}".format(PROJECT_ID, REGION) print("ENDPOINT: {}".format(ENDPOINT)) print("REGION: {}".format(REGION)) print("PARENT: {}".format(PARENT)) # If you don't know your project ID, you might be able to get your project ID # using gcloud command by executing the second cell below. if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "qwiklabs-gcp-00-866bdf7714fe": # Get your GCP project id from gcloud shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID:", PROJECT_ID) ! gcloud config set project $PROJECT_ID """ Explanation: Tutorial This section defines some parameters and util methods to call Vertex Vizier APIs. Please fill in the following information to get started. 
End of explanation """ # Parameter Configuration param_r = {"parameter_id": "r", "double_value_spec": {"min_value": 0, "max_value": 1}} param_theta = { "parameter_id": "theta", "double_value_spec": {"min_value": 0, "max_value": 1.57}, } # TODO # Objective Metrics metric_y1 = {"metric_id": "y1", "goal": "MINIMIZE"} # TODO # Objective Metrics metric_y2 = {"metric_id": "y2", "goal": "MAXIMIZE"} # Put it all together in a study configuration study = { "display_name": STUDY_DISPLAY_NAME, "study_spec": { "algorithm": "RANDOM_SEARCH", "parameters": [ param_r, param_theta, ], "metrics": [metric_y1, metric_y2], }, } print(json.dumps(study, indent=2, sort_keys=True)) """ Explanation: Create the study configuration The following is a sample study configuration, built as a hierarchical python dictionary. It is already filled out. Run the cell to configure the study. End of explanation """ # TODO # Create the study using study configuration and send request through VizierServiceClient vizier_client = aiplatform_v1beta1.VizierServiceClient( client_options=dict(api_endpoint=ENDPOINT) ) study = vizier_client.create_study(parent=PARENT, study=study) STUDY_ID = study.name print("STUDY_ID: {}".format(STUDY_ID)) """ Explanation: Create the study Next, create the study, which you will subsequently run to optimize the two objectives. 
End of explanation """ import math # r * sin(theta) def Metric1Evaluation(r, theta): """Evaluate the first metric on the trial.""" return r * math.sin(theta) # r * cos(theta) def Metric2Evaluation(r, theta): """Evaluate the second metric on the trial.""" return r * math.cos(theta) def CreateMetrics(trial_id, r, theta): print(("=========== Start Trial: [{}] =============").format(trial_id)) # TODO # Evaluate both objective metrics for this trial y1 = Metric1Evaluation(r, theta) y2 = Metric2Evaluation(r, theta) print( "[r = {}, theta = {}] => y1 = r*sin(theta) = {}, y2 = r*cos(theta) = {}".format( r, theta, y1, y2 ) ) metric1 = {"metric_id": "y1", "value": y1} metric2 = {"metric_id": "y2", "value": y2} # Return the results for this trial return [metric1, metric2] """ Explanation: Metric evaluation functions Next, define some functions to evaluate the two objective metrics. End of explanation """ client_id = "client1" # @param {type: 'string'} suggestion_count_per_request = 5 # @param {type: 'integer'} max_trial_id_to_stop = 4 # @param {type: 'integer'} print("client_id: {}".format(client_id)) print("suggestion_count_per_request: {}".format(suggestion_count_per_request)) print("max_trial_id_to_stop: {}".format(max_trial_id_to_stop)) """ Explanation: Set configuration parameters for running trials client_id: The identifier of the client that is requesting the suggestion. If multiple SuggestTrialsRequests have the same client_id, the service will return the identical suggested trial if the trial is PENDING, and provide a new trial if the last suggested trial was completed. suggestion_count_per_request: The number of suggestions (trials) requested in a single request. max_trial_id_to_stop: The number of trials to explore before stopping. It is set to 4 to shorten the time to run the code, so don't expect convergence. For convergence, it would likely need to be about 20 (a good rule of thumb is to multiply the total dimensionality by 10). 
End of explanation """
# Suggest-evaluate-complete loop: repeatedly ask Vizier for suggested trials,
# evaluate both objective metrics for each suggestion, report the measurements,
# and mark each trial complete until the trial budget is reached.
trial_id = 0
while int(trial_id) < max_trial_id_to_stop:
    # Request a batch of suggested trials for this client.
    suggest_response = vizier_client.suggest_trials(
        {
            "parent": STUDY_ID,
            "suggestion_count": suggestion_count_per_request,
            "client_id": client_id,
        }
    )

    for suggested_trial in suggest_response.result().trials:
        # The trial id is the last path component of the trial resource name.
        trial_id = suggested_trial.name.split("/")[-1]
        trial = vizier_client.get_trial({"name": suggested_trial.name})

        # Skip trials that no longer need an evaluation.
        if trial.state in ["COMPLETED", "INFEASIBLE"]:
            continue

        # Pull the suggested parameter values (r, theta) out of the trial.
        for param in trial.parameters:
            if param.parameter_id == "r":
                r = param.value
            elif param.parameter_id == "theta":
                theta = param.value
        print("Trial : r is {}, theta is {}.".format(r, theta))

        # TODO
        # Store your measurement and send the request
        vizier_client.add_trial_measurement(
            {
                "trial_name": suggested_trial.name,
                "measurement": {
                    # TODO
                    "metrics": CreateMetrics(suggested_trial.name, r, theta)
                },
            }
        )

        # Mark the trial as finished (and feasible) so Vizier can use it.
        response = vizier_client.complete_trial(
            {"name": suggested_trial.name, "trial_infeasible": False}
        )
""" Explanation: Run Vertex Vizier trials Run the trials. End of explanation """
# TODO
# List all the pareto-optimal trials
optimal_trials = vizier_client.list_optimal_trials({"parent": STUDY_ID})

print("optimal_trials: {}".format(optimal_trials))
""" Explanation: List the optimal solutions list_optimal_trials returns the pareto-optimal Trials for multi-objective Study or the optimal Trials for single-objective Study. In this case, we defined multiple objectives in previous steps, so pareto-optimal trials will be returned. End of explanation """
# Delete the study to release its resources.
vizier_client.delete_study({"name": STUDY_ID})
""" Explanation: Cleaning up To clean up all Google Cloud resources used in this project, you can delete the Google Cloud project you used for the tutorial. You can also manually delete resources that you created by running the following code. End of explanation """
keras-team/keras-io
examples/vision/ipynb/convmixer.ipynb
apache-2.0
from tensorflow.keras import layers from tensorflow import keras import matplotlib.pyplot as plt import tensorflow_addons as tfa import tensorflow as tf import numpy as np """ Explanation: Image classification with ConvMixer Author: Sayak Paul<br> Date created: 2021/10/12<br> Last modified: 2021/10/12<br> Description: An all-convolutional network applied to patches of images. Introduction Vision Transformers (ViT; Dosovitskiy et al.) extract small patches from the input images, linearly project them, and then apply the Transformer (Vaswani et al.) blocks. The application of ViTs to image recognition tasks is quickly becoming a promising area of research, because ViTs eliminate the need to have strong inductive biases (such as convolutions) for modeling locality. This presents them as a general computation primititive capable of learning just from the training data with as minimal inductive priors as possible. ViTs yield great downstream performance when trained with proper regularization, data augmentation, and relatively large datasets. In the Patches Are All You Need paper (note: at the time of writing, it is a submission to the ICLR 2022 conference), the authors extend the idea of using patches to train an all-convolutional network and demonstrate competitive results. Their architecture namely ConvMixer uses recipes from the recent isotrophic architectures like ViT, MLP-Mixer (Tolstikhin et al.), such as using the same depth and resolution across different layers in the network, residual connections, and so on. In this example, we will implement the ConvMixer model and demonstrate its performance on the CIFAR-10 dataset. To use the AdamW optimizer, we need to install TensorFlow Addons: shell pip install -U -q tensorflow-addons Imports End of explanation """ learning_rate = 0.001 weight_decay = 0.0001 batch_size = 128 num_epochs = 10 """ Explanation: Hyperparameters To keep run time short, we will train the model for only 10 epochs. 
To focus on the core ideas of ConvMixer, we will not use other training-specific elements like RandAugment (Cubuk et al.). If you are interested in learning more about those details, please refer to the original paper. End of explanation """ (x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data() val_split = 0.1 val_indices = int(len(x_train) * val_split) new_x_train, new_y_train = x_train[val_indices:], y_train[val_indices:] x_val, y_val = x_train[:val_indices], y_train[:val_indices] print(f"Training data samples: {len(new_x_train)}") print(f"Validation data samples: {len(x_val)}") print(f"Test data samples: {len(x_test)}") """ Explanation: Load the CIFAR-10 dataset End of explanation """ image_size = 32 auto = tf.data.AUTOTUNE data_augmentation = keras.Sequential( [layers.RandomCrop(image_size, image_size), layers.RandomFlip("horizontal"),], name="data_augmentation", ) def make_datasets(images, labels, is_train=False): dataset = tf.data.Dataset.from_tensor_slices((images, labels)) if is_train: dataset = dataset.shuffle(batch_size * 10) dataset = dataset.batch(batch_size) if is_train: dataset = dataset.map( lambda x, y: (data_augmentation(x), y), num_parallel_calls=auto ) return dataset.prefetch(auto) train_dataset = make_datasets(new_x_train, new_y_train, is_train=True) val_dataset = make_datasets(x_val, y_val) test_dataset = make_datasets(x_test, y_test) """ Explanation: Prepare tf.data.Dataset objects Our data augmentation pipeline is different from what the authors used for the CIFAR-10 dataset, which is fine for the purpose of the example. End of explanation """ def activation_block(x): x = layers.Activation("gelu")(x) return layers.BatchNormalization()(x) def conv_stem(x, filters: int, patch_size: int): x = layers.Conv2D(filters, kernel_size=patch_size, strides=patch_size)(x) return activation_block(x) def conv_mixer_block(x, filters: int, kernel_size: int): # Depthwise convolution. 
x0 = x x = layers.DepthwiseConv2D(kernel_size=kernel_size, padding="same")(x) x = layers.Add()([activation_block(x), x0]) # Residual. # Pointwise convolution. x = layers.Conv2D(filters, kernel_size=1)(x) x = activation_block(x) return x def get_conv_mixer_256_8( image_size=32, filters=256, depth=8, kernel_size=5, patch_size=2, num_classes=10 ): """ConvMixer-256/8: https://openreview.net/pdf?id=TVHS5Y4dNvM. The hyperparameter values are taken from the paper. """ inputs = keras.Input((image_size, image_size, 3)) x = layers.Rescaling(scale=1.0 / 255)(inputs) # Extract patch embeddings. x = conv_stem(x, filters, patch_size) # ConvMixer blocks. for _ in range(depth): x = conv_mixer_block(x, filters, kernel_size) # Classification block. x = layers.GlobalAvgPool2D()(x) outputs = layers.Dense(num_classes, activation="softmax")(x) return keras.Model(inputs, outputs) """ Explanation: ConvMixer utilities The following figure (taken from the original paper) depicts the ConvMixer model: ConvMixer is very similar to the MLP-Mixer, model with the following key differences: Instead of using fully-connected layers, it uses standard convolution layers. Instead of LayerNorm (which is typical for ViTs and MLP-Mixers), it uses BatchNorm. Two types of convolution layers are used in ConvMixer. (1): Depthwise convolutions, for mixing spatial locations of the images, (2): Pointwise convolutions (which follow the depthwise convolutions), for mixing channel-wise information across the patches. Another keypoint is the use of larger kernel sizes to allow a larger receptive field. End of explanation """ # Code reference: # https://keras.io/examples/vision/image_classification_with_vision_transformer/. 
def run_experiment(model): optimizer = tfa.optimizers.AdamW( learning_rate=learning_rate, weight_decay=weight_decay ) model.compile( optimizer=optimizer, loss="sparse_categorical_crossentropy", metrics=["accuracy"], ) checkpoint_filepath = "/tmp/checkpoint" checkpoint_callback = keras.callbacks.ModelCheckpoint( checkpoint_filepath, monitor="val_accuracy", save_best_only=True, save_weights_only=True, ) history = model.fit( train_dataset, validation_data=val_dataset, epochs=num_epochs, callbacks=[checkpoint_callback], ) model.load_weights(checkpoint_filepath) _, accuracy = model.evaluate(test_dataset) print(f"Test accuracy: {round(accuracy * 100, 2)}%") return history, model """ Explanation: The model used in this experiment is termed as ConvMixer-256/8 where 256 denotes the number of channels and 8 denotes the depth. The resulting model only has 0.8 million parameters. Model training and evaluation utility End of explanation """ conv_mixer_model = get_conv_mixer_256_8() history, conv_mixer_model = run_experiment(conv_mixer_model) """ Explanation: Train and evaluate model End of explanation """ # Code reference: https://bit.ly/3awIRbP. def visualization_plot(weights, idx=1): # First, apply min-max normalization to the # given weights to avoid isotrophic scaling. p_min, p_max = weights.min(), weights.max() weights = (weights - p_min) / (p_max - p_min) # Visualize all the filters. num_filters = 256 plt.figure(figsize=(8, 8)) for i in range(num_filters): current_weight = weights[:, :, :, i] if current_weight.shape[-1] == 1: current_weight = current_weight.squeeze() ax = plt.subplot(16, 16, idx) ax.set_xticks([]) ax.set_yticks([]) plt.imshow(current_weight) idx += 1 # We first visualize the learned patch embeddings. patch_embeddings = conv_mixer_model.layers[2].get_weights()[0] visualization_plot(patch_embeddings) """ Explanation: The gap in training and validation performance can be mitigated by using additional regularization techniques. 
Nevertheless, being able to get to ~83% accuracy within 10 epochs with 0.8 million parameters is a strong result. Visualizing the internals of ConvMixer We can visualize the patch embeddings and the learned convolution filters. Recall that each patch embedding and intermediate feature map have the same number of channels (256 in this case). This will make our visualization utility easier to implement. End of explanation """ # First, print the indices of the convolution layers that are not # pointwise convolutions. for i, layer in enumerate(conv_mixer_model.layers): if isinstance(layer, layers.DepthwiseConv2D): if layer.get_config()["kernel_size"] == (5, 5): print(i, layer) idx = 26 # Taking a kernel from the middle of the network. kernel = conv_mixer_model.layers[idx].get_weights()[0] kernel = np.expand_dims(kernel.squeeze(), axis=2) visualization_plot(kernel) """ Explanation: Even though we did not train the network to convergence, we can notice that different patches show different patterns. Some share similarity with others while some are very different. These visualizations are more salient with larger image sizes. Similarly, we can visualize the raw convolution kernels. This can help us understand the patterns to which a given kernel is receptive. End of explanation """
slundberg/shap
notebooks/api_examples/plots/waterfall.ipynb
mit
import xgboost import shap # train XGBoost model X,y = shap.datasets.adult() model = xgboost.XGBClassifier().fit(X, y) # compute SHAP values explainer = shap.Explainer(model, X) shap_values = explainer(X) """ Explanation: waterfall plot This notebook is designed to demonstrate (and so document) how to use the shap.plots.waterfall function. It uses an XGBoost model trained on the classic UCI adult income dataset (which is classification task to predict if people made over \$50k in the 90s). End of explanation """ shap.plots.waterfall(shap_values[0]) """ Explanation: Waterfall plots are designed to display explanations for individual predictions, so they expect a single row of an Explanation object as input. The bottom of a waterfall plot starts as the expected value of the model output, and then each row shows how the positive (red) or negative (blue) contribution of each feature moves the value from the expected model output over the background dataset to the model output for this prediction. Below is an example that plots the first explanation. Note that by default SHAP explains XGBoost classifer models in terms of their margin output, before the logistic link function. That means the units on the x-axis are log-odds units, so negative values imply probabilies of less than 0.5 that the person makes over $50k annually. The gray text before the feature names shows the value of each feature for this sample. End of explanation """ shap.plots.waterfall(shap_values[0], max_display=20) """ Explanation: Note that in the above explanation the three least impactful features have been collapsed into a single term so that we don't show more than 10 rows in the plot. The default limit of 10 rows can be changed using the max_display argument: End of explanation """ shap.plots.scatter(shap_values[:,"Capital Gain"]) """ Explanation: It is interesting that having a capital gain of \$2,174 dramatically reduces this person's predicted probability of making over \$50k annually. 
Since waterfall plots only show a single sample worth of data, we can't see the impact of changing capital gain. To see this we can use a scatter plot, which shows how low values for captial gain are a more negative predictor of income that no captial gain at all. Why this happens would require a deeper dive into the data, and should also involve training a model more carefully and with bootstrap resamples to quantify any uncertainty in the model building process. End of explanation """
gregmedlock/Medusa
docs/stats_compare.ipynb
mit
import medusa from medusa.test import create_test_ensemble ensemble = create_test_ensemble("Staphylococcus aureus") import pandas as pd biolog_base = pd.read_csv("../medusa/test/data/biolog_base_composition.csv", sep=",") biolog_base # convert the biolog base to a dictionary, which we can use to set ensemble.base_model.medium directly. biolog_base = {'EX_'+component:1000 for component in biolog_base['ID']} # Double check that the objective is set to the biomass reaction. # For this model, 'bio1' is the id of the biomass reaction. ensemble.base_model.objective = 'bio1' """ Explanation: Statistical testing for ensemble simulations In traditional COBRA simulations with a single model, most simulations result in a single quantity of interest, thus statistical comparisons usually don't make sense. For example, when simulating growth in two different media conditions, a single model can only output a single predicted growth rate for each condition. When accounting for uncertainty in model structure using an ensemble, these simulations generate a distribution rather than a single value. Because we are no longer comparing two individual values, proper interpretation requires statistical assessment of the distributions our ensemble simulations generate. In this example, we demonstrate this concept and one statistical option for univariate comparisons (e.g. comparisons between two conditions). First, let's load an ensemble for Staphylococcus aureus and the recipe for biolog growth media, which we'll use to simulate growth in single carbon source media. 
End of explanation """ from medusa.flux_analysis import flux_balance carbon_sources = ["EX_cpd00027_e","EX_cpd00179_e"] fluxes = {} for carbon_source in carbon_sources: biolog_base[carbon_source] = 10 ensemble.base_model.medium = biolog_base fluxes[carbon_source] = flux_balance.optimize_ensemble(ensemble,return_flux='bio1', num_processes = 4) biolog_base[carbon_source] = 0 """ Explanation: Let's simulate growth on two different carbon sources, D-glucose (metabolite id: cpd00027) and maltose (metabolite id: cpd00179). End of explanation """ import matplotlib.pylab as plt import numpy as np fig, ax = plt.subplots() bins=np.histogram(np.hstack((fluxes[carbon_sources[0]]['bio1'],fluxes[carbon_sources[1]]['bio1'])), bins=20)[1] plt.hist(fluxes[carbon_sources[0]]['bio1'], bins = bins, label=carbon_sources[0], color = "red", alpha = 0.5) plt.hist(fluxes[carbon_sources[1]]['bio1'], bins = bins, label=carbon_sources[1], color = "blue", alpha = 0.5) plt.axvline(x=fluxes[carbon_sources[0]]['bio1'].mean(), c = 'red') plt.axvline(x=fluxes[carbon_sources[1]]['bio1'].mean(), c = 'blue') ax.set_ylabel('# ensemble members') ax.set_xlabel('Flux through biomass reaction') ax.legend() plt.show() """ Explanation: Now let's visualize the distributions of predicted flux through biomass using matplotlib. We'll generate a histogram for each condition, and plot the mean using a vertical line: End of explanation """ from scipy.stats import wilcoxon cond1 = fluxes[carbon_sources[0]].copy() cond2 = fluxes[carbon_sources[1]].copy() cond1.columns = [carbon_sources[0]] cond2.columns = [carbon_sources[1]] both_conditions = pd.concat([cond1,cond2], axis = 1, join_axes = [cond1.index]) wilcoxon(x=both_conditions[carbon_sources[0]],y=both_conditions[carbon_sources[1]]) """ Explanation: Visually, we can see the mean for D-glucose (cpd00027) is slightly lower than for maltose (cpd00179). 
To evaluate this statistically, we'll use the Wilcoxon signed-rank test (implemented in SciPy), which tests the null hypothesis that the difference between paired samples (e.g. growth in D-glucose minus growth in maltose for each ensemble member) is symmetrically distributed around zero. Here, we choose a statistical test meant for paired data because each simulation result in one media condition has a related simulation result in the other condition which was generated using the same ensemble member. The Wilcoxon signed-rank test is suitable for paired univariate comparisons regardless of the distribution of data (e.g. when data are non-normally distributed, replace a paired t-test with the Wolcoxon signed-rank test). End of explanation """
NREL/bifacial_radiance
docs/tutorials/18 - AgriPV - Coffee Plantation with Tree Modeling.ipynb
bsd-3-clause
import bifacial_radiance import os from pathlib import Path import numpy as np import pandas as pd testfolder = str(Path().resolve().parent.parent / 'bifacial_radiance' / 'TEMP' / 'Tutorial_18') if not os.path.exists(testfolder): os.makedirs(testfolder) resultsfolder = os.path.join(testfolder, 'results') """ Explanation: 12d - AgriPV: Designing for adecuate crop shading This journal supports the process of designing a solar panel configuration to appropriately represent ideal shading conditions for coffee production underneath elevated solar panels. The coffee trees would be under and/or in between elevated solar panels (panels would be elevated 6, 8, or 10 ft tall). The light/shade analysis helps determine appropriate panel heights and spacings t0 achieve appropriate shading. The desired level of shading is maximum of 30% (i.e., 70% of normal, unshaded light). Details: * The coffee plants are expected to be \~5 ft tall. (5-6 ft tall and 3 ft wide (<a href="https://realgoodcoffeeco.com/blogs/realgoodblog/how-to-grow-a-coffee-plant-at-home#:~:text=However%2C%20you%20must%20keep%20in,tall%20and%203%20feet%20wide">Reference</a>) * Location: 18.202142, -66.759187; (18°12'07.7"N 66°45'33.1"W) * Desired area of initial analysis: 400-600 ft2 (37-55 m2) * Racking: Fixed-tilt panels * Panel size: 3.3 feet x 5.4 feet (1m x 1.64m) * Analysis variations: <ul> <li> a. Panel height: would like to examine heights of 6 ft, 8 ft, and 10 ft hub height. <li> b. Panel spacing (N/W): would like to look at multiple distances (e.g., 2 ft, 3 ft, 4 ft) </li> <li> c. Inter-Row spacing (E/W): would like to look at multiple distances (e.g., 2 ft, 3 ft, 4 ft)! 
</li> Steps on this Journal: <ol> <li> <a href='#step1'> <u><b>Loop to Raytrace and sample irradiance at where Three would be located </u></b></li> <li> <a href='#step2'> Calculate GHI for Comparisons </li> <ul><li> <a href='#step2a'> Option 1: Raytrace of Empty Field </li></ul> <ul><li> <a href='#step2b'> Option 2: Weather File </li></ul> <li> <a href='#step3'> Compile Results </li> <li> <a href='#step4'> Plot Results</li> <li> <a href='#step5'> <u><b> Raytrace with Tree Geometry <u></b></li> <ul><li> <a href='#step5a'>Tree Parameters</li></ul> <ul><li> <a href='#step5b'>Loop to Raytrace and Sample Irradiance at Each side of the Tree (N, S, E, W)</li></ul> <ul><li> <a href='#step5c'>Single simulation until MakeOct for Getting a PRETTY IMAGE </li></ul> <li> <a href='#step6'> Compile Results</li> <li> <a href='#step7'> Plot </li> </ol> ![AgriPV Coffee Trees Simulation](../images_wiki/AdvancedJournals/AgriPV_CoffeeTrees.PNG) While we have HPC scripts to do the below simulation, this journals runs all of the above so it might take some time, as there are 109 combinations of parameters explored End of explanation """ lat = 18.202142 lon = -66.759187 albedo = 0.25 # Grass value from Torres Molina, "Measuring UHI in Puerto Rico" 18th LACCEI # International Multi-Conference for Engineering, Education, and Technology ft2m = 0.3048 # Loops clearance_heights = np.array([6.0, 8.0, 10.0])* ft2m xgaps = np.array([2, 3, 4]) * ft2m Ds = np.array([2, 3, 4]) * ft2m # D is a variable that represents the spacing between rows, not-considering the collector areas. 
tilts = [round(lat), 10] x = 1.64 y = 1 azimuth = 180 nMods = 20 nRows = 7 numpanels = 1 moduletype = 'test-module' hpc = False sim_general_name = 'tutorial_18' if not os.path.exists(os.path.join(testfolder, 'EPWs')): demo = bifacial_radiance.RadianceObj('test',testfolder) epwfile = demo.getEPW(lat,lon) else: epwfile = r'EPWs\PRI_Mercedita.AP.785203_TMY3.epw' """ Explanation: General Parameters and Variables End of explanation """ demo = bifacial_radiance.RadianceObj(sim_general_name,str(testfolder)) demo.setGround(albedo) demo.readWeatherFile(epwfile) demo.genCumSky() for ch in range (0, len(clearance_heights)): clearance_height = clearance_heights[ch] for xx in range (0, len(xgaps)): xgap = xgaps[xx] for tt in range (0, len(tilts)): tilt = tilts[tt] for dd in range (0, len(Ds)): pitch = y * np.cos(np.radians(tilt))+Ds[dd] sim_name = (sim_general_name+'_ch_'+str(round(clearance_height,1))+ '_xgap_'+str(round(xgap,1))+\ '_tilt_'+str(round(tilt,1))+ '_pitch_'+str(round(pitch,1))) # Coffe plant location at: coffeeplant_x = (x+xgap)/2 coffeeplant_y = pitch/2 demo.makeModule(name=moduletype, x=x, y=y, xgap = xgap) sceneDict = {'tilt':tilt,'pitch':pitch,'clearance_height':clearance_height,'azimuth':azimuth, 'nMods': nMods, 'nRows': nRows} scene = demo.makeScene(moduletype=moduletype,sceneDict=sceneDict, hpc=hpc, radname = sim_name) octfile = demo.makeOct(octname = demo.basename , hpc=hpc) analysis = bifacial_radiance.AnalysisObj(octfile=octfile, name=sim_name) # Modify sensor position to coffee plant location frontscan, backscan = analysis.moduleAnalysis(scene=scene, sensorsy=1) groundscan = frontscan.copy() groundscan['xstart'] = coffeeplant_x groundscan['ystart'] = coffeeplant_y groundscan['zstart'] = 0.05 groundscan['orient'] = '0 0 -1' analysis.analysis(octfile, name=sim_name+'_Front&Back', frontscan=frontscan, backscan=backscan) analysis.analysis(octfile, name=sim_name+'_Ground&Back', frontscan=groundscan, backscan=backscan) """ Explanation: <a id='step1'></a> 1. 
Loop to Raytrace and sample irradiance at where Three would be located End of explanation """ sim_name = 'EMPTY' demo.makeModule(name=moduletype, x=0.001, y=0.001, xgap = 0) sceneDict = {'tilt':0,'pitch':2,'clearance_height':0.005,'azimuth':180, 'nMods': 1, 'nRows': 1} scene = demo.makeScene(moduletype=moduletype,sceneDict=sceneDict, hpc=hpc, radname = sim_name) octfile = demo.makeOct(octname = demo.basename , hpc=hpc) analysis = bifacial_radiance.AnalysisObj(octfile=octfile, name=sim_name) frontscan, backscan = analysis.moduleAnalysis(scene=scene, sensorsy=1) emptyscan = frontscan.copy() emptyscan['xstart'] = 3 emptyscan['ystart'] = 3 emptyscan['zstart'] = 0.05 emptyscan['orient'] = '0 0 -1' emptybackscan = emptyscan.copy() emptybackscan['orient'] = '0 0 1' analysis.analysis(octfile, name='_EMPTYSCAN', frontscan=emptyscan, backscan=emptybackscan) resname = os.path.join(resultsfolder, 'irr__EMPTYSCAN.csv') data = pd.read_csv(resname) puerto_rico_Year = data['Wm2Front'][0] print("YEARLY TOTAL Wh/m2:", puerto_rico_Year) """ Explanation: <a id='step2'></a> 2. 
Calculate GHI for Comparisons <a id='step2a'></a> Option 1: Raytrace of Empty Field End of explanation """ # Indexes for start of each month of interest in TMY3 8760 hours file #starts = [2881, 3626, 4346, 5090, 5835] #ends = [3621, 4341, 5085, 5829, 6550] starts = [metdata.datetime.index(pd.to_datetime('2021-05-01 6:0:0 -7')), metdata.datetime.index(pd.to_datetime('2021-06-01 6:0:0 -7')), metdata.datetime.index(pd.to_datetime('2021-07-01 6:0:0 -7')), metdata.datetime.index(pd.to_datetime('2021-08-01 6:0:0 -7')), metdata.datetime.index(pd.to_datetime('2021-09-01 6:0:0 -7'))] ends = [metdata.datetime.index(pd.to_datetime('2021-05-31 18:0:0 -7')), metdata.datetime.index(pd.to_datetime('2021-06-30 18:0:0 -7')), metdata.datetime.index(pd.to_datetime('2021-07-31 18:0:0 -7')), metdata.datetime.index(pd.to_datetime('2021-08-31 18:0:0 -7')), metdata.datetime.index(pd.to_datetime('2021-09-30 18:0:0 -7'))] ghi_PR=[] for ii in range(0, len(starts)): start = starts[ii] end = ends[ii] ghi_PR.append(demo.metdata.ghi[start:end].sum()) puerto_Rico_Monthly = ghi_PR # Wh/m2 puerto_Rico_YEAR = demo.metdata.ghi.sum() # Wh/m2 print("Monthly Values May-Sept:", puerto_Rico_Monthly, "Wh/m2") print("Year Values", puerto_Rico_YEAR, "Wh/m2") """ Explanation: <a id='step2b'></a> Option 2: Weather File End of explanation """ ch_all = [] xgap_all = [] tilt_all = [] pitch_all = [] FrontIrrad = [] RearIrrad = [] GroundIrrad = [] for ch in range (0, len(clearance_heights)): clearance_height = clearance_heights[ch] for xx in range (0, len(xgaps)): xgap = xgaps[xx] for tt in range (0, len(tilts)): tilt = tilts[tt] for dd in range (0, len(Ds)): pitch = y * np.cos(np.radians(tilt))+Ds[dd] # irr_Coffee_ch_1.8_xgap_0.6_tilt_18_pitch_1.6_Front&Back.csv sim_name = ('irr_Coffee'+'_ch_'+str(round(clearance_height,1))+ '_xgap_'+str(round(xgap,1))+\ '_tilt_'+str(round(tilt,1))+ '_pitch_'+str(round(pitch,1))+'_Front&Back.csv') sim_name2 = ('irr_Coffee'+'_ch_'+str(round(clearance_height,1))+ 
'_xgap_'+str(round(xgap,1))+\ '_tilt_'+str(round(tilt,1))+ '_pitch_'+str(round(pitch,1))+'_Ground&Back.csv') ch_all.append(clearance_height) xgap_all.append(xgap) tilt_all.append(tilt) pitch_all.append(pitch) data = pd.read_csv(os.path.join(resultsfolder, sim_name)) FrontIrrad.append(data['Wm2Front'].item()) RearIrrad.append(data['Wm2Back'].item()) data = pd.read_csv(os.path.join(resultsfolder, sim_name2)) GroundIrrad.append(data['Wm2Front'].item()) ch_all = pd.Series(ch_all, name='clearance_height') xgap_all = pd.Series(xgap_all, name='xgap') tilt_all = pd.Series(tilt_all, name='tilt') pitch_all = pd.Series(pitch_all, name='pitch') FrontIrrad = pd.Series(FrontIrrad, name='FrontIrrad') RearIrrad = pd.Series(RearIrrad, name='RearIrrad') GroundIrrad = pd.Series(GroundIrrad, name='GroundIrrad') df = pd.concat([ch_all, xgap_all, tilt_all, pitch_all, FrontIrrad, RearIrrad, GroundIrrad], axis=1) df """ Explanation: <a id='step3'></a> 3. Compile Results End of explanation """ df[['GroundIrrad_percent_GHI']] = df[['GroundIrrad']]*100/puerto_Rico_YEAR df['FrontIrrad_percent_GHI'] = df['FrontIrrad']*100/puerto_Rico_YEAR df['RearIrrad_percent_GHI'] = df['RearIrrad']*100/puerto_Rico_YEAR df['BifacialGain'] = df['RearIrrad']*0.65*100/df['FrontIrrad'] print(df['GroundIrrad_percent_GHI'].min()) print(df['GroundIrrad_percent_GHI'].max()) """ Explanation: Let's calculate some relevant metrics for irradiance End of explanation """ import seaborn as sns import matplotlib.pyplot as plt tilts_l = list(df['tilt'].unique()) ch_l = list(df['clearance_height'].unique()) print(tilts_l) print(ch_l) for tilt in tilts_l: for clearance_height in ch_l: df2=df.loc[df['tilt']==tilts[1]] df3 = df2.loc[df2['clearance_height']==clearance_heights[2]] df3['pitch']=df3['pitch'].round(1) df3['xgap']=df3['xgap'].round(1) sns.set(font_scale=2) table = df3.pivot('pitch', 'xgap', 'GroundIrrad_percent_GHI') ax = sns.heatmap(table, cmap='hot', vmin = 50, vmax= 100, annot=True) ax.invert_yaxis() figtitle = 
'Clearance Height ' + str(clearance_height/ft2m)+' ft, Tilt ' + str(tilt) + '$^\circ$' plt.title(figtitle) print(table) plt.show() """ Explanation: <a id='step4'></a> 4. Plot results End of explanation """ tree_albedo = 0.165 # Wikipedia [0.15-0.18] trunk_x = 0.8 * ft2m trunk_y = trunk_x trunk_z = 1 * ft2m tree_x = 3 * ft2m tree_y = tree_x tree_z = 4 * ft2m """ Explanation: <a id='step5'></a> 5. Raytrace with Tree Geometry <a id='step5a'></a> Tree parameters End of explanation """ for ch in range (0, len(clearance_heights)): clearance_height = clearance_heights[ch] for xx in range (0, len(xgaps)): xgap = xgaps[xx] for tt in range (0, len(tilts)): tilt = tilts[tt] for dd in range (0, len(Ds)): pitch = y * np.cos(np.radians(tilt))+Ds[dd] sim_name = (sim_general_name+'_ch_'+str(round(clearance_height,1))+ '_xgap_'+str(round(xgap,1))+\ '_tilt_'+str(round(tilt,1))+ '_pitch_'+str(round(pitch,1))) coffeeplant_x = (x+xgap)/2 coffeeplant_y = pitch demo.makeModule(name=moduletype, x=x, y=y, xgap = xgap) sceneDict = {'tilt':tilt,'pitch':pitch,'clearance_height':clearance_height,'azimuth':azimuth, 'nMods': nMods, 'nRows': nRows} scene = demo.makeScene(moduletype=moduletype,sceneDict=sceneDict, hpc=hpc, radname = sim_name) # Appending the Trees here text = '' for ii in range(0,3): coffeeplant_x = (x+xgap)/2 + (x+xgap)*ii for jj in range(0,3): coffeeplant_y = pitch/2 + pitch*jj name = 'tree'+str(ii)+str(jj) text += '\r\n! genrev Metal_Grey tube{}tree t*{} {} 32 | xform -t {} {} {}'.format('head'+str(ii)+str(jj),tree_z, tree_x/2.0, -trunk_x/2.0 + coffeeplant_x, -trunk_x/2.0 + coffeeplant_y, trunk_z) text += '\r\n! 
genrev Metal_Grey tube{}tree t*{} {} 32 | xform -t {} {} 0'.format('trunk'+str(ii)+str(jj),trunk_z, trunk_x/2.0, -trunk_x/2.0 + coffeeplant_x, -trunk_x/2.0 + coffeeplant_y) customObject = demo.makeCustomObject(name,text) demo.appendtoScene(radfile=scene.radfiles, customObject=customObject, text="!xform -rz 0") octfile = demo.makeOct(octname = demo.basename , hpc=hpc) analysis = bifacial_radiance.AnalysisObj(octfile=octfile, name=sim_name) ii = 1 jj = 1 coffeeplant_x = (x+xgap)/2 + (x+xgap)*ii coffeeplant_y = pitch/2 + pitch*jj frontscan, backscan = analysis.moduleAnalysis(scene=scene, sensorsy=1) treescan_south = frontscan.copy() treescan_north = frontscan.copy() treescan_east = frontscan.copy() treescan_west = frontscan.copy() treescan_south['xstart'] = coffeeplant_x treescan_south['ystart'] = coffeeplant_y - tree_x/2.0 - 0.05 treescan_south['zstart'] = tree_z treescan_south['orient'] = '0 1 0' treescan_north['xstart'] = coffeeplant_x treescan_north['ystart'] = coffeeplant_y + tree_x/2.0 + 0.05 treescan_north['zstart'] = tree_z treescan_north['orient'] = '0 -1 0' treescan_east['xstart'] = coffeeplant_x + tree_x/2.0 + 0.05 treescan_east['ystart'] = coffeeplant_y treescan_east['zstart'] = tree_z treescan_east['orient'] = '-1 0 0' treescan_west['xstart'] = coffeeplant_x - tree_x/2.0 - 0.05 treescan_west['ystart'] = coffeeplant_y treescan_west['zstart'] = tree_z treescan_west['orient'] = '1 0 0' groundscan = frontscan.copy() groundscan['xstart'] = coffeeplant_x groundscan['ystart'] = coffeeplant_y groundscan['zstart'] = 0.05 groundscan['orient'] = '0 0 -1' analysis.analysis(octfile, name=sim_name+'_North&South', frontscan=treescan_north, backscan=treescan_south) analysis.analysis(octfile, name=sim_name+'_East&West', frontscan=treescan_east, backscan=treescan_west) """ Explanation: <a id='step5b'></a> Loop to Raytrace and Sample Irradiance at Each side of the Tree (N, S, E, W) End of explanation """ tree_albedo = 0.165 # Wikipedia [0.15-0.18] trunk_x = 0.8 * ft2m 
trunk_y = trunk_x trunk_z = 1 * ft2m tree_x = 3 * ft2m tree_y = tree_x tree_z = 4 * ft2m clearance_height = clearance_heights[0] xgap = xgaps[-1] tilt = tilts[0] pitch = y * np.cos(np.radians(tilt))+Ds[-1] sim_name = (sim_general_name+'_ch_'+str(round(clearance_height,1))+ '_xgap_'+str(round(xgap,1))+\ '_tilt_'+str(round(tilt,1))+ '_pitch_'+str(round(pitch,1))) demo = bifacial_radiance.RadianceObj(sim_name,str(testfolder)) demo.setGround(albedo) demo.readWeatherFile(epwfile) coffeeplant_x = (x+xgap)/2 coffeeplant_y = pitch demo.gendaylit(4020) demo.makeModule(name=moduletype, x=x, y=y, xgap = xgap) sceneDict = {'tilt':tilt,'pitch':pitch,'clearance_height':clearance_height,'azimuth':azimuth, 'nMods': nMods, 'nRows': nRows} scene = demo.makeScene(moduletype=moduletype,sceneDict=sceneDict, hpc=hpc, radname = sim_name) for ii in range(0,3): coffeeplant_x = (x+xgap)/2 + (x+xgap)*ii for jj in range(0,3): coffeeplant_y = pitch/2 + pitch*jj name = 'tree'+str(ii)+str(jj) text = '! genrev litesoil tube{}tree t*{} {} 32 | xform -t {} {} {}'.format('head'+str(ii)+str(jj),tree_z, tree_x/2.0, -trunk_x/2.0 + coffeeplant_x, -trunk_x/2.0 + coffeeplant_y, trunk_z) text += '\r\n! 
genrev litesoil tube{}tree t*{} {} 32 | xform -t {} {} 0'.format('trunk'+str(ii)+str(jj),trunk_z, trunk_x/2.0, -trunk_x/2.0 + coffeeplant_x, -trunk_x/2.0 + coffeeplant_y) customObject = demo.makeCustomObject(name,text) demo.appendtoScene(radfile=scene.radfiles, customObject=customObject, text="!xform -rz 0") octfile = demo.makeOct(octname = demo.basename , hpc=hpc) """ Explanation: <a id='step5c'></a> Single simulation until MakeOct for Getting a PRETTY IMAGE End of explanation """ # irr_Coffee_ch_1.8_xgap_0.6_tilt_18_pitch_1.6_Front&Back.csv ch_all = [] xgap_all = [] tilt_all = [] pitch_all = [] NorthIrrad = [] SouthIrrad = [] EastIrrad = [] WestIrrad = [] ft2m = 0.3048 clearance_heights = np.array([6.0, 8.0, 10.0])* ft2m xgaps = np.array([2, 3, 4]) * ft2m Ds = np.array([2, 3, 4]) * ft2m # D is a variable that represents the spacing between rows, not-considering the collector areas. tilts = [18, 10] y = 1 for ch in range (0, len(clearance_heights)): clearance_height = clearance_heights[ch] for xx in range (0, len(xgaps)): xgap = xgaps[xx] for tt in range (0, len(tilts)): tilt = tilts[tt] for dd in range (0, len(Ds)): pitch = y * np.cos(np.radians(tilt))+Ds[dd] sim_name = ('irr_Coffee'+'_ch_'+str(round(clearance_height,1))+ '_xgap_'+str(round(xgap,1))+\ '_tilt_'+str(round(tilt,1))+ '_pitch_'+str(round(pitch,1))+'_North&South.csv') sim_name2 = ('irr_Coffee'+'_ch_'+str(round(clearance_height,1))+ '_xgap_'+str(round(xgap,1))+\ '_tilt_'+str(round(tilt,1))+ '_pitch_'+str(round(pitch,1))+'_East&West.csv') ch_all.append(clearance_height) xgap_all.append(xgap) tilt_all.append(tilt) pitch_all.append(pitch) data = pd.read_csv(os.path.join(resultsfolder, sim_name)) NorthIrrad.append(data['Wm2Front'].item()) SouthIrrad.append(data['Wm2Back'].item()) data = pd.read_csv(os.path.join(resultsfolder, sim_name2)) EastIrrad.append(data['Wm2Front'].item()) WestIrrad.append(data['Wm2Back'].item()) ch_all = pd.Series(ch_all, name='clearance_height') xgap_all = pd.Series(xgap_all, 
name='xgap') tilt_all = pd.Series(tilt_all, name='tilt') pitch_all = pd.Series(pitch_all, name='pitch') NorthIrrad = pd.Series(NorthIrrad, name='NorthIrrad') SouthIrrad = pd.Series(SouthIrrad, name='SouthIrrad') EastIrrad = pd.Series(EastIrrad, name='EastIrrad') WestIrrad = pd.Series(WestIrrad, name='WestIrrad') df = pd.concat([ch_all, xgap_all, tilt_all, pitch_all, NorthIrrad, SouthIrrad, EastIrrad, WestIrrad], axis=1) df.to_csv(os.path.join(resultsfolder,'TREES.csv')) trees = pd.read_csv(os.path.join(resultsfolder, 'TREES.csv')) trees.tail() trees['TreeIrrad_percent_GHI'] = trees[['NorthIrrad','SouthIrrad','EastIrrad','WestIrrad']].mean(axis=1)*100/puerto_Rico_YEAR print(trees['TreeIrrad_percent_GHI'].min()) print(trees['TreeIrrad_percent_GHI'].max()) """ Explanation: Now you can view the Geometry by navigating on the terminal to the testfolder, and using the octfile name generated above rvu -vf views\front.vp -e .0265652 -vp 2 -21 2.5 -vd 0 1 0 Coffee_ch_1.8_xgap_1.2_tilt_18_pitch_2.2.oct <a id='step6'></a> 6. Compile Results Trees End of explanation """ tilts_l = list(trees['tilt'].unique()) ch_l = list(trees['clearance_height'].unique()) print(tilts_l) print(ch_l) for tilt in tilts_l: for clearance_height in ch_l: df2=trees.loc[df['tilt']==tilts[1]] df3 = df2.loc[df2['clearance_height']==clearance_heights[2]] df3['pitch']=df3['pitch'].round(1) df3['xgap']=df3['xgap'].round(1) sns.set(font_scale=2) table = df3.pivot('pitch', 'xgap', 'TreeIrrad_percent_GHI') ax = sns.heatmap(table, cmap='hot', vmin = 22, vmax= 35, annot=True) ax.invert_yaxis() figtitle = 'Clearance Height ' + str(clearance_height/ft2m)+' ft, Tilt ' + str(tilt) + '$^\circ$' plt.title(figtitle) print(table) plt.show() """ Explanation: <a id='step7'></a> 7. Plot End of explanation """
massimo-nocentini/on-python
UniFiCourseSpring2020/introduction.ipynb
mit
__AUTHORS__ = {'am': ("Andrea Marino", "andrea.marino@unifi.it",), 'mn': ("Massimo Nocentini", "massimo.nocentini@unifi.it", "https://github.com/massimo-nocentini/",)} __KEYWORDS__ = ['Python', 'Jupyter', 'notebooks', 'keynote',] """ Explanation: <p> <img src="http://www.cerm.unifi.it/chianti/images/logo%20unifi_positivo.jpg" alt="UniFI logo" style="float: left; width: 20%; height: 20%;"> <div align="right"> <small> Massimo Nocentini, PhD. <br><br> February 7, 2020: init </small> </div> </p> <br> <br> <div align="center"> <b>Abstract</b><br> A (very concise) introduction to the Python ecosystem. </div> End of explanation """ outline = [] outline.append('Hello!') outline.append('Python') outline.append('Whys and refs') outline.append('On the shoulders of giants') outline.append('Course agenda') import this """ Explanation: <center><img src="https://upload.wikimedia.org/wikipedia/commons/c/c3/Python-logo-notext.svg"></center> End of explanation """
sangheestyle/ml2015project
howto/model02_linear_models.ipynb
mit
import gzip import cPickle as pickle with gzip.open("../data/train.pklz", "rb") as train_file: train_set = pickle.load(train_file) with gzip.open("../data/test.pklz", "rb") as test_file: test_set = pickle.load(test_file) with gzip.open("../data/questions.pklz", "rb") as questions_file: questions = pickle.load(questions_file) """ Explanation: model 02 Load train, test, questions data from pklz First of all, we need to read those three data set. End of explanation """ X = [] Y = [] for key in train_set: # We only care about positive case at this time if train_set[key]['position'] < 0: continue uid = train_set[key]['uid'] qid = train_set[key]['qid'] pos = train_set[key]['position'] q_length = max(questions[qid]['pos_token'].keys()) feat = [uid, qid, q_length] X.append(feat) Y.append([pos]) print len(X) print len(Y) print X[0], Y[0] """ Explanation: Make training set For training model, we might need to make feature and lable pair. In this case, we will use only uid, qid, and position for feature. 
End of explanation """ from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet from sklearn.cross_validation import train_test_split, cross_val_score X_train, X_test, Y_train, Y_test = train_test_split (X, Y) regressor = LinearRegression() scores = cross_val_score(regressor, X, Y, cv=10) print 'Cross validation r-squared scores:', scores.mean() print scores regressor = Ridge() scores = cross_val_score(regressor, X, Y, cv=10) print 'Cross validation r-squared scores:', scores.mean() print scores regressor = Lasso() scores = cross_val_score(regressor, X, Y, cv=10) print 'Cross validation r-squared scores:', scores.mean() print scores regressor = ElasticNet() scores = cross_val_score(regressor, X, Y, cv=10) print 'Cross validation r-squared scores:', scores.mean() print scores from sklearn.linear_model import SGDRegressor from sklearn.cross_validation import cross_val_score from sklearn.preprocessing import StandardScaler from sklearn.cross_validation import train_test_split X_scaler = StandardScaler() Y_scaler = StandardScaler() X_train, X_test, Y_train, Y_test = train_test_split (X, Y) X_train = X_scaler.fit_transform(X_train) Y_train = Y_scaler.fit_transform(Y_train) X_test = X_scaler.fit_transform(X_test) Y_test = Y_scaler.fit_transform(Y_test) """ Explanation: It means that user 0 tried to solve question number 1 which has 77 tokens for question and he or she answered at 61st token. Train model and make predictions Let's train model and make predictions. We will use simple Linear Regression at this moment. 
End of explanation """ regressor = SGDRegressor(loss='squared_loss', penalty='l1') scores = cross_val_score(regressor, X_train, Y_train, cv=10) print 'Cross validation r-squared scores:', scores.mean() print scores X_test = [] test_id = [] for key in test_set: test_id.append(key) uid = test_set[key]['uid'] qid = test_set[key]['qid'] q_length = max(questions[qid]['pos_token'].keys()) feat = [uid, qid, q_length] X_test.append(feat) X_scaler = StandardScaler() Y_scaler = StandardScaler() X_train = X_scaler.fit_transform(X) Y_train = Y_scaler.fit_transform(Y) X_test = X_scaler.fit_transform(X_test) regressor.fit(X_train, Y_train) predictions = regressor.predict(X_test) predictions = Y_scaler.inverse_transform(predictions) predictions = sorted([[id, predictions[index]] for index, id in enumerate(test_id)]) print len(predictions) predictions[:5] """ Explanation: http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDRegressor.html There has four loss-function. ‘squared_loss’, ‘huber’, ‘epsilon_insensitive’, or ‘squared_epsilon_insensitive’. Among those, squared_loss is the best in this case. End of explanation """ import csv predictions.insert(0,["id", "position"]) with open('guess.csv', 'wb') as fp: writer = csv.writer(fp, delimiter=',') writer.writerows(predictions) """ Explanation: Here is 4749 predictions. Writing submission. OK, let's writing submission into guess.csv file. In the given submission form, we realized that we need to put header. So, we will insert header at the first of predictions, and then make it as a file. End of explanation """
cliburn/sta-663-2017
notebook/11C_IPyParallel.ipynb
mit
import numpy as np """ Explanation: Using ipyparallel Parallel execution is tightly integrated with Jupyter in the ipyparallel package. Install with bash conda install ipyparallel ipcluster nbextension enable Official documentation End of explanation """ from ipyparallel import Client """ Explanation: Starting engines We will only use engines on local cores which does not require any setup - see docs for detailed instructions on how to set up a remote cluster, including setting up to use Amazon EC2 clusters. You can start a cluster on the IPython Clusters tab in the main Jupyter browser window or via the command line with ipcluster start -n &lt;put desired number of engines to run here&gt; The main advantage of developing parallel applications using ipyparallel is that it can be done interactively within Jupyter. Basic concepts of ipyparallel End of explanation """ rc = Client() rc.ids """ Explanation: The client connects to the cluster of "remote" engines that perfrom the actual computation. These engines may be on the same machine or on a cluster. End of explanation """ dv = rc[:] """ Explanation: A view provides access to a subset of the engines available to the client. Jobs are submitted to the engines via the view. A direct view allows the user to explicitly send work specific engines. The load balanced view is like the Pool object in multiprocessing, and manages the scheduling and distribution of jobs for you. Direct view End of explanation """ dv.map_sync(lambda x, y, z: x + y + z, range(10), range(10), range(10)) """ Explanation: Add 10 sets of 3 numbers in parallel using all engines. End of explanation """ rc[::2].map_sync(lambda x, y, z: x + y + z, range(10), range(10), range(10)) """ Explanation: Add 10 sets of 3 numbers in parallel using only alternate engines. End of explanation """ rc[2].map_sync(lambda x, y, z: x + y + z, range(10), range(10), range(10)) """ Explanation: Add 10 sets of 3 numbers using a specific engine. 
End of explanation """ lv = rc.load_balanced_view() lv.map_sync(lambda x: sum(x), np.random.random((10, 100000))) """ Explanation: Load balanced view Use this when you have many jobs that take different amounts of time to complete. End of explanation """ rc[1:3].apply_sync(lambda x, y: x**2 + y**2, 3, 4) rc[1:3].apply_sync(lambda x, y: x**2 + y**2, x=3, y=4) """ Explanation: Calling functions with apply In contrast to map, apply is just a simple function call run on all remote engines, and has the usual function signature apply(f, *args, **kwargs). It is a primitive on which other more useful functions (such as map) are built upon. End of explanation """ res = dv.map_sync(lambda x, y, z: x + y + z, range(10), range(10), range(10)) res """ Explanation: Synchronous and asynchronous jobs We have used the map_sync and apply_sync methods. The sync suffix indicate that we want to run a synchronous job. Synchronous jobs block until all the computation is done and return the result. End of explanation """ res = dv.map_async(lambda x, y, z: x + y + z, range(10), range(10), range(10)) res res.done() res.get() """ Explanation: In contrast, asynchronous jobs return immediately so that you can do other work, but returns a AsyncMapResult object, similar to the future object returned by the concurrent.futures package. You can query its status, cancel running jobs and retrieve results once they have been computed. End of explanation """ res = dv.map(lambda x, y, z: x + y + z, range(10), range(10), range(10)) res.get() """ Explanation: There is also a map method that by default uses asynchronous mode, but you can change this by setting the block attribute or function argument. End of explanation """ res = dv.map(lambda x, y, z: x + y + z, range(10), range(10), range(10), block=True) res """ Explanation: Change blocking mode for just one job. 
End of explanation """ dv.block = True res = dv.map(lambda x, y, z: x + y + z, range(10), range(10), range(10)) res """ Explanation: Change blocking mode for this view so that all jobs are synchronous. End of explanation """ @dv.remote(block = True) def f1(n): import numpy as np return np.random.rand(n) f1(4) """ Explanation: Remote function decorators The @remote decorator results in functions that will execute simultaneously on all engines in a view. For example, you can use this decorator if you always want to run $n$ independent parallel MCMC chains. End of explanation """ @dv.parallel(block = True) def f2(x): return x f2(range(15)) @dv.parallel(block = True) def f3(x): return sum(x) f3(range(15)) @dv.parallel(block = True) def f4(x, y): return x + y f4(np.arange(10), np.arange(10)) """ Explanation: The @parallel decorator breaks up elementwise operations and distributes them. End of explanation """ def mandel1(x, y, max_iters=80): c = complex(x, y) z = 0.0j for i in range(max_iters): z = z*z + c if z.real*z.real + z.imag*z.imag >= 4: return i return max_iters @dv.parallel(block = True) def mandel2(x, y, max_iters=80): c = complex(x, y) z = 0.0j for i in range(max_iters): z = z*z + c if z.real*z.real + z.imag*z.imag >= 4: return i return max_iters x = np.arange(-2, 1, 0.01) y = np.arange(-1, 1, 0.01) X, Y = np.meshgrid(x, y) %%time im1 = np.reshape(list(map(mandel1, X.ravel(), Y.ravel())), (len(y), len(x))) %%time im2 = np.reshape(mandel2.map(X.ravel(), Y.ravel()), (len(y), len(x))) fig, axes = plt.subplots(1, 2, figsize=(12, 4)) axes[0].grid(False) axes[0].imshow(im1, cmap='jet') axes[1].grid(False) axes[1].imshow(im2, cmap='jet') pass """ Explanation: Example: Use the @parallel decorator to speed up Mandelbrot calculations End of explanation """ import time import datetime def g1(x): time.sleep(0.1) now = datetime.datetime.now() return (now, x) """ Explanation: Functions with dependencies Modules imported locally are NOT available in the remote engines. 
End of explanation """ def g2(x): import time, datetime time.sleep(0.1) now = datetime.datetime.now() return (now, x) dv.map_sync(g2, range(5)) """ Explanation: This fails with an Exception because the time and datetime modules are not imported in the remote engines. python dv.map_sync(g1, range(10)) The simplest fix is to import the module(s) within the function End of explanation """ with dv.sync_imports(): import time import datetime """ Explanation: Alternatively, you can simultaneously import both locally and in the remote engines with the sync_import context manager. End of explanation """ dv.map_sync(g1, range(5)) """ Explanation: Now the g1 function will work. End of explanation """ from ipyparallel import require @require('scipy.stats') def g3(x): return scipy.stats.norm(0,1).pdf(x) dv.map(g3, np.arange(-3, 4)) """ Explanation: Finally, there is also a require decorator that can be used. This will force the remote engine to import all packages given. End of explanation """ dv.push(dict(a=3, b=2)) def f(x): global a, b return a*x + b dv.map_sync(f, range(5)) dv.pull(('a', 'b')) """ Explanation: Moving data around We can send data to remote engines with push and retrieve them with pull, or using the dictionary interface. For example, you can use this to distribute a large lookup table to all engines once instead of repeatedly as a function argument. End of explanation """ dv['c'] = 5 dv['a'] dv['c'] """ Explanation: You can also use the dictionary interface as an alternative to push and pull End of explanation """ with dv.sync_imports(): import numba @numba.jit def f_numba(x): return np.sum(x) dv.map(f_numba, np.random.random((6, 4))) """ Explanation: Working with compiled code Numba Using numba.jit is straightforward. 
End of explanation """ %load_ext cython %%cython -n cylib import cython import numpy as np cimport numpy as np @cython.boundscheck(False) @cython.wraparound(False) def f(np.ndarray[np.float64_t, ndim=1] x): x.setflags(write=True) cdef int i cdef int n = x.shape[0] cdef double s = 0 for i in range(n): s += x[i] return s """ Explanation: Cython We need to do some extra work to make sure the shared libary compiled with cython is available to the remote engines: Compile a named shared module with the -n flag Use np.ndarray[dtype, ndim] in place of memroy views for example, double[:] becomes np.ndarray[np.float64_t, ndim=1] Move the shared library to the site-packages directory Cython magic moules can be found in ~/.ipython/cython Import the modules remtoely in the usual ways End of explanation """ import os import glob import site import shutil src = glob.glob(os.path.join(os.path.expanduser('~/'), '.ipython', 'cython', 'cylib*so'))[0] dst = site.getsitepackages()[0] shutil.copy(src, dst) with dv.sync_imports(): import cylib """ Explanation: Copy the compiled module in site-packages so that the remote engines can import it End of explanation """ dv.map(cylib.f, np.random.random((6, 4))) """ Explanation: Using parallel magic commands In practice, most users will simply use the %px magic to execute code in parallel from within the notebook. This is the simplest way to use ipyparallel. End of explanation """ %px import numpy as np %px a = np.random.random(4) %px a.sum() """ Explanation: %px This sends the command to all targeted engines. End of explanation """ dv.scatter('a', np.random.randint(0, 10, 10)) %px print(a) dv.gather('a') dv.scatter('xs', range(24)) %px y = [x**2 for x in xs] np.array(dv.gather('y')) """ Explanation: List comprehensions in parallel The scatter method partitions and distributes data to all engines. The gather method does the reverse. Together with %px, we can simulate parallel list comprehensions. 
End of explanation """ %%px --target [1,3] %matplotlib inline import seaborn as sns x = np.random.normal(np.random.randint(-10, 10), 1, 100) sns.kdeplot(x); """ Explanation: Running magic functions in parallel End of explanation """ %%px --target [1,3] --noblock %matplotlib inline import seaborn as sns x = np.random.normal(np.random.randint(-10, 10), 1, 100) sns.kdeplot(x); %pxresult """ Explanation: Running in non-blocking mode End of explanation """
FlorentSilve/Udacity_ML_nanodegree
projects/customer_segments/customer_segments.ipynb
mit
# Import libraries necessary for this project import numpy as np import pandas as pd from IPython.display import display # Allows the use of display() for DataFrames # Import supplementary visualizations code visuals.py import visuals as vs # Pretty display for notebooks %matplotlib inline # Load the wholesale customers dataset try: data = pd.read_csv("customers.csv") data.drop(['Region', 'Channel'], axis = 1, inplace = True) print "Wholesale customers dataset has {} samples with {} features each.".format(*data.shape) except: print "Dataset could not be loaded. Is the dataset missing?" """ Explanation: Machine Learning Engineer Nanodegree Unsupervised Learning Project: Creating Customer Segments Welcome to the third project of the Machine Learning Engineer Nanodegree! In this notebook, some template code has already been provided for you, and it will be your job to implement the additional functionality necessary to successfully complete this project. Sections that begin with 'Implementation' in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a 'Question X' header. Carefully read each question and provide thorough answers in the following text boxes that begin with 'Answer:'. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide. Note: Code and Markdown cells can be executed using the Shift + Enter keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode. 
Getting Started In this project, you will analyze a dataset containing data on various customers' annual spending amounts (reported in monetary units) of diverse product categories for internal structure. One goal of this project is to best describe the variation in the different types of customers that a wholesale distributor interacts with. Doing so would equip the distributor with insight into how to best structure their delivery service to meet the needs of each customer. The dataset for this project can be found on the UCI Machine Learning Repository. For the purposes of this project, the features 'Channel' and 'Region' will be excluded in the analysis — with focus instead on the six product categories recorded for customers. Run the code block below to load the wholesale customers dataset, along with a few of the necessary Python libraries required for this project. You will know the dataset loaded successfully if the size of the dataset is reported. End of explanation """ # Display a description of the dataset display(data.describe()) """ Explanation: Data Exploration In this section, you will begin exploring the data through visualizations and code to understand how each feature is related to the others. You will observe a statistical description of the dataset, consider the relevance of each feature, and select a few sample data points from the dataset which you will track through the course of this project. Run the code block below to observe a statistical description of the dataset. Note that the dataset is composed of six important product categories: 'Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', and 'Delicatessen'. Consider what each category represents in terms of products you could purchase. 
End of explanation """ # TODO: Select three indices of your choice you wish to sample from the dataset indices = [401, 98, 61] # Create a DataFrame of the chosen samples samples = pd.DataFrame(data.loc[indices], columns = data.keys()).reset_index(drop = True) print "Chosen samples of wholesale customers dataset:" display(samples) """ Explanation: Implementation: Selecting Samples To get a better understanding of the customers and how their data will transform through the analysis, it would be best to select a few sample data points and explore them in more detail. In the code block below, add three indices of your choice to the indices list which will represent the customers to track. It is suggested to try different sets of samples until you obtain customers that vary significantly from one another. End of explanation """ # TODO: Make a copy of the DataFrame, using the 'drop' function to drop the given feature new_data = data.drop('Milk', axis=1) print new_data.head() from sklearn.cross_validation import train_test_split from sklearn.tree import DecisionTreeRegressor # TODO: Split the data into training and testing sets using the given feature as the target X_train, X_test, y_train, y_test = train_test_split(new_data, data['Milk'], test_size=0.25, random_state=0) # TODO: Create a decision tree regressor and fit it to the training set regressor = DecisionTreeRegressor(random_state=0) regressor.fit(X_train, y_train) # TODO: Report the score of the prediction using the testing set score = regressor.score(X_test, y_test) print score var_list=list(data) for var in var_list: new_data = data.drop(var, axis=1) X_train, X_test, y_train, y_test = train_test_split(new_data, data[var], test_size=0.25, random_state=0) regressor = DecisionTreeRegressor(random_state=0) regressor.fit(X_train, y_train) score = regressor.score(X_test, y_test) #print 'R^2 for '+'%s'+score %var print "R^2 for {}: ".format(var)+"{}".format(score) """ Explanation: Question 1 Consider the total purchase 
cost of each product category and the statistical description of the dataset above for your sample customers. What kind of establishment (customer) could each of the three samples you've chosen represent? Hint: Examples of establishments include places like markets, cafes, and retailers, among many others. Avoid using names for establishments, such as saying "McDonalds" when describing a sample customer as a restaurant. Answer: <br/> 0: This sample has a purchases of Fresh produce, Frozen and Delicatessen in the top quartile. However, puchase of Milk is below average, and purchase of grocery and detergents are in the first quartile. This could indicate that this is a Restaurant. <br/> 1: Sample 1 has purchase quantities for all product types well below the first quartile cut-off. This is therefore a small establishment. The ratios of frozen, grocery and fresh food are relatively high, which could indicate that it is a small cafe for instance <br/> 2: The quantities purchased by Sample 2 are in the top quartile for the all categories apart from Frozen food. It has a relatively diversified basket and could be a retailer / supermarket. Implementation: Feature Relevance One interesting thought to consider is if one (or more) of the six product categories is actually relevant for understanding customer purchasing. That is to say, is it possible to determine whether customers purchasing some amount of one category of products will necessarily purchase some proportional amount of another category of products? We can make this determination quite easily by training a supervised regression learner on a subset of the data with one feature removed, and then score how well that model can predict the removed feature. In the code block below, you will need to implement the following: - Assign new_data a copy of the data by removing a feature of your choice using the DataFrame.drop function. 
- Use sklearn.cross_validation.train_test_split to split the dataset into training and testing sets. - Use the removed feature as your target label. Set a test_size of 0.25 and set a random_state. - Import a decision tree regressor, set a random_state, and fit the learner to the training data. - Report the prediction score of the testing set using the regressor's score function. End of explanation """ # Produce a scatter matrix for each pair of features in the data pd.scatter_matrix(data, alpha = 0.3, figsize = (14,8), diagonal = 'kde'); """ Explanation: Question 2 Which feature did you attempt to predict? What was the reported prediction score? Is this feature is necessary for identifying customers' spending habits? Hint: The coefficient of determination, R^2, is scored between 0 and 1, with 1 being a perfect fit. A negative R^2 implies the model fails to fit the data. Answer: I initially attempted to predict milk purchase based on purchase of other products. The resulting R^2 was relatively small. I then looked at all other products. The best R^2 is obtained for Detergents_Paper. Thus means this feature may not be necessary to identify customers' spending habits. Visualize Feature Distributions To get a better understanding of the dataset, we can construct a scatter matrix of each of the six product features present in the data. If you found that the feature you attempted to predict above is relevant for identifying a specific customer, then the scatter matrix below may not show any correlation between that feature and the others. Conversely, if you believe that feature is not relevant for identifying a specific customer, the scatter matrix might show a correlation between that feature and another feature in the data. Run the code block below to produce a scatter matrix. 
End of explanation """ # TODO: Scale the data using the natural logarithm log_data = np.log(data) # TODO: Scale the sample data using the natural logarithm log_samples = np.log(samples) # Produce a scatter matrix for each pair of newly-transformed features pd.scatter_matrix(log_data, alpha = 0.3, figsize = (14,8), diagonal = 'kde'); """ Explanation: Question 3 Are there any pairs of features which exhibit some degree of correlation? Does this confirm or deny your suspicions about the relevance of the feature you attempted to predict? How is the data for those features distributed? Hint: Is the data normally distributed? Where do most of the data points lie? Answer: Detergents_Paper seems to have a strong correlation with Grocery, which is likely driving the large R^2 obtained for both of these product categories. For some other products we also see some correlation, for instance between Milk and Grocety. However the distribution of purchase quantities is narrower. The scatter matrix also confirms that there is no clear correlation between purchase volumes of Delicatessen and other products. The distribution of purchase quantities are heavily skewed towards the right (positively skewed) and are not normal. Data Preprocessing In this section, you will preprocess the data to create a better representation of customers by performing a scaling on the data and detecting (and optionally removing) outliers. Preprocessing data is often times a critical step in assuring that results you obtain from your analysis are significant and meaningful. Implementation: Feature Scaling If data is not normally distributed, especially if the mean and median vary significantly (indicating a large skew), it is most often appropriate to apply a non-linear scaling — particularly for financial data. One way to achieve this scaling is by using a Box-Cox test, which calculates the best power transformation of the data that reduces skewness. 
A simpler approach which can work in most cases would be applying the natural logarithm. In the code block below, you will need to implement the following: - Assign a copy of the data to log_data after applying logarithmic scaling. Use the np.log function for this. - Assign a copy of the sample data to log_samples after applying logarithmic scaling. Again, use np.log. End of explanation """ # Display the log-transformed sample data display(log_samples) """ Explanation: Observation After applying a natural logarithm scaling to the data, the distribution of each feature should appear much more normal. For any pairs of features you may have identified earlier as being correlated, observe here whether that correlation is still present (and whether it is now stronger or weaker than before). Run the code below to see how the sample data has changed after having the natural logarithm applied to it. End of explanation """ # For each feature find the data points with extreme high or low values potential_outliers=[] for feature in log_data.keys(): # TODO: Calculate Q1 (25th percentile of the data) for the given feature Q1 = np.percentile(log_data[feature], 25) # TODO: Calculate Q3 (75th percentile of the data) for the given feature Q3 = np.percentile(log_data[feature], 75) # TODO: Use the interquartile range to calculate an outlier step (1.5 times the interquartile range) step = 1.5*(Q3-Q1) # Display the outliers print "Data points considered outliers for the feature '{}':".format(feature) display(log_data[~((log_data[feature] >= Q1 - step) & (log_data[feature] <= Q3 + step))]) potential_outliers += (log_data[~((log_data[feature] >= Q1 - step) & (log_data[feature] <= Q3 + step))]).index.tolist() print "Potential outliers: {}: ".format(potential_outliers) # OPTIONAL: Select the indices for data points you wish to remove outliers=[65, 66, 75, 128, 154] #outliers=[] # Remove the outliers, if any were specified good_data = log_data.drop(log_data.index[outliers]).reset_index(drop 
= True) """ Explanation: Implementation: Outlier Detection Detecting outliers in the data is extremely important in the data preprocessing step of any analysis. The presence of outliers can often skew results which take into consideration these data points. There are many "rules of thumb" for what constitutes an outlier in a dataset. Here, we will use Tukey's Method for identfying outliers: An outlier step is calculated as 1.5 times the interquartile range (IQR). A data point with a feature that is beyond an outlier step outside of the IQR for that feature is considered abnormal. In the code block below, you will need to implement the following: - Assign the value of the 25th percentile for the given feature to Q1. Use np.percentile for this. - Assign the value of the 75th percentile for the given feature to Q3. Again, use np.percentile. - Assign the calculation of an outlier step for the given feature to step. - Optionally remove data points from the dataset by adding indices to the outliers list. NOTE: If you choose to remove any outliers, ensure that the sample data does not contain any of these points! Once you have performed this implementation, the dataset will be stored in the variable good_data. End of explanation """ from sklearn.decomposition import PCA # TODO: Apply PCA by fitting the good data with the same number of dimensions as features pca = PCA(n_components=6) pca.fit(good_data) # TODO: Transform log_samples using the PCA fit above pca_samples = pca.transform(log_samples) # Generate PCA results plot pca_results = vs.pca_results(good_data, pca) print pca_results['Explained Variance'].cumsum() """ Explanation: Question 4 Are there any data points considered outliers for more than one feature based on the definition above? Should these data points be removed from the dataset? If any data points were added to the outliers list to be removed, explain why. 
Answer: 5 data points can be considered as outliers for more than one feature based on the definition above. These may either be the results of erronous entries or of very peculiar type of shop not representative of the rest of the population. I therefore decided to remove them from the dataset. Feature Transformation In this section you will use principal component analysis (PCA) to draw conclusions about the underlying structure of the wholesale customer data. Since using PCA on a dataset calculates the dimensions which best maximize variance, we will find which compound combinations of features best describe customers. Implementation: PCA Now that the data has been scaled to a more normal distribution and has had any necessary outliers removed, we can now apply PCA to the good_data to discover which dimensions about the data best maximize the variance of features involved. In addition to finding these dimensions, PCA will also report the explained variance ratio of each dimension — how much variance within the data is explained by that dimension alone. Note that a component (dimension) from PCA can be considered a new "feature" of the space, however it is a composition of the original features present in the data. In the code block below, you will need to implement the following: - Import sklearn.decomposition.PCA and assign the results of fitting PCA in six dimensions with good_data to pca. - Apply a PCA transformation of log_samples using pca.transform, and assign the results to pca_samples. End of explanation """ # Display sample log-data after having a PCA transformation applied display(pd.DataFrame(np.round(pca_samples, 4), columns = pca_results.index.values)) """ Explanation: Question 5 How much variance in the data is explained in total by the first and second principal component? What about the first four principal components? Using the visualization provided above, discuss what the first four dimensions best represent in terms of customer spending. 
Hint: A positive increase in a specific dimension corresponds with an increase of the positive-weighted features and a decrease of the negative-weighted features. The rate of increase or decrease is based on the indivdual feature weights. Answer: 70.68% of the variance in the data is explained in total by the first and second principal component. This percentage increases to 93.11% when including the first four principal components. <br/> The first four dimensions could represent different types of customer spending behaviors. Dimension 1 represents a relatively diversified basket with strong positive correlation (larger weights) with milk, grocery and detergents_paper. This may represent a retail store. Dimension 2 has large positive weights (strong positive correlation) on fresh, frozen and delicatessen and could correspond to a restaurant or food market. Dimension 3 represents outlets purchasing almost exclusively delicatessen and frozen products, and purchase much less fresh produce and to a lesser extent detergents_paper (negative weights/correlations for these products). Dimension 4 has a strong positive correlation with frozen food purchase and a heavy negative one with delicatessen (and to a lesser extent on fresh). This could represent retail outlets. Observation Run the code below to see how the log-transformed sample data has changed after having a PCA transformation applied to it in six dimensions. Observe the numerical value for the first four dimensions of the sample points. Consider if this is consistent with your initial interpretation of the sample points. 
End of explanation """ # TODO: Apply PCA by fitting the good data with only two dimensions pca = PCA(n_components=2) pca.fit(good_data) # TODO: Transform the good data using the PCA fit above reduced_data = pca.transform(good_data) # TODO: Transform log_samples using the PCA fit above pca_samples = pca.transform(log_samples) # Create a DataFrame for the reduced data reduced_data = pd.DataFrame(reduced_data, columns = ['Dimension 1', 'Dimension 2']) """ Explanation: 0 and 2 to match initial intuition. 0 was identified as a potential restaurant, and indeed has a high value for dimension 2. 2 was potentially identified as a retail store, which matches the large value for dimension 1. New values along these 6 dimensions for 1 are however non trivial to interpret. Implementation: Dimensionality Reduction When using principal component analysis, one of the main goals is to reduce the dimensionality of the data — in effect, reducing the complexity of the problem. Dimensionality reduction comes at a cost: Fewer dimensions used implies less of the total variance in the data is being explained. Because of this, the cumulative explained variance ratio is extremely important for knowing how many dimensions are necessary for the problem. Additionally, if a signifiant amount of variance is explained by only two or three dimensions, the reduced data can be visualized afterwards. In the code block below, you will need to implement the following: - Assign the results of fitting PCA in two dimensions with good_data to pca. - Apply a PCA transformation of good_data using pca.transform, and assign the results to reduced_data. - Apply a PCA transformation of log_samples using pca.transform, and assign the results to pca_samples. 
End of explanation """ # Display sample log-data after applying PCA transformation in two dimensions display(pd.DataFrame(np.round(pca_samples, 4), columns = ['Dimension 1', 'Dimension 2'])) """ Explanation: Observation Run the code below to see how the log-transformed sample data has changed after having a PCA transformation applied to it using only two dimensions. Observe how the values for the first two dimensions remains unchanged when compared to a PCA transformation in six dimensions. End of explanation """ # Create a biplot vs.biplot(good_data, reduced_data, pca) """ Explanation: Visualizing a Biplot A biplot is a scatterplot where each data point is represented by its scores along the principal components. The axes are the principal components (in this case Dimension 1 and Dimension 2). In addition, the biplot shows the projection of the original features along the components. A biplot can help us interpret the reduced dimensions of the data, and discover relationships between the principal components and original features. Run the code cell below to produce a biplot of the reduced-dimension data. End of explanation """ from sklearn.cluster import KMeans from sklearn.metrics import silhouette_score # TODO: Apply your clustering algorithm of choice to the reduced data clusterer = KMeans(n_clusters=2, random_state=0).fit(reduced_data) # TODO: Predict the cluster for each data point preds = clusterer.predict(reduced_data) # TODO: Find the cluster centers centers = clusterer.cluster_centers_ # TODO: Predict the cluster for each transformed sample data point sample_preds = clusterer.predict(pca_samples) # TODO: Calculate the mean silhouette coefficient for the number of clusters chosen labels = clusterer.labels_ score = silhouette_score(reduced_data, labels) print score """ Explanation: Observation Once we have the original feature projections (in red), it is easier to interpret the relative position of each data point in the scatterplot. 
For instance, a point the lower right corner of the figure will likely correspond to a customer that spends a lot on 'Milk', 'Grocery' and 'Detergents_Paper', but not so much on the other product categories. From the biplot, which of the original features are most strongly correlated with the first component? What about those that are associated with the second component? Do these observations agree with the pca_results plot you obtained earlier? Clustering In this section, you will choose to use either a K-Means clustering algorithm or a Gaussian Mixture Model clustering algorithm to identify the various customer segments hidden in the data. You will then recover specific data points from the clusters to understand their significance by transforming them back into their original dimension and scale. Question 6 What are the advantages to using a K-Means clustering algorithm? What are the advantages to using a Gaussian Mixture Model clustering algorithm? Given your observations about the wholesale customer data so far, which of the two algorithms will you use and why? Answer: K-means clustering aims to segment the data into a given number of groups such that each point is as close to the center of its group as possible. The objective is to find the right number of groups, and the right centers, so that this is achieved. K-means clustering performs a ‘clear-cut’ allocation, whereby each data point is assigned to a single group. On the contrary, a Gaussian Mixture Model (GMM) clustering algorithm performs a soft assignment, with each point having a probability to belong to each group, and with a possibility to obtain density estimation for each group/cluster. K-means is relatively fast to train and easy to visualize in 2D or 3D. It however suffers from the curse of dimensionality. <br/> In our case, we want to separate customers in customer groups that correspond to different types of establishment, and therefore would not benefit from a soft assignment. 
The dimensionality is also small and therefore a k-means clustering should perform well. Implementation: Creating Clusters Depending on the problem, the number of clusters that you expect to be in the data may already be known. When the number of clusters is not known a priori, there is no guarantee that a given number of clusters best segments the data, since it is unclear what structure exists in the data — if any. However, we can quantify the "goodness" of a clustering by calculating each data point's silhouette coefficient. The silhouette coefficient for a data point measures how similar it is to its assigned cluster from -1 (dissimilar) to 1 (similar). Calculating the mean silhouette coefficient provides for a simple scoring method of a given clustering. In the code block below, you will need to implement the following: - Fit a clustering algorithm to the reduced_data and assign it to clusterer. - Predict the cluster for each data point in reduced_data using clusterer.predict and assign them to preds. - Find the cluster centers using the algorithm's respective attribute and assign them to centers. - Predict the cluster for each sample data point in pca_samples and assign them sample_preds. - Import sklearn.metrics.silhouette_score and calculate the silhouette score of reduced_data against preds. - Assign the silhouette score to score and print the result. End of explanation """ # Display the results of the clustering from implementation vs.cluster_results(reduced_data, preds, centers, pca_samples) """ Explanation: Question 7 Report the silhouette score for several cluster numbers you tried. Of these, which number of clusters has the best silhouette score? Answer: The silouhette scores for 2, 3, 4, 5 and 6 clusters are respectively equal to 0.43, 0.40, 0.33, 0.35, and 0.37. The best silouhette score is achieved for 2 clusters. 
Cluster Visualization Once you've chosen the optimal number of clusters for your clustering algorithm using the scoring metric above, you can now visualize the results by executing the code block below. Note that, for experimentation purposes, you are welcome to adjust the number of clusters for your clustering algorithm to see various visualizations. The final visualization provided should, however, correspond with the optimal number of clusters. End of explanation """ # TODO: Inverse transform the centers log_centers = pca.inverse_transform(centers) # TODO: Exponentiate the centers true_centers = np.exp(log_centers) # Display the true centers segments = ['Segment {}'.format(i) for i in range(0,len(centers))] true_centers = pd.DataFrame(np.round(true_centers), columns = data.keys()) true_centers.index = segments display(true_centers) true_centers = true_centers.append(data.describe().ix['50%']) true_centers.plot(kind = 'bar', figsize = (16, 4)) """ Explanation: Implementation: Data Recovery Each cluster present in the visualization above has a central point. These centers (or means) are not specifically data points from the data, but rather the averages of all the data points predicted in the respective clusters. For the problem of creating customer segments, a cluster's center point corresponds to the average customer of that segment. Since the data is currently reduced in dimension and scaled by a logarithm, we can recover the representative customer spending from these data points by applying the inverse transformations. In the code block below, you will need to implement the following: - Apply the inverse transform to centers using pca.inverse_transform and assign the new centers to log_centers. - Apply the inverse function of np.log to log_centers using np.exp and assign the true centers to true_centers. 
End of explanation """ # Display the predictions for i, pred in enumerate(sample_preds): print "Sample point", i, "predicted to be in Cluster", pred """ Explanation: Question 8 Consider the total purchase cost of each product category for the representative data points above, and reference the statistical description of the dataset at the beginning of this project. What set of establishments could each of the customer segments represent? Hint: A customer who is assigned to 'Cluster X' should best identify with the establishments represented by the feature set of 'Segment X'. Answer: The representative data point for Segment 0 corresponds to purchases of fresh food around the average, of milk, grocery, detergents and delicatessen just above the first quartile threashold. Purchases of frozen food is however above average (3rd quartile). <br> The representative data point for Segment 1 corresponds to puchases of milk, grocery and detergents well above the respective sample averages (values in top quartile of the distributions). Purchase values for delicatessen is around the sample average, while the ones for fresh and frozen fall below average (both in 2nd quartile). <br> Segment 0 could represent retaurants whereas Segment 1 could represent the customer segment of retailers. Question 9 For each sample point, which customer segment from Question 8 best represents it? Are the predictions for each sample point consistent with this? Run the code block below to find which cluster each sample point is predicted to be. End of explanation """ # Display the results of the clustering from implementation vs.cluster_results(reduced_data, preds, centers, pca_samples) # Display the clustering results based on 'Channel' data vs.channel_results(reduced_data, outliers, pca_samples) """ Explanation: Answer: We initially noted the following prior to doing any analysis: <br/> Sample point 0: This sample has a purchases of Fresh produce, Frozen and Delicatessen in the top quartile. 
However, puchase of Milk is below average, and purchase of grocery and detergents are in the first quartile. This could indicate that this is a Restaurant. <br/> Sample point 1: Sample 1 has purchase quantities for all product types well below the first quartile cut-off. This is therefore a small establishment. The ratios of frozen, grocery and fresh food are relatively high, which could indicate that it is a small cafe for instance <br/> Sample point 2: The quantities purchased by Sample 2 are in the top quartile for the all categories apart from Frozen food. It has a relatively diversified basket and could be a retailer / supermarket.<br/> The predictions for 0 and 2 are consistent with this. For instance, we can note that Cluster 0 has purchases of milk, grocery and detergents just above the first quartile threshold, while these values are below average (milk) and in the first quartile (grocery and detergents) for Sample Point 1. Sample Point 2, similar to the representative data point for segment 1 has purchases for milk, grocery and detergents in the top quartiles. Prediction for 1 are less straightforward to comment on, since the absolute values are very small. The metrics of interest are more the ratios as opposed to the absolute purchases. With ratios of frozen, grocery and fresh it is consistent that sample point 1 be associated with cluster/segment 0. Conclusion In this final section, you will investigate ways that you can make use of the clustered data. First, you will consider how the different groups of customers, the customer segments, may be affected differently by a specific delivery scheme. Next, you will consider how giving a label to each customer (which segment that customer belongs to) can provide for additional features about the customer data. Finally, you will compare the customer segments to a hidden variable present in the data, to see whether the clustering identified certain relationships. 
Question 10 Companies will often run A/B tests when making small changes to their products or services to determine whether making that change will affect its customers positively or negatively. The wholesale distributor is considering changing its delivery service from currently 5 days a week to 3 days a week. However, the distributor will only make this change in delivery service for customers that react positively. How can the wholesale distributor use the customer segments to determine which customers, if any, would react positively to the change in delivery service? Hint: Can we assume the change affects all customers equally? How can we determine which group of customers it affects the most? Answer: For each customer segment, the distributor can change its delivery service from 5 to 3 days for a sub-sample of customers. In effect, the customer segment is used to stratify the sample prior to A/B testing. One can then estimate the impact of the change for each group separately and determine in particular which group of customers is the most affected (based on changes in purchase behavior). Intuitively, given that their purchase volume across the different categories of products are different, we woul expect the effect to be different between the two groups. For instance, reducing delivery frequency is not likely to affect purchase for non-perishable products, but could affect purchases for milk, fresh produce for instance. Question 11 Additional structure is derived from originally unlabeled data when using clustering techniques. Since each customer has a customer segment it best identifies with (depending on the clustering algorithm applied), we can consider 'customer segment' as an engineered feature for the data. Assume the wholesale distributor recently acquired ten new customers and each provided estimates for anticipated annual spending of each product category. 
Knowing these estimates, the wholesale distributor wants to classify each new customer to a customer segment to determine the most appropriate delivery service. How can the wholesale distributor label the new customers using only their estimated product spending and the customer segment data? Hint: A supervised learner could be used to train on the original customers. What would be the target variable? Answer: The wholesale distributor could train its clustering based on actual purchase of other customers and estimate the segment for each new client using their estimated spending as opposed to actual purchase. Visualizing Underlying Distributions At the beginning of this project, it was discussed that the 'Channel' and 'Region' features would be excluded from the dataset so that the customer product categories were emphasized in the analysis. By reintroducing the 'Channel' feature to the dataset, an interesting structure emerges when considering the same PCA dimensionality reduction applied earlier to the original dataset. Run the code block below to see how each data point is labeled either 'HoReCa' (Hotel/Restaurant/Cafe) or 'Retail' the reduced space. In addition, you will find the sample points are circled in the plot, which will identify their labeling. End of explanation """
uber/pyro
tutorial/source/contrib_funsor_intro_i.ipynb
apache-2.0
from collections import OrderedDict import torch import funsor from pyro import set_rng_seed as pyro_set_rng_seed funsor.set_backend("torch") torch.set_default_dtype(torch.float32) pyro_set_rng_seed(101) """ Explanation: pyro.contrib.funsor, a new backend for Pyro - New primitives (Part 1) Introduction In this tutorial we'll cover the basics of pyro.contrib.funsor, a new backend for the Pyro probabilistic programming system that is intended to replace the current internals of Pyro and significantly expand its capabilities as both a modelling tool and an inference research platform. This tutorial is aimed at readers interested in developing custom inference algorithms and understanding Pyro's current and future internals. As such, the material here assumes some familiarity with the generic Pyro API package pyroapi and with Funsor. Additional documentation for Funsor can be found on the Pyro website, on GitHub, and in the research paper "Functional Tensors for Probabilistic Programming." Those who are less interested in such details should find that they can already use the general-purpose algorithms in contrib.funsor with their existing Pyro models via pyroapi. Reinterpreting existing Pyro models with pyroapi The new backend uses the pyroapi package to integrate with existing Pyro code. First, we import some dependencies: End of explanation """ import pyro.contrib.funsor import pyroapi from pyroapi import handlers, infer, ops, optim, pyro from pyroapi import distributions as dist # this is already done in pyro.contrib.funsor, but we repeat it here pyroapi.register_backend("contrib.funsor", dict( distributions="pyro.distributions", handlers="pyro.contrib.funsor.handlers", infer="pyro.contrib.funsor.infer", ops="torch", optim="pyro.optim", pyro="pyro.contrib.funsor", )) """ Explanation: Importing pyro.contrib.funsor registers the "contrib.funsor" backend with pyroapi, which can now be passed as an argument to the pyroapi.pyro_backend context manager. 
End of explanation """ funsor_one = funsor.to_funsor(float(1)) print(funsor_one, type(funsor_one)) funsor_two = funsor.to_funsor(torch.tensor(2.)) print(funsor_two, type(funsor_two)) """ Explanation: And we're off! From here on, any pyro.(...) statement should be understood as dispatching to the new backend. Two new primitives: to_funsor and to_data The first and most important new concept in pyro.contrib.funsor is the new pair of primitives pyro.to_funsor and pyro.to_data. These are effectful versions of funsor.to_funsor and funsor.to_data, i.e. versions whose behavior can be intercepted, controlled, or used to trigger side effects by Pyro's library of algebraic effect handlers. Let's briefly review these two underlying functions before diving into the effectful versions in pyro.contrib.funsor. As one might expect from the name, to_funsor takes as inputs objects that are not funsor.Funsors and attempts to convert them into Funsor terms. For example, calling funsor.to_funsor on a Python number converts it to a funsor.terms.Number object: End of explanation """ data_one = funsor.to_data(funsor.terms.Number(float(1), 'real')) print(data_one, type(data_one)) data_two = funsor.to_data(funsor.Tensor(torch.tensor(2.), OrderedDict(), 'real')) print(data_two, type(data_two)) """ Explanation: Similarly ,calling funsor.to_data on an atomic funsor.Funsor converts it to a regular Python object like a float or a torch.Tensor: End of explanation """ var_x = funsor.to_funsor("x", output=funsor.Reals[2]) print(var_x, var_x.inputs, var_x.output) """ Explanation: In many cases it is necessary to provide an output type to uniquely convert a piece of data to a funsor.Funsor. This also means that, strictly speaking, funsor.to_funsor and funsor.to_data are not inverses. 
For example, funsor.to_funsor will automatically convert Python strings to funsor.Variables, but only when given an output funsor.domains.Domain, which serves as the type of the variable: End of explanation """ ambiguous_tensor = torch.zeros((3, 1, 2)) print("Ambiguous tensor: shape = {}".format(ambiguous_tensor.shape)) # case 1: treat all dimensions as output/event dimensions funsor1 = funsor.to_funsor(ambiguous_tensor, output=funsor.Reals[3, 1, 2]) print("Case 1: inputs = {}, output = {}".format(funsor1.inputs, funsor1.output)) # case 2: treat the leftmost dimension as a batch dimension # note that dimension -1 in dim_to_name here refers to the rightmost *batch dimension*, # i.e. dimension -3 of ambiguous_tensor, the rightmost dimension not included in the output shape. funsor2 = funsor.to_funsor(ambiguous_tensor, output=funsor.Reals[1, 2], dim_to_name={-1: "a"}) print("Case 2: inputs = {}, output = {}".format(funsor2.inputs, funsor2.output)) # case 3: treat the leftmost 2 dimensions as batch dimensions; empty batch dimensions are ignored # note that dimensions -1 and -2 in dim_to_name here refer to the rightmost *batch dimensions*, # i.e. dimensions -2 and -3 of ambiguous_tensor, the rightmost dimensions not included in the output shape. funsor3 = funsor.to_funsor(ambiguous_tensor, output=funsor.Reals[2], dim_to_name={-1: "b", -2: "a"}) print("Case 3: inputs = {}, output = {}".format(funsor3.inputs, funsor3.output)) # case 4: treat all dimensions as batch dimensions; empty batch dimensions are ignored # note that dimensions -1, -2 and -3 in dim_to_name here refer to the rightmost *batch dimensions*, # i.e. dimensions -1, -2 and -3 of ambiguous_tensor, the rightmost dimensions not included in the output shape. 
funsor4 = funsor.to_funsor(ambiguous_tensor, output=funsor.Real, dim_to_name={-1: "c", -2: "b", -3: "a"}) print("Case 4: inputs = {}, output = {}".format(funsor4.inputs, funsor4.output)) """ Explanation: However, it is often impossible to convert objects to and from Funsor expressions uniquely without additional type information about inputs, as in the following example of a torch.Tensor, which could be converted to a funsor.Tensor in several ways. To resolve this ambiguity, we need to provide to_funsor and to_data with type information that describes how to convert positional dimensions to and from unordered named Funsor dimensions. This information comes in the form of dictionaries mapping batch dimensions to dimension names or vice versa. A key property of these mappings is that use the convention that dimension indices refer to batch dimensions, or dimensions not included in the output shape, which is treated as referring to the rightmost portion of the underlying PyTorch tensor shape, as illustrated in the example below. 
End of explanation """ ambiguous_funsor = funsor.Tensor(torch.zeros((3, 2)), OrderedDict(a=funsor.Bint[3], b=funsor.Bint[2]), 'real') print("Ambiguous funsor: inputs = {}, shape = {}".format(ambiguous_funsor.inputs, ambiguous_funsor.output)) # case 1: the simplest version tensor1 = funsor.to_data(ambiguous_funsor, name_to_dim={"a": -2, "b": -1}) print("Case 1: shape = {}".format(tensor1.shape)) # case 2: an empty dimension between a and b tensor2 = funsor.to_data(ambiguous_funsor, name_to_dim={"a": -3, "b": -1}) print("Case 2: shape = {}".format(tensor2.shape)) # case 3: permuting the input dimensions tensor3 = funsor.to_data(ambiguous_funsor, name_to_dim={"a": -1, "b": -2}) print("Case 3: shape = {}".format(tensor3.shape)) """ Explanation: Similar ambiguity exists for to_data: the inputs of a funsor.Funsor are ordered arbitrarily, and empty dimensions in the data are squeezed away, so a mapping from names to batch dimensions must be provided to ensure unique conversion: End of explanation """ name_to_dim = OrderedDict() funsor_x = funsor.Tensor(torch.ones((2,)), OrderedDict(x=funsor.Bint[2]), 'real') name_to_dim.update({"x": -1}) tensor_x = funsor.to_data(funsor_x, name_to_dim=name_to_dim) print(name_to_dim, funsor_x.inputs, tensor_x.shape) funsor_y = funsor.Tensor(torch.ones((3, 2)), OrderedDict(y=funsor.Bint[3], x=funsor.Bint[2]), 'real') name_to_dim.update({"y": -2}) tensor_y = funsor.to_data(funsor_y, name_to_dim=name_to_dim) print(name_to_dim, funsor_y.inputs, tensor_y.shape) funsor_z = funsor.Tensor(torch.ones((2, 3)), OrderedDict(z=funsor.Bint[2], y=funsor.Bint[3]), 'real') name_to_dim.update({"z": -3}) tensor_z = funsor.to_data(funsor_z, name_to_dim=name_to_dim) print(name_to_dim, funsor_z.inputs, tensor_z.shape) """ Explanation: Maintaining and updating this information efficiently becomes tedious and error-prone as the number of conversions increases. Fortunately, it can be automated away completely. 
Consider the following example: End of explanation """ with pyroapi.pyro_backend("contrib.funsor"), handlers.named(): funsor_x = funsor.Tensor(torch.ones((2,)), OrderedDict(x=funsor.Bint[2]), 'real') tensor_x = pyro.to_data(funsor_x) print(funsor_x.inputs, tensor_x.shape) funsor_y = funsor.Tensor(torch.ones((3, 2)), OrderedDict(y=funsor.Bint[3], x=funsor.Bint[2]), 'real') tensor_y = pyro.to_data(funsor_y) print(funsor_y.inputs, tensor_y.shape) funsor_z = funsor.Tensor(torch.ones((2, 3)), OrderedDict(z=funsor.Bint[2], y=funsor.Bint[3]), 'real') tensor_z = pyro.to_data(funsor_z) print(funsor_z.inputs, tensor_z.shape) """ Explanation: This is exactly the functionality provided by pyro.to_funsor and pyro.to_data, as we can see by using them in the previous example and removing the manual updates. We must also wrap the function in a handlers.named effect handler to ensure that the dimension dictionaries do not persist beyond the function body. End of explanation """ with pyroapi.pyro_backend("contrib.funsor"), handlers.named(): probs = funsor.Tensor(torch.tensor([0.5, 0.4, 0.7]), OrderedDict(batch=funsor.Bint[3])) print(type(probs), probs.inputs, probs.output) x = funsor.Tensor(torch.tensor([0., 1., 0., 1.]), OrderedDict(x=funsor.Bint[4])) print(type(x), x.inputs, x.output) dx = dist.Bernoulli(pyro.to_data(probs)) print(type(dx), dx.shape()) px = pyro.to_funsor(dx.log_prob(pyro.to_data(x)), output=funsor.Real) print(type(px), px.inputs, px.output) """ Explanation: Critically, pyro.to_funsor and pyro.to_data use and update the same bidirectional mapping between names and dimensions, allowing them to be combined intuitively. 
A typical usage pattern, and one that pyro.contrib.funsor uses heavily in its inference algorithm implementations, is to create a funsor.Funsor term directly with a new named dimension and call pyro.to_data on it, perform some PyTorch computations, and call pyro.to_funsor on the result: End of explanation """ with pyroapi.pyro_backend("contrib.funsor"), handlers.named(): x = pyro.to_funsor(torch.tensor([0., 1.]), funsor.Real, dim_to_name={-1: "x"}) print("x: ", type(x), x.inputs, x.output) px = pyro.to_funsor(torch.ones(2, 3), funsor.Real, dim_to_name={-2: "x", -1: "y"}) print("px: ", type(px), px.inputs, px.output) """ Explanation: pyro.to_funsor and pyro.to_data treat the keys in their name-to-dim mappings as references to the input's batch shape, but treats the values as references to the globally consistent name-dim mapping. This may be useful for complicated computations that involve a mixture of PyTorch and Funsor operations. End of explanation """ with pyroapi.pyro_backend("contrib.funsor"), handlers.named(): for i in pyro.markov(range(10)): x = pyro.to_data(funsor.Tensor(torch.tensor([0., 1.]), OrderedDict({"x{}".format(i): funsor.Bint[2]}))) print("Shape of x[{}]: ".format(str(i)), x.shape) """ Explanation: Dealing with large numbers of variables: (re-)introducing pyro.markov So far, so good. However, what if the number of different named dimensions continues to increase? We face two problems: first, reusing the fixed number of available positional dimensions (25 in PyTorch), and second, computing shape information with time complexity that is independent of the number of variables. A fully general automated solution to this problem would require deeper integration with Python or PyTorch. Instead, as an intermediate solution, we introduce the second key concept in pyro.contrib.funsor: the pyro.markov annotation, a way to indicate the shelf life of certain variables. 
pyro.markov is already part of Pyro (see enumeration tutorial) but the implementation in pyro.contrib.funsor is fresh. The primary constraint on the design of pyro.markov is backwards compatibility: in order for pyro.contrib.funsor to be compatible with the large range of existing Pyro models, the new implementation had to match the shape semantics of Pyro's existing enumeration machinery as closely as possible. End of explanation """ with pyroapi.pyro_backend("contrib.funsor"), handlers.named(): for i in pyro.markov(range(10), history=2): x = pyro.to_data(funsor.Tensor(torch.tensor([0., 1.]), OrderedDict({"x{}".format(i): funsor.Bint[2]}))) print("Shape of x[{}]: ".format(str(i)), x.shape) """ Explanation: pyro.markov is a versatile piece of syntax that can be used as a context manager, a decorator, or an iterator. It is important to understand that pyro.markov's only functionality at present is tracking variable usage, not directly indicating conditional independence properties to inference algorithms, and as such it is only necessary to add enough annotations to ensure that tensors have correct shapes, rather than attempting to manually encode as much dependency information as possible. pyro.markov takes an additional argument history that determines the number of previous pyro.markov contexts to take into account when building the mapping between names and dimensions at a given pyro.to_funsor/pyro.to_data call. 
End of explanation """ from pyro.contrib.funsor.handlers.runtime import _DIM_STACK, DimType with pyroapi.pyro_backend("contrib.funsor"), handlers.named(): funsor_particle_ids = funsor.Tensor(torch.arange(10), OrderedDict(n=funsor.Bint[10])) tensor_particle_ids = pyro.to_data(funsor_particle_ids, dim_type=DimType.GLOBAL) print("New global dimension: ", funsor_particle_ids.inputs, tensor_particle_ids.shape) """ Explanation: Use cases beyond enumeration: global and visible dimensions Global dimensions It is sometimes useful to have dimensions and variables ignore the pyro.markov structure of a program and remain active in arbitrarily deeply nested markov and named contexts. For example, suppose we wanted to draw a batch of samples from a Pyro model's joint distribution. To accomplish this we indicate to pyro.to_data that a dimension should be treated as "global" (DimType.GLOBAL) via the dim_type keyword argument. End of explanation """ from pyro.contrib.funsor.handlers.runtime import _DIM_STACK, DimType with pyroapi.pyro_backend("contrib.funsor"), handlers.named(): funsor_plate1_ids = funsor.Tensor(torch.arange(10), OrderedDict(plate1=funsor.Bint[10])) tensor_plate1_ids = pyro.to_data(funsor_plate1_ids, dim_type=DimType.GLOBAL) print("New global dimension: ", funsor_plate1_ids.inputs, tensor_plate1_ids.shape) funsor_plate2_ids = funsor.Tensor(torch.arange(9), OrderedDict(plate2=funsor.Bint[9])) tensor_plate2_ids = pyro.to_data(funsor_plate2_ids, dim_type=DimType.GLOBAL) print("Another new global dimension: ", funsor_plate2_ids.inputs, tensor_plate2_ids.shape) del _DIM_STACK.global_frame["plate1"] funsor_plate3_ids = funsor.Tensor(torch.arange(10), OrderedDict(plate3=funsor.Bint[10])) tensor_plate3_ids = pyro.to_data(funsor_plate1_ids, dim_type=DimType.GLOBAL) print("A third new global dimension after recycling: ", funsor_plate3_ids.inputs, tensor_plate3_ids.shape) """ Explanation: pyro.markov does the hard work of automatically managing local dimensions, but because 
global dimensions ignore this structure, they must be deallocated manually or they will persist until the last active effect handler exits, just as global variables in Python persist until a program execution finishes. End of explanation """ prev_first_available_dim = _DIM_STACK.set_first_available_dim(-2) with pyroapi.pyro_backend("contrib.funsor"), handlers.named(): funsor_local_ids = funsor.Tensor(torch.arange(9), OrderedDict(k=funsor.Bint[9])) tensor_local_ids = pyro.to_data(funsor_local_ids, dim_type=DimType.LOCAL) print("Tensor with new local dimension: ", funsor_local_ids.inputs, tensor_local_ids.shape) funsor_global_ids = funsor.Tensor(torch.arange(10), OrderedDict(n=funsor.Bint[10])) tensor_global_ids = pyro.to_data(funsor_global_ids, dim_type=DimType.GLOBAL) print("Tensor with new global dimension: ", funsor_global_ids.inputs, tensor_global_ids.shape) funsor_data_ids = funsor.Tensor(torch.arange(11), OrderedDict(m=funsor.Bint[11])) tensor_data_ids = pyro.to_data(funsor_data_ids, dim_type=DimType.VISIBLE) print("Tensor with new visible dimension: ", funsor_data_ids.inputs, tensor_data_ids.shape) # we also need to reset the first_available_dim after we're done _DIM_STACK.set_first_available_dim(prev_first_available_dim) """ Explanation: Performing this deallocation directly is often unnecessary, and we include this interaction primarily to illuminate the internals of pyro.contrib.funsor. Instead, effect handlers that introduce global dimensions, like pyro.plate, may inherit from the GlobalNamedMessenger effect handler which deallocates global dimensions generically upon entry and exit. We will see an example of this in the next tutorial. Visible dimensions We might also wish to preserve the meaning of the shape of a tensor of data. For this we indicate to pyro.to_data that a dimension should be treated as not merely global but "visible" (DimTypes.VISIBLE). 
By default, the 4 rightmost batch dimensions are reserved as "visible" dimensions, but this can be changed by setting the first_available_dim attribute of the global state object _DIM_STACK. Users who have come across pyro.infer.TraceEnum_ELBO's max_plate_nesting argument are already familiar with this distinction. End of explanation """
ucsdlib/python-novice-inflammation
3-lists.ipynb
cc0-1.0
odds = [1,3,5,7] print('odds are:',odds) print('first and last:', odds[0], odds[-1]) for number in odds: print(number) """ Explanation: for loop is a way to do many operations list a way to store many values Unlike numpy, lits are built into the language so we don't need to load [] creates a list End of explanation """ names = ['Newton', 'Darwing', 'Turing'] #typo in Darwins name print('names is orignally: ', names) names[1] = 'Darwin' print('final value of names:', names) """ Explanation: mutable and immutable objects diff in strings and lists: we can change vales in a list, not string cannot in char string End of explanation """ name = 'Bell' name[0] = 'b' """ Explanation: works but how about with a string End of explanation """ #nested lists x = [['pepper', 'zucchini', 'onion'], ['cabbage', 'lettuce', 'garlic'], ['apple', 'pear', 'banana']] """ Explanation: mutable - data which can be changed in place immutable - data which cannot be changed in place strings and numbers are immutable, can only overwrite with new vals lists and arrays are mutable: append, reorder, change elements be careful - modifying data in place if 2 vars refer to same list and you modify list value, it will change both variables! 
make copies to avoid End of explanation """ print([x[0]]) print(x[0]) print(x[0][1]) """ Explanation: visual representation of indexing nested lists End of explanation """ odds.append(11) print('odds after adding a value: ', odds) del odds[0] print('odds after removing the first element:', odds) odds.reverse() print('odds after reversing:', odds) """ Explanation: Many ways to change the contents of lists besides adding values End of explanation """ whos odds = [1,3,5,7] primes = odds primes += [2] print('primes:', primes) print('odds:', odds) """ Explanation: While modifying in places - good to remember python treats list in a counterintuitive way if we make a list and copy it, then modify in place, trouble ensues End of explanation """ odds = [1,3,5,7] primes = list(odds) primes += [2] print('primes:', primes) print('odds:', odds) """ Explanation: they point to same list! Why? * python stores a list in memory and then can use multiple names to refer to the same list! * if we want a copy instead we can use the list function so we don't do this End of explanation """ my_list = [] for char in "hello": my_list.append(char) print(my_list) """ Explanation: List challenge: Use a for-loop to convert the string “hello” into a list of letters: python ["h", "e", "l", "l", "o"] Hint: You can create an empty list like this: my_list = [] End of explanation """
tpin3694/tpin3694.github.io
machine-learning/save_images.ipynb
mit
# Load library import cv2 import numpy as np from matplotlib import pyplot as plt """ Explanation: Title: Save Images Slug: save_images Summary: How to save images using OpenCV in Python. Date: 2017-09-11 12:00 Category: Machine Learning Tags: Preprocessing Images Authors: Chris Albon Preliminaries End of explanation """ # Load image as grayscale image = cv2.imread('images/plane.jpg', cv2.IMREAD_GRAYSCALE) # Show image plt.imshow(image, cmap='gray'), plt.axis("off") plt.show() """ Explanation: Load Image As Greyscale End of explanation """ # Save image cv2.imwrite('images/plane_new.jpg', image) """ Explanation: Save Image End of explanation """
DS-100/sp17-materials
sp17/hw/hw1/hw1.ipynb
gpl-3.0
!pip install -U okpy """ Explanation: Homework 1: Setup and (Re-)Introduction to Python Course Policies Here are some important course policies. These are also located at http://www.ds100.org/sp17/. Tentative Grading There will be 7 challenging homework assignments. Homeworks must be completed individually and will mix programming and short answer questions. At the end of each week of instruction we will have an online multiple choice quiz ("vitamin") that will help you stay up-to-date with lecture materials. Labs assignments will be graded for completion and are intended to help with the homework assignments. 40% Homeworks 13% Vitamins 7% Labs 15% Midterm 25% Final Collaboration Policy Data science is a collaborative activity. While you may talk with others about the homework, we ask that you write your solutions individually. If you do discuss the assignments with others please include their names at the top of your solution. Keep in mind that content from the homework and vitamins will likely be covered on both the midterm and final. This assignment In this assignment, you'll learn (or review): How to set up Jupyter on your own computer. How to check out and submit assignments for this class. Python basics, like defining functions. How to use the numpy library to compute with arrays of numbers. 1. Setup If you haven't already, read through the instructions at http://www.ds100.org/spring-2017/setup. The instructions for submission are at the end of this notebook. First, let's make sure you have the latest version of okpy. End of explanation """ import math import numpy as np import matplotlib %matplotlib inline import matplotlib.pyplot as plt plt.style.use('fivethirtyeight') from datascience import * from client.api.notebook import Notebook ok = Notebook('hw1.ok') """ Explanation: If you've set up your environment properly, this cell should run without problems: End of explanation """ ok.auth(inline=True) """ Explanation: Now, run this cell to log into OkPy. 
This is the submission system for the class; you will use this website to confirm that you've submitted your assignment. End of explanation """ 2 + 2 # This is a comment. # In Python, the ** operator performs exponentiation. math.e**(-2) print("Hello" + ",", "world!") "Hello, cell output!" def add2(x): """This docstring explains what this function does: it adds 2 to a number.""" return x + 2 def makeAdder(amount): """Make a function that adds the given amount to a number.""" def addAmount(x): return x + amount return addAmount add3 = makeAdder(3) add3(4) # add4 is very similar to add2, but it's been created using a lambda expression. add4 = lambda x: x + 4 add4(5) sameAsMakeAdder = lambda amount: lambda x: x + amount add5 = sameAsMakeAdder(5) add5(6) def fib(n): if n <= 1: return 1 # Functions can call themselves recursively. return fib(n-1) + fib(n-2) fib(4) # A for loop repeats a block of code once for each # element in a given collection. for i in range(5): if i % 2 == 0: print(2**i) else: print("Odd power of 2") # A list comprehension is a convenient way to apply a function # to each element in a given collection. # The String method join appends together all its arguments # separated by the given string. So we append each element produced # by the list comprehension, each separated by a newline ("\n"). print("\n".join([str(2**i) if i % 2 == 0 else "Odd power of 2" for i in range(5)])) """ Explanation: 2. Python Python is the main programming language we'll use in this course. We assume you have some experience with Python or can learn it yourself, but here is a brief review. Below are some simple Python code fragments. You should feel confident explaining what each fragment is doing. If not, please brush up on your Python. There a number of tutorials online (search for "Python tutorial"). https://docs.python.org/3/tutorial/ is a good place to start. End of explanation """ def nums_reversed(n): ... 
_ = ok.grade('q01a') _ = ok.backup() """ Explanation: Question 1 Question 1a Write a function nums_reversed that takes in an integer n and returns a string containing the numbers 1 through n including n in reverse order, separated by spaces. For example: &gt;&gt;&gt; nums_reversed(5) '5 4 3 2 1' Note: The ellipsis (...) indicates something you should fill in. It doesn't necessarily imply you should replace it with only one line of code. End of explanation """ def string_splosion(string): ... _ = ok.grade('q01b') _ = ok.backup() """ Explanation: Question 1b Write a function string_splosion that takes in a non-empty string like "Code" and returns a long string containing every prefix of the input. For example: &gt;&gt;&gt; string_splosion('Code') 'CCoCodCode' &gt;&gt;&gt; string_splosion('data!') 'ddadatdatadata!' &gt;&gt;&gt; string_splosion('hi') 'hhi' End of explanation """ def double100(nums): ... _ = ok.grade('q01c') _ = ok.backup() """ Explanation: Question 1c Write a function double100 that takes in a list of integers and returns True only if the list has two 100s next to each other. &gt;&gt;&gt; double100([100, 2, 3, 100]) False &gt;&gt;&gt; double100([2, 3, 100, 100, 5]) True End of explanation """ def median(number_list): ... _ = ok.grade('q01d') _ = ok.backup() """ Explanation: Question 1d Write a function median that takes in a list of numbers and returns the median element of the list. If the list has even length, it returns the mean of the two elements in the middle. &gt;&gt;&gt; median([5, 4, 3, 2, 1]) 3 &gt;&gt;&gt; median([ 40, 30, 10, 20 ]) 25 End of explanation """ array1 = np.array([2, 3, 4, 5]) array2 = np.arange(4) array1, array2 """ Explanation: 3. NumPy The NumPy library lets us do fast, simple computing with numbers in Python. 3.1. Arrays The basic NumPy data type is the array, a homogeneously-typed sequential collection (a list of things that all have the same type). Arrays will most often contain strings, numbers, or other arrays. 
Let's create some arrays: End of explanation """ array1 * 2 array1 * array2 array1 ** array2 """ Explanation: Math operations on arrays happen element-wise. Here's what we mean: End of explanation """ np.arange? """ Explanation: This is not only very convenient (fewer for loops!) but also fast. NumPy is designed to run operations on arrays much faster than equivalent Python code on lists. Data science sometimes involves working with large datasets where speed is important - even the constant factors! Jupyter pro-tip: Pull up the docs for any function in Jupyter by running a cell with the function name and a ? at the end: End of explanation """ np.linspace """ Explanation: Another Jupyter pro-tip: Pull up the docs for any function in Jupyter by typing the function name, then &lt;Shift&gt;-&lt;Tab&gt; on your keyboard. Super convenient when you forget the order of the arguments to a function. You can press &lt;Tab&gt; multiple tabs to expand the docs. Try it on the function below: End of explanation """ xs = ... ys = ... _ = ok.grade('q02') _ = ok.backup() """ Explanation: Question 2 Using the np.linspace function, create an array called xs that contains 100 evenly spaced points between 0 and 2 * np.pi. Then, create an array called ys that contains the value of $ \sin{x} $ at each of those 100 points. Hint: Use the np.sin function. You should be able to define each variable with one line of code.) End of explanation """ plt.plot(xs, ys) """ Explanation: The plt.plot function from another library called matplotlib lets us make plots. It takes in an array of x-values and a corresponding array of y-values. It makes a scatter plot of the (x, y) pairs and connects points with line segments. If you give it enough points, it will appear to create a smooth curve. Let's plot the points you calculated in the previous question: End of explanation """ # Try plotting cos here. """ Explanation: This is a useful recipe for plotting any function: 1. 
Use linspace or arange to make a range of x-values. 2. Apply the function to each point to produce y-values. 3. Plot the points. You might remember from calculus that the derivative of the sin function is the cos function. That means that the slope of the curve you plotted above at any point xs[i] is given by cos(xs[i]). You can try verifying this by plotting cos in the next cell. End of explanation """ def derivative(xvals, yvals): ... slopes = ... slopes[:5] _ = ok.grade('q03') _ = ok.backup() """ Explanation: Calculating derivatives is an important operation in data science, but it can be difficult. We can have computers do it for us using a simple idea called numerical differentiation. Consider the ith point (xs[i], ys[i]). The slope of sin at xs[i] is roughly the slope of the line connecting (xs[i], ys[i]) to the nearby point (xs[i+1], ys[i+1]). That slope is: (ys[i+1] - ys[i]) / (xs[i+1] - xs[i]) If the difference between xs[i+1] and xs[i] were infinitessimal, we'd have exactly the derivative. In numerical differentiation we take advantage of the fact that it's often good enough to use "really small" differences instead. Question 3 Define a function called derivative that takes in an array of x-values and their corresponding y-values and computes the slope of the line connecting each point to the next point. &gt;&gt;&gt; derivative(np.array([0, 1, 2]), np.array([2, 4, 6])) np.array([2., 2.]) &gt;&gt;&gt; derivative(np.arange(5), np.arange(5) ** 2) np.array([0., 2., 4., 6.]) Notice that the output array has one less element than the inputs since we can't find the slope for the last point. It's possible to do this in one short line using slicing, but feel free to use whatever method you know. Then, use your derivative function to compute the slopes for each point in xs, ys. Store the slopes in an array called slopes. End of explanation """ ... ... """ Explanation: Question 4 Plot the slopes you computed. 
Then plot cos on top of your plot, calling plt.plot again in the same cell. Did numerical differentiation work? Note: Since we have only 99 slopes, you'll need to take off the last x-value before plotting to avoid an error. End of explanation """ plt.plot(xs[:-1], slopes, label="Numerical derivative") plt.plot(xs[:-1], np.cos(xs[:-1]), label="True derivative") # You can just call plt.legend(), but the legend will cover up # some of the graph. Use bbox_to_anchor=(x,y) to set the x- # and y-coordinates of the center-left point of the legend, # where, for example, (0, 0) is the bottom-left of the graph # and (1, .5) is all the way to the right and halfway up. plt.legend(bbox_to_anchor=(1, .5), loc="center left"); """ Explanation: In the plot above, it's probably not clear which curve is which. Examine the cell below to see how to plot your results with a legend. End of explanation """ # The zeros function creates an array with the given shape. # For a 2-dimensional array like this one, the first # coordinate says how far the array goes *down*, and the # second says how far it goes *right*. array3 = np.zeros((4, 5)) array3 # The shape attribute returns the dimensions of the array. array3.shape # You can think of array3 as an array containing 4 arrays, each # containing 5 zeros. Accordingly, we can set or get the third # element of the second array in array 3 using standard Python # array indexing syntax twice: array3[1][2] = 7 array3 # This comes up so often that there is special syntax provided # for it. The comma syntax is equivalent to using multiple # brackets: array3[1, 2] = 8 array3 """ Explanation: 3.2. Multidimensional Arrays A multidimensional array is a primitive version of a table, containing only one kind of data and having no column labels. A 2-dimensional array is useful for working with matrices of numbers. End of explanation """ array4 = np.zeros((3, 5)) array4[:, 2] = 5 array4 """ Explanation: Arrays allow you to assign to multiple places at once. 
The special character : means "everything." End of explanation """ array5 = np.zeros((3, 5)) rows = np.array([1, 0, 2]) cols = np.array([3, 1, 4]) # Indices (1,3), (0,1), and (2,4) will be set. array5[rows, cols] = 3 array5 """ Explanation: In fact, you can use arrays of indices to assign to multiple places. Study the next example and make sure you understand how it works. End of explanation """ twice_identity = ... ... twice_identity _ = ok.grade('q05') _ = ok.backup() """ Explanation: Question 5 Create a 50x50 array called twice_identity that contains all zeros except on the diagonal, where it contains the value 2. Start by making a 50x50 array of all zeros, then set the values. Use indexing, not a for loop! (Don't use np.eye either, though you might find that function useful later.) End of explanation """ def read_file_lines(filename): ... ... file1 = ... file1[:5] _ = ok.grade('q07') _ = ok.backup() """ Explanation: 4. A Picture Puzzle Your boss has given you some strange text files. He says they're images, some of which depict a summer scene and the rest a winter scene. He demands that you figure out how to determine whether a given text file represents a summer scene or a winter scene. You receive 10 files, 1.txt through 10.txt. Peek at the files in a text editor of your choice. Question 6 How do you think the contents of the file are structured? Take your best guess. Write your answer here, replacing this text. Question 7 Create a function called read_file_lines that takes in a filename as its argument. This function should return a Python list containing the lines of the file as strings. That is, if 1.txt contains: 1 2 3 3 4 5 7 8 9 the return value should be: ['1 2 3\n', '3 4 5\n', '7 8 9\n']. Then, use the read_file_lines function on the file 1.txt, reading the contents into a variable called file1. Hint: Check out this Stack Overflow page on reading lines of files. End of explanation """ def lines_to_image(file_lines): ... image_array = ... 
# Make sure to call astype like this on the 3-dimensional array # you produce, before returning it. return image_array.astype(np.uint8) image1 = ... image1.shape _ = ok.grade('q08') _ = ok.backup() """ Explanation: Each file begins with a line containing two numbers. After checking the length of a file, you could notice that the product of these two numbers equals the number of lines in each file (other than the first one). This suggests the rows represent elements in a 2-dimensional grid. In fact, each dataset represents an image! On the first line, the first of the two numbers is the height of the image (in pixels) and the second is the width (again in pixels). Each line in the rest of the file contains the pixels of the image. Each pixel is a triplet of numbers denoting how much red, green, and blue the pixel contains, respectively. In image processing, each column in one of these image files is called a channel (disregarding line 1). So there are 3 channels: red, green, and blue. Question 8 Define a function called lines_to_image that takes in the contents of a file as a list (such as file1). It should return an array containing integers of shape (n_rows, n_cols, 3). That is, it contains the pixel triplets organized in the correct number of rows and columns. For example, if the file originally contained: 4 2 0 0 0 10 10 10 2 2 2 3 3 3 4 4 4 5 5 5 6 6 6 7 7 7 The resulting array should be a 3-dimensional array that looks like this: array([ [ [0,0,0], [10,10,10] ], [ [2,2,2], [3,3,3] ], [ [4,4,4], [5,5,5] ], [ [6,6,6], [7,7,7] ] ]) The string method split and the function np.reshape might be useful. Important note: You must call .astype(np.uint8) on the final array before returning so that numpy will recognize the array represents an image. Once you've defined the function, set image1 to the result of calling lines_to_image on file1. End of explanation """ def show_images(images, ncols=2, figsize=(10, 7), **kwargs): """ Shows one or more color images. 
images: Image or list of images. Each image is a 3-dimensional array, where dimension 1 indexes height and dimension 2 the width. Dimension 3 indexes the 3 color values red, blue, and green (so it always has length 3). """ def show_image(image, axis=plt): plt.imshow(image, **kwargs) if not (isinstance(images, list) or isinstance(images, tuple)): images = [images] images = [image.astype(np.uint8) for image in images] nrows = math.ceil(len(images) / ncols) ncols = min(len(images), ncols) plt.figure(figsize=figsize) for i, image in enumerate(images): axis = plt.subplot2grid( (nrows, ncols), (i // ncols, i % ncols), ) axis.tick_params(bottom='off', left='off', top='off', right='off', labelleft='off', labelbottom='off') axis.grid(False) show_image(image, axis) # Show image1 here: ... """ Explanation: Question 9 Images in numpy are simply arrays, but we can also display them them as actual images in this notebook. Use the provided show_images function to display image1. You may call it like show_images(image1). If you later have multiple images to display, you can call show_images([image1, image2]) to display them all at once. The resulting image should look almost completely black. Why do you suppose that is? End of explanation """ # This array is provided for your convenience. transformed = np.array([12, 37, 65, 89, 114, 137, 162, 187, 214, 240, 250]) def expand_image_range(image): ... expanded1 = ... show_images(expanded1) _ = ok.grade('q10') _ = ok.backup() """ Explanation: Question 10 If you look at the data, you'll notice all the numbers lie between 0 and 10. In NumPy, a color intensity is an integer ranging from 0 to 255, where 0 is no color (black). That's why the image is almost black. To see the image, we'll need to rescale the numbers in the data to have a larger range. Define a function expand_image_range that takes in an image. 
It returns a new copy of the image with the following transformation: old value | new value ========= | ========= 0 | 12 1 | 37 2 | 65 3 | 89 4 | 114 5 | 137 6 | 162 7 | 187 8 | 214 9 | 240 10 | 250 This expands the color range of the image. For example, a pixel that previously had the value [5 5 5] (almost-black) will now have the value [137 137 137] (gray). Set expanded1 to the expanded image1, then display it with show_images. This page from the numpy docs has some useful information that will allow you to use indexing instead of for loops. However, the slickest implementation uses one very short line of code. Hint: If you index an array with another array or list as in question 5, your array (or list) of indices can contain repeats, as in array1[[0, 1, 0]]. Investigate what happens in that case. End of explanation """ def reveal_file(filename): ... filenames = ['1.txt', '2.txt', '3.txt', '4.txt', '5.txt', '6.txt', '7.txt', '8.txt', '9.txt', '10.txt'] expanded_images = ... show_images(expanded_images, ncols=5) """ Explanation: Question 11 Eureka! You've managed to reveal the image that the text file represents. Now, define a function called reveal_file that takes in a filename and returns an expanded image. This should be relatively easy since you've defined functions for each step in the process. Then, set expanded_images to a list of all the revealed images. There are 10 images to reveal (including the one you just revealed). Finally, use show_images to display the expanded_images. End of explanation """ def proportion_by_channel(image): ... image_proportions = ... image_proportions _ = ok.grade('q12') _ = ok.backup() """ Explanation: Notice that 5 of the above images are of summer scenes; the other 5 are of winter. Think about how you'd distinguish between pictures of summer and winter. What qualities of the image seem to signal to your brain that the image is one of summer? Of winter? 
One trait that seems specific to summer pictures is that the colors are warmer. Let's see if the proportion of pixels of each color in the image can let us distinguish between summer and winter pictures. Question 12 To simplify things, we can categorize each pixel according to its most intense (highest-value) channel. (Remember, red, green, and blue are the 3 channels.) For example, we could just call a [2 4 0] pixel "green." If a pixel has a tie between several channels, let's count it as none of them. Write a function proportion_by_channel. It takes in an image. It assigns each pixel to its greatest-intensity channel: red, green, or blue. Then the function returns an array of length three containing the proportion of pixels categorized as red, the proportion categorized as green, and the proportion categorized as blue (respectively). (Again, don't count pixels that are tied between 2 or 3 colors as any category, but do count them in the denominator when you're computing proportions.) For example: ``` test_im = np.array([ [ [5, 2, 2], [2, 5, 10] ] ]) proportion_by_channel(test_im) array([ 0.5, 0, 0.5 ]) If tied, count neither as the highest test_im = np.array([ [ [5, 2, 5], [2, 50, 50] ] ]) proportion_by_channel(test_im) array([ 0, 0, 0 ]) ``` Then, set image_proportions to the result of proportion_by_channel called on each image in expanded_images as a 2d array. Hint: It's fine to use a for loop, but for a difficult challenge, try avoiding it. (As a side benefit, your code will be much faster.) Our solution uses the NumPy functions np.reshape, np.sort, np.argmax, and np.bincount. End of explanation """ # You'll learn about Pandas and DataFrames soon. 
import pandas as pd pd.DataFrame({ 'red': image_proportions[:, 0], 'green': image_proportions[:, 1], 'blue': image_proportions[:, 2] }, index=pd.Series(['Image {}'.format(n) for n in range(1, 11)], name='image'))\ .iloc[::-1]\ .plot.barh(); """ Explanation: Let's plot the proportions you computed above on a bar chart: End of explanation """ def summer_or_winter(image): ... _ = ok.grade('q13') _ = ok.backup() """ Explanation: Question 13 What do you notice about the colors present in the summer images compared to the winter ones? Use this info to write a function summer_or_winter. It takes in an image and returns True if the image is a summer image and False if the image is a winter image. Do not hard-code the function to the 10 images you currently have (eg. if image1, return False). We will run your function on other images that we've reserved for testing. You must classify all of the 10 provided images correctly to pass the test for this function. End of explanation """ import skimage as sk import skimage.io as skio def read_image(filename): '''Reads in an image from a filename''' return skio.imread(filename) def compress_image(im): '''Takes an image as an array and compresses it to look black.''' res = im / 25 return res.astype(np.uint8) def to_text_file(im, filename): ''' Takes in an image array and a filename for the resulting text file. Creates the encoded text file for later decoding. 
''' h, w, c = im.shape to_rgb = ' '.join to_row = '\n'.join to_lines = '\n'.join rgb = [[to_rgb(triplet) for triplet in row] for row in im.astype(str)] lines = to_lines([to_row(row) for row in rgb]) with open(filename, 'w') as f: f.write('{} {}\n'.format(h, w)) f.write(lines) f.write('\n') summers = skio.imread_collection('orig/summer/*.jpg') winters = skio.imread_collection('orig/winter/*.jpg') len(summers) sum_nums = np.array([ 5, 6, 9, 3, 2, 11, 12]) win_nums = np.array([ 10, 7, 8, 1, 4, 13, 14]) for im, n in zip(summers, sum_nums): to_text_file(compress_image(im), '{}.txt'.format(n)) for im, n in zip(winters, win_nums): to_text_file(compress_image(im), '{}.txt'.format(n)) """ Explanation: Congrats! You've created your very first classifier for this class. Question 14 How do you think your classification function will perform in general? Why do you think it will perform that way? What do you think would most likely give you false positives? False negatives? Write your answer here, replacing this text. Final note: While our approach here is simplistic, skin color segmentation -- figuring out which parts of the image belong to a human body -- is a key step in many algorithms such as face detection. Optional: Our code to encode images Here are the functions we used to generate the text files for this assignment. Feel free to send not-so-secret messages to your friends if you'd like. End of explanation """ _ = ok.grade_all() """ Explanation: 5. Submitting this assignment First, run this cell to run all the autograder tests at once so you can double- check your work. End of explanation """ # Now, we'll submit to okpy _ = ok.submit() """ Explanation: Now, run this code in your terminal to make a git commit that saves a snapshot of your changes in git. The last line of the cell runs git push, which will send your work to your personal Github repo. 
``` Tell git to commit all the changes so far git add -A Tell git to make the commit git commit -m "hw1 finished" Send your updates to your personal private repo git push origin master ``` Finally, we'll submit the assignment to OkPy so that the staff will know to grade it. You can submit as many times as you want and you can choose which submission you want us to grade by going to https://okpy.org/cal/data100/sp17/. End of explanation """
halexan/cs231n
assignment1/features.ipynb
mit
import random import numpy as np from cs231n.data_utils import load_CIFAR10 import matplotlib.pyplot as plt %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # for auto-reloading extenrnal modules # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython %load_ext autoreload %autoreload 2 """ Explanation: Image features exercise Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the assignments page on the course website. We have seen that we can achieve reasonable performance on an image classification task by training a linear classifier on the pixels of the input image. In this exercise we will show that we can improve our classification performance by training linear classifiers not on raw pixels but on features that are computed from the raw pixels. All of your work for this exercise will be done in this notebook. End of explanation """ from cs231n.features import color_histogram_hsv, hog_feature def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000): # Load the raw CIFAR-10 data cifar10_dir = 'cs231n/datasets/cifar-10-batches-py' X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir) # Subsample the data mask = range(num_training, num_training + num_validation) X_val = X_train[mask] y_val = y_train[mask] mask = range(num_training) X_train = X_train[mask] y_train = y_train[mask] mask = range(num_test) X_test = X_test[mask] y_test = y_test[mask] return X_train, y_train, X_val, y_val, X_test, y_test X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data() """ Explanation: Load data Similar to previous exercises, we will load CIFAR-10 data from disk. 
End of explanation """ from cs231n.features import * num_color_bins = 10 # Number of bins in the color histogram feature_fns = [hog_feature, lambda img: color_histogram_hsv(img, nbin=num_color_bins)] X_train_feats = extract_features(X_train, feature_fns, verbose=True) X_val_feats = extract_features(X_val, feature_fns) X_test_feats = extract_features(X_test, feature_fns) # Preprocessing: Subtract the mean feature mean_feat = np.mean(X_train_feats, axis=0, keepdims=True) X_train_feats -= mean_feat X_val_feats -= mean_feat X_test_feats -= mean_feat # Preprocessing: Divide by standard deviation. This ensures that each feature # has roughly the same scale. std_feat = np.std(X_train_feats, axis=0, keepdims=True) X_train_feats /= std_feat X_val_feats /= std_feat X_test_feats /= std_feat # Preprocessing: Add a bias dimension X_train_feats = np.hstack([X_train_feats, np.ones((X_train_feats.shape[0], 1))]) X_val_feats = np.hstack([X_val_feats, np.ones((X_val_feats.shape[0], 1))]) X_test_feats = np.hstack([X_test_feats, np.ones((X_test_feats.shape[0], 1))]) """ Explanation: Extract Features For each image we will compute a Histogram of Oriented Gradients (HOG) as well as a color histogram using the hue channel in HSV color space. We form our final feature vector for each image by concatenating the HOG and color histogram feature vectors. Roughly speaking, HOG should capture the texture of the image while ignoring color information, and the color histogram represents the color of the input image while ignoring texture. As a result, we expect that using both together ought to work better than using either alone. Verifying this assumption would be a good thing to try for the bonus section. The hog_feature and color_histogram_hsv functions both operate on a single image and return a feature vector for that image. 
The extract_features function takes a set of images and a list of feature functions and evaluates each feature function on each image, storing the results in a matrix where each column is the concatenation of all feature vectors for a single image. End of explanation """ # Use the validation set to tune the learning rate and regularization strength from cs231n.classifiers.linear_classifier import LinearSVM learning_rates = [1e-9, 1e-8, 1e-7] regularization_strengths = [1e5, 1e6, 1e7] results = {} best_val = -1 best_svm = None pass ################################################################################ # TODO: # # Use the validation set to set the learning rate and regularization strength. # # This should be identical to the validation that you did for the SVM; save # # the best trained classifer in best_svm. You might also want to play # # with different numbers of bins in the color histogram. If you are careful # # you should be able to get accuracy of near 0.44 on the validation set. # ################################################################################ pass ################################################################################ # END OF YOUR CODE # ################################################################################ # Print out results. for lr, reg in sorted(results): train_accuracy, val_accuracy = results[(lr, reg)] print 'lr %e reg %e train accuracy: %f val accuracy: %f' % ( lr, reg, train_accuracy, val_accuracy) print 'best validation accuracy achieved during cross-validation: %f' % best_val # Evaluate your trained SVM on the test set y_test_pred = best_svm.predict(X_test_feats) test_accuracy = np.mean(y_test == y_test_pred) print test_accuracy # An important way to gain intuition about how an algorithm works is to # visualize the mistakes that it makes. In this visualization, we show examples # of images that are misclassified by our current system. 
The first column # shows images that our system labeled as "plane" but whose true label is # something other than "plane". examples_per_class = 8 classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] for cls, cls_name in enumerate(classes): idxs = np.where((y_test != cls) & (y_test_pred == cls))[0] idxs = np.random.choice(idxs, examples_per_class, replace=False) for i, idx in enumerate(idxs): plt.subplot(examples_per_class, len(classes), i * len(classes) + cls + 1) plt.imshow(X_test[idx].astype('uint8')) plt.axis('off') if i == 0: plt.title(cls_name) plt.show() """ Explanation: Train SVM on features Using the multiclass SVM code developed earlier in the assignment, train SVMs on top of the features extracted above; this should achieve better results than training SVMs directly on top of raw pixels. End of explanation """ print X_train_feats.shape from cs231n.classifiers.neural_net import TwoLayerNet input_dim = X_train_feats.shape[1] hidden_dim = 500 num_classes = 10 net = TwoLayerNet(input_dim, hidden_dim, num_classes) best_net = None ################################################################################ # TODO: Train a two-layer neural network on image features. You may want to # # cross-validate various parameters as in previous sections. Store your best # # model in the best_net variable. # ################################################################################ pass ################################################################################ # END OF YOUR CODE # ################################################################################ # Run your neural net classifier on the test set. You should be able to # get more than 55% accuracy. test_acc = (net.predict(X_test_feats) == y_test).mean() print test_acc """ Explanation: Inline question 1: Describe the misclassification results that you see. Do they make sense? 
Neural Network on image features Earlier in this assigment we saw that training a two-layer neural network on raw pixels achieved better classification performance than linear classifiers on raw pixels. In this notebook we have seen that linear classifiers on image features outperform linear classifiers on raw pixels. For completeness, we should also try training a neural network on image features. This approach should outperform all previous approaches: you should easily be able to achieve over 55% classification accuracy on the test set; our best model achieves about 60% classification accuracy. End of explanation """
iwansmith/FutureLHCb
notebooks/01_LoadingSmearingPlotting.ipynb
gpl-3.0
import sys sys.path.append('../../FourVector') sys.path.append('../project') from FourVector import FourVector from ThreeVector import ThreeVector from FutureColliderTools import SmearVertex, GetCorrectedMass, GetMissingMass2, GetQ2 from FutureColliderDataLoader import LoadData_KMuNu, LoadData_DsMuNu from FutureColliderVariables import DataTuple, sigma_PV_LHCb, sigma_SV_LHCb import numpy as np import ROOT ROOT.enableJSVis() ROOT.gStyle.SetOptStat(0) """ Explanation: Setup the environment End of explanation """ from prettytable import PrettyTable EventType = "13512010" FileName, TreeName = DataTuple[ EventType ] InputData, K_PE, Mu_PE, B_PE, B_Origin, B_End = LoadData_KMuNu(EventType, (FileName, TreeName) ) DataTable = PrettyTable() DataTable.add_column("", InputData.dtype.names) for x in range(4): DataTable.add_column("Ev {0}".format(x), InputData[x]) print DataTable """ Explanation: Load $B_s \to K^- \mu^+ \nu_\mu$ Data Print a few events so the structure can be seen End of explanation """ K = FourVector( InputData[K_PE ] ) Mu = FourVector( InputData[Mu_PE] ) Y = K+Mu DataType = InputData[B_End[0]].dtype Smeared_SV = SmearVertex( InputData[B_End] .copy().view( (DataType, 3) ), sigma_SV_LHCb("K"))#*0.3 ) Smeared_PV = SmearVertex( InputData[B_Origin].copy().view( (DataType, 3) ), sigma_PV_LHCb("K"))#*0.3 ) B_Dir = ThreeVector( Smeared_PV ) - ThreeVector( Smeared_SV ) """ Explanation: Organise the data into a nice format Generate FourVectors fo the kaon and muon Smear the primary and secondary vertices Generate a ThreeVector of the B direction End of explanation """ MCORR = GetCorrectedMass( Y, B_Dir) MissingM2 = GetMissingMass2(K, Mu, B_Dir) Qsq_1, Qsq_2 = GetQ2(Y, Mu, B_Dir ) """ Explanation: Do some nice things with the data generate the corrected mass of the events generated the missing mass squared of the events generate the two quadratic solutions to the $q^2$ End of explanation """ h_MCORR = ROOT.TH1F("h_MCORR", "B_{s} Corrected Mass", 100, 3000, 6000) h_MM2 = 
ROOT.TH1F("h_MM2" , "Missing Mass Squared", 100, -4e6, 12e6) h_Qsq_1 = ROOT.TH1F("h_QSQ1" , "q^{2} Quadratic Solutions", 100, 0, 24e6) h_Qsq_2 = ROOT.TH1F("h_QSQ2" , "q^{2} Quadratic Solutions", 100, 0, 24e6) nev = len(K) h_MCORR.FillN(nev, MCORR, np.ones(nev)) h_MM2 .FillN(nev, MissingM2, np.ones(nev)) h_Qsq_1.FillN(nev, Qsq_1, np.ones(nev)) h_Qsq_2.FillN(nev, Qsq_2, np.ones(nev)) canvas_1 = ROOT.TCanvas("c1", "c1", 900,300) canvas_1.Divide(3) canvas_1.cd(1) h_MCORR.GetXaxis().SetTitle("m_{CORR}") h_MCORR.Draw() line = ROOT.TLine(5367,0,5367,h_MCORR.GetMaximum()*1.05); line.SetLineColor(2) line.Draw() canvas_1.cd(2) h_MM2.GetXaxis().SetTitle("m_{Missing}^{2}") h_MM2.Draw() canvas_1.cd(3) h_Qsq_1.Draw() h_Qsq_1.GetXaxis().SetTitle("q^{2}") h_Qsq_2.Draw("SAME") canvas_1.Draw() canvas_1.Print("KMu_1.pdf") """ Explanation: Plot the data End of explanation """
iiasa/xarray_tutorial
xarray-tutorial-egu2017.ipynb
bsd-3-clause
# standard imports import numpy as np import pandas as pd import matplotlib.pyplot as plt import xarray as xr import warnings %matplotlib inline np.set_printoptions(precision=3, linewidth=80, edgeitems=1) # make numpy less verbose xr.set_options(display_width=70) warnings.simplefilter('ignore') # filter some warning messages """ Explanation: SC57 - Working with big, multi-dimensional geoscientific datasets in Python: a tutorial introduction to xarray Original notebook by Stephan Hoyer, Rossbypalooza, 2016. Modified by Edward Byers, Matthew Gidden and Fabien Maussion for EGU General Assembly 2017, Vienna, Austria Thursday, 27th April, 15:30–17:00 / Room -2.91 Convenors * Dr Edward Byers - International Institute for Applied Systems Analysis, Laxenburg, Austria * Dr Matthew Gidden - International Institute for Applied Systems Analysis, Laxenburg, Austria * Dr Fabien Maussion - University of Innsbruck, Innsbruck, Austria With you can reach Structure of this tutorial Introduction to key features of xarray Basic operations in xarray: opening, inspecting, selecting and indexing data Selecting data with named dimensions Operations and computation Groupby and "split-apply-combine" Graphics Out-of-core computation 1. Key features of xarray What is xarray? xarray is an open source project and Python package xarray has been designed to perform labelled data analysis on multi-dimensional arrays the xarray approach adopts the Common Data Model for self-describing scientific data in widespread use in the Earth sciences xarray.Dataset is an in-memory representation of a netCDF file. xarray is built on top of the dataprocessing library Pandas (the best way to work with tabular data (e.g., CSV files) in Python) Our data <img src="./figures/dataset.png" width="50%" align="right"> numeric multi-dimensional labelled (lots of) metadata sometimes (very) large What is xarray good for? 
Gridded, multi-dimensional and large datasets, commonly used in earth sciences, but also increasingly finance, engineering (signal/image processing), and biological sciences Integration with other data analysis packages such as Pandas I/O operations (NetCDF) Plotting Out of core computation and parallel processing Extensions based on xarray ... Where can I find more info? For more information about xarray Read the online documentation Ask questions on StackOverflow View the source code and file bug reports on GitHub For more doing data analysis with Python: Thomas Wiecki, A modern guide to getting started with Data Science and Python Wes McKinney, Python for Data Analysis (book) Packages building on xarray for the geophysical sciences For analyzing GCM output: xgcm by Ryan Abernathey oogcm by Julien Le Sommer MPAS xarray by Phil Wolfram marc_analysis by Daniel Rothenberg Other tools: windspharm: wind spherical harmonics by Andrew Dawson eofs: empirical orthogonal functions by Andrew Dawson infinite-diff by Spencer Hill aospy by Spencer Hill and Spencer Clark regionmask by Mathias Hauser salem by Fabien Maussion Resources for teaching and learning xarray in geosciences: - Fabien's teaching repo: courses that combine teaching climatology and xarray 2. Basic operations in xarray Import python packages End of explanation """ import numpy as np a = np.array([[1, 3, 9], [2, 8, 4]]) a a[1, 2] a.mean(axis=0) """ Explanation: Basic data arrays in numpy End of explanation """ ds = xr.tutorial.load_dataset('air_temperature') ds ds.air ds.dims ds.attrs ds.air.values type(ds.air.values) ds.air.dims ds.air.attrs ds.air.attrs['tutorial-date'] = 27042017 ds.air.attrs """ Explanation: numpy is a powerful but "low-level" array manipulation tool. Axis only have numbers and no names (it is easy to forget which axis is what, a common source of trivial bugs), arrays can't carry metadata (e.g. units), and the data is unstructured (i.e. 
the coordinates and/or other related arrays have to be handled separately: another source of bugs). This is where xarray comes in! Properties of xarray.Dataset and xarray.DataArray objects We'll start with the "air_temperature" tutorial dataset. This tutorial comes with the xarray package. Other examples here. End of explanation """ kelvin = ds.air.mean(dim='time') kelvin.plot(); centigrade = kelvin - 273.16 centigrade.plot(); """ Explanation: Let's Do Some Math End of explanation """ # ufuncs work too np.sin(centigrade).plot(); """ Explanation: Notice xarray has changed the colormap according to the dataset (borrowing logic from Seaborn). * With degrees C, the data passes through 0, so a diverging colormap is used * With Kelvin, the default colormap is used. End of explanation """ ds """ Explanation: Adding Data to DataSets End of explanation """ ds['centigrade'] = centigrade ds['kelvin'] = kelvin ds ds.kelvin.attrs # attrs are empty! Let's add some ds.kelvin.attrs['Description'] = 'Mean air tempterature (through time) in kelvin.' ds.kelvin ds.to_netcdf('new file.nc') """ Explanation: Let's add those kelvin and centigrade dataArrays to the dataset. End of explanation """ ds.air[:, 1, 2] # note that the attributes, coordinates are preserved ds.air[:, 1, 2].plot(); """ Explanation: 3. Selecting data with named dimensions In xarray there are many different ways for selecting and indexing data. Positional indexing (old way) This is the "old way", i.e. like numpy: End of explanation """ ds.air.isel(time=0).plot(); # like above, but with a dimension name this time """ Explanation: This selection implies prior knowledge about the structure of the data, and is therefore much less readable than the "xarray methods" presented below. 
Selection by index Selection based on the index of a coordinate: End of explanation """ ds.air.sel(lat=72.5, lon=205).plot(); """ Explanation: Selection by value Selection based on the value of a coordinate: End of explanation """ ds.air.sel(time='2013-01-02').plot(); # Note that we will extract 4 time steps! 3d data is plotted as histogram ds.air.sel(time='2013-01-02T06:00').plot(); # or look at a single timestep """ Explanation: Selection by value works well for time, too End of explanation """ ds.air.sel(lat=slice(60, 50), lon=slice(200, 270), time='2013-01-02T06:00:00').plot(); """ Explanation: Selecting a range of values The syntax is similar, but you'll need to use a slice: End of explanation """ ds.air.sel(lat=41.8781, lon=360-87.6298, method='nearest', tolerance=5).plot(); """ Explanation: Nearest neighbor lookup End of explanation """ a = xr.DataArray(np.arange(3), dims='time', coords={'time':np.arange(3)}) b = xr.DataArray(np.arange(4), dims='space', coords={'space':np.arange(4)}) a + b """ Explanation: 4. Operations and computation We can do arithmetic directly on Dataset and DataArray objects. Labels are preserved and dataArray dimensions automatically aligned. Broadcasting <img src="./figures/broadcast.png" width="50%" align="left"> End of explanation """ atime = np.arange(3) btime = np.arange(5) + 1 atime, btime a = xr.DataArray(np.arange(3), dims='time', coords={'time':atime}) b = xr.DataArray(np.arange(5), dims='time', coords={'time':btime}) a + b """ Explanation: Alignment <img src="./figures/align.png" width="50%" align="left"> End of explanation """ ds.max() ds.air.median(dim=['lat', 'lon']).plot(); """ Explanation: Aggregation End of explanation """ means = ds.air.mean(dim=['time']) means.where(means > 273.15).plot(); """ Explanation: Masking with .where() End of explanation """ ds.air.groupby('time.season').mean() """ Explanation: 5. Groupby and "split-apply-combine" Xarray implements the "split-apply-combine" paradigm with groupby. 
This works really well for calculating climatologies: End of explanation """ ds.air.groupby('time.month').mean('time') clim = ds.air.groupby('time.month').mean('time') """ Explanation: <img src="./figures/split_apply-combine.png" alt="split" style="width: 50%;"/> End of explanation """ anomalies = ds.air.groupby('time.month') - clim anomalies anomalies.plot(); anomalies.sel(time= '2013-02').plot(); # Find all the anomolous values for February """ Explanation: You can also do arithmetic with groupby objects, which repeats the arithmetic over each group: End of explanation """ tmin = ds.air.resample('1D', dim='time', how='min') # Resample to one day '1D tmax = ds.air.resample('1D', dim='time', how='max') (tmin.sel(time='2013-02-15') - 273.15).plot(); ds_extremes = xr.Dataset({'tmin': tmin, 'tmax': tmax}) ds_extremes """ Explanation: Resample adjusts a time series to a new resolution: End of explanation """ zonal_t_average = ds.air.mean(dim=['lon', 'time']) - 273.15 zonal_t_average.plot(); # 1D arrays are plotted as line plots """ Explanation: 6. Graphics xarray plotting functions rely on matplotlib internally, but they make use of all available metadata to make the plotting operations more intuitive and interpretable. 1D plots End of explanation """ t_average = ds.air.mean(dim='time') - 273.15 t_average.plot(); # 2D arrays are plotted with pcolormesh t_average.plot.contourf(); # but you can use contour(), contourf() or imshow() if you wish """ Explanation: 2D plots End of explanation """ t_average.plot.contourf(cmap='BrBG_r', vmin=-15, vmax=15); t_average.plot.contourf(cmap='BrBG_r', levels=22, center=False); """ Explanation: Customizing 2d plots End of explanation """ air_outliers = ds.air.isel(time=0).copy() air_outliers[0, 0] = 100 air_outliers[-1, -1] = 400 air_outliers.plot(); # outliers mess with the datarange and colorscale! # Using `robust=True` uses the 2nd and 98th percentiles of the data to compute the color limits. 
air_outliers.plot(robust=True); """ Explanation: Dealing with Outliers End of explanation """ t_season = ds.air.groupby('time.season').mean(dim='time') - 273.15 # facet plot allows to do multiplot with the same color mappings t_season.plot.contourf(x='lon', y='lat', col='season', col_wrap=2, levels=22); """ Explanation: Facet plots End of explanation """ import cartopy.crs as ccrs f = plt.figure(figsize=(8, 4)) # Define the map projection *on which* you want to plot ax = plt.axes(projection=ccrs.Orthographic(-80, 35)) # ax is an empty plot. We now plot the variable t_average onto ax # the keyword "transform" tells the function in which projection the air temp data is stored t_average.plot(ax=ax, transform=ccrs.PlateCarree()) # Add gridlines and coastlines to the plot ax.coastlines(); ax.gridlines(); """ Explanation: Plotting on maps For plotting on maps, we rely on the excellent cartopy library. End of explanation """ # this time we need to retrieve the plots to do things with the axes later on p = t_season.plot(x='lon', y='lat', col='season', transform=ccrs.PlateCarree(), subplot_kws={'projection': ccrs.Orthographic(-80, 35)}) for ax in p.axes.flat: ax.coastlines() """ Explanation: Facet plots on maps End of explanation """ import seaborn as sns data = (ds_extremes .sel_points(lat=[41.8781, 37.7749], lon=[360-87.6298, 360-122.4194], method='nearest', tolerance=3, dim=xr.DataArray(['Chicago', 'San Francisco'], name='location', dims='location')) .to_dataframe() .reset_index() .assign(month=lambda x: x.time.dt.month)) plt.figure(figsize=(10, 5)) sns.violinplot('month', 'tmax', 'location', data=data, split=True, inner=None); """ Explanation: Seaborn is Cool Statistical visualization with Seaborn: End of explanation """ from glob import glob files = glob('data/*dis*.nc') runoff = xr.open_mfdataset(files) runoff """ Explanation: 7. Out-of-core computation Here's a quick demo of how xarray can leverage dask to work with data that doesn't fit in memory. 
This lets xarray substitute for tools like cdo and nco. Let's open 10 years of runoff data xarraycan open multiple files at once using string pattern matching. In this case we open all the files that match our filestr, i.e. all the files for the 2080s. Each of these files (compressed) is approximately 80 MB. PS - these files weren't available during the tutorial. The data we used was daily discharge hydrological data from the ISIMIP project (e.g. HadGEM2-ES / PCRGLOBWB / RCP2p6), which we cannot share here but is available for download. End of explanation """ runoff.time """ Explanation: xarray even puts them in the right order for you. End of explanation """ runoff.nbytes / 1e9 # Convert to gigiabytes """ Explanation: How big is all this data uncompressed? Will it fit into memory? End of explanation """ runoff = runoff.chunk({'lat': 60}) runoff.chunks %time ro_seasonal = runoff.groupby('time.season').mean('time') import dask from multiprocessing.pool import ThreadPool dask.set_options(pool=ThreadPool(1)) %time ro_seasonal.compute() dask.set_options(pool=ThreadPool(4)) %time ro_seasonal = runoff.groupby('time.season').mean('time') %time result = ro_seasonal.compute() brazil = dict(lat=slice(10.75, -40.75), lon=slice(-100.25, -25.25)) result.dis.sel(**brazil).plot(col='season', size=4, cmap='Spectral_r') """ Explanation: Working with Big Data This data is too big for our memory. That means we need to process it in chunks. We can do this chunking in xarray very easily. xarray computes data 'lazily'. That means that data is only loaded into memory when it is actually required. This also allows us to inspect datasets without loading all the data into memory. To do this xarray integrates with dask to support streaming computation on datasets that don’t fit into memory. <img src="./figures/dask-array.png" width="40%" align="center"> End of explanation """
icrtiou/coursera-ML
ex3-neural network/2- one vs all logistic regression.ipynb
mit
# add intercept=1 for x0 X = np.insert(raw_X, 0, values=np.ones(raw_X.shape[0]), axis=1) X.shape # y have 10 categories here. 1..10, they represent digit 0 as category 10 because matlab index start at 1 # I'll ditit 0, index 0 again y_matrix = [] for k in range(1, 11): y_matrix.append((raw_y == k).astype(int)) # last one is k==10, it's digit 0, bring it to the first position y_matrix = [y_matrix[-1]] + y_matrix[:-1] y = np.array(y_matrix) y.shape """ Explanation: prepare data End of explanation """ t0 = lr.logistic_regression(X, y[0]) print(t0.shape) y_pred = lr.predict(X, t0) print('Accuracy={}'.format(np.mean(y[0] == y_pred))) """ Explanation: train 1 model End of explanation """ k_theta = np.array([lr.logistic_regression(X, y[k]) for k in range(10)]) print(k_theta.shape) """ Explanation: Is this real...... train k model End of explanation """ prob_matrix = lr.sigmoid(X @ k_theta.T) np.set_printoptions(suppress=True) prob_matrix y_pred = np.argmax(prob_matrix, axis=1) y_answer = raw_y.copy() y_answer[y_answer==10] = 0 print(classification_report(y_answer, y_pred)) """ Explanation: making prediction think about the shape of k_theta, now you are making $X\times\theta^T$ $(5000, 401) \times (10, 401).T = (5000, 10)$ after that, you run sigmoid to get probabilities and for each row, you find the highest prob as the answer End of explanation """
smharper/openmc
examples/jupyter/mgxs-part-iii.ipynb
mit
import math import pickle from IPython.display import Image import matplotlib.pyplot as plt import numpy as np import openmc import openmc.mgxs from openmc.openmoc_compatible import get_openmoc_geometry import openmoc import openmoc.process from openmoc.materialize import load_openmc_mgxs_lib %matplotlib inline """ Explanation: This IPython Notebook illustrates the use of the openmc.mgxs.Library class. The Library class is designed to automate the calculation of multi-group cross sections for use cases with one or more domains, cross section types, and/or nuclides. In particular, this Notebook illustrates the following features: Calculation of multi-group cross sections for a fuel assembly Automated creation, manipulation and storage of MGXS with openmc.mgxs.Library Validation of multi-group cross sections with OpenMOC Steady-state pin-by-pin fission rates comparison between OpenMC and OpenMOC Note: This Notebook was created using OpenMOC to verify the multi-group cross-sections generated by OpenMC. You must install OpenMOC on your system to run this Notebook in its entirety. In addition, this Notebook illustrates the use of Pandas DataFrames to containerize multi-group cross section data. Generate Input Files End of explanation """ # 1.6 enriched fuel fuel = openmc.Material(name='1.6% Fuel') fuel.set_density('g/cm3', 10.31341) fuel.add_nuclide('U235', 3.7503e-4) fuel.add_nuclide('U238', 2.2625e-2) fuel.add_nuclide('O16', 4.6007e-2) # borated water water = openmc.Material(name='Borated Water') water.set_density('g/cm3', 0.740582) water.add_nuclide('H1', 4.9457e-2) water.add_nuclide('O16', 2.4732e-2) water.add_nuclide('B10', 8.0042e-6) # zircaloy zircaloy = openmc.Material(name='Zircaloy') zircaloy.set_density('g/cm3', 6.55) zircaloy.add_nuclide('Zr90', 7.2758e-3) """ Explanation: First we need to define materials that will be used in the problem. We'll create three materials for the fuel, water, and cladding of the fuel pins. 
End of explanation """ # Instantiate a Materials object materials_file = openmc.Materials([fuel, water, zircaloy]) # Export to "materials.xml" materials_file.export_to_xml() """ Explanation: With our three materials, we can now create a Materials object that can be exported to an actual XML file. End of explanation """ # Create cylinders for the fuel and clad fuel_outer_radius = openmc.ZCylinder(x0=0.0, y0=0.0, r=0.39218) clad_outer_radius = openmc.ZCylinder(x0=0.0, y0=0.0, r=0.45720) # Create boundary planes to surround the geometry min_x = openmc.XPlane(x0=-10.71, boundary_type='reflective') max_x = openmc.XPlane(x0=+10.71, boundary_type='reflective') min_y = openmc.YPlane(y0=-10.71, boundary_type='reflective') max_y = openmc.YPlane(y0=+10.71, boundary_type='reflective') min_z = openmc.ZPlane(z0=-10., boundary_type='reflective') max_z = openmc.ZPlane(z0=+10., boundary_type='reflective') """ Explanation: Now let's move on to the geometry. This problem will be a square array of fuel pins and control rod guide tubes for which we can use OpenMC's lattice/universe feature. The basic universe will have three regions for the fuel, the clad, and the surrounding coolant. The first step is to create the bounding surfaces for fuel and clad, as well as the outer bounding surfaces of the problem. 
End of explanation """ # Create a Universe to encapsulate a fuel pin fuel_pin_universe = openmc.Universe(name='1.6% Fuel Pin') # Create fuel Cell fuel_cell = openmc.Cell(name='1.6% Fuel') fuel_cell.fill = fuel fuel_cell.region = -fuel_outer_radius fuel_pin_universe.add_cell(fuel_cell) # Create a clad Cell clad_cell = openmc.Cell(name='1.6% Clad') clad_cell.fill = zircaloy clad_cell.region = +fuel_outer_radius & -clad_outer_radius fuel_pin_universe.add_cell(clad_cell) # Create a moderator Cell moderator_cell = openmc.Cell(name='1.6% Moderator') moderator_cell.fill = water moderator_cell.region = +clad_outer_radius fuel_pin_universe.add_cell(moderator_cell) """ Explanation: With the surfaces defined, we can now construct a fuel pin cell from cells that are defined by intersections of half-spaces created by the surfaces. End of explanation """ # Create a Universe to encapsulate a control rod guide tube guide_tube_universe = openmc.Universe(name='Guide Tube') # Create guide tube Cell guide_tube_cell = openmc.Cell(name='Guide Tube Water') guide_tube_cell.fill = water guide_tube_cell.region = -fuel_outer_radius guide_tube_universe.add_cell(guide_tube_cell) # Create a clad Cell clad_cell = openmc.Cell(name='Guide Clad') clad_cell.fill = zircaloy clad_cell.region = +fuel_outer_radius & -clad_outer_radius guide_tube_universe.add_cell(clad_cell) # Create a moderator Cell moderator_cell = openmc.Cell(name='Guide Tube Moderator') moderator_cell.fill = water moderator_cell.region = +clad_outer_radius guide_tube_universe.add_cell(moderator_cell) """ Explanation: Likewise, we can construct a control rod guide tube with the same surfaces. End of explanation """ # Create fuel assembly Lattice assembly = openmc.RectLattice(name='1.6% Fuel Assembly') assembly.pitch = (1.26, 1.26) assembly.lower_left = [-1.26 * 17. / 2.0] * 2 """ Explanation: Using the pin cell universe, we can construct a 17x17 rectangular lattice with a 1.26 cm pitch. 
End of explanation """ # Create array indices for guide tube locations in lattice template_x = np.array([5, 8, 11, 3, 13, 2, 5, 8, 11, 14, 2, 5, 8, 11, 14, 2, 5, 8, 11, 14, 3, 13, 5, 8, 11]) template_y = np.array([2, 2, 2, 3, 3, 5, 5, 5, 5, 5, 8, 8, 8, 8, 8, 11, 11, 11, 11, 11, 13, 13, 14, 14, 14]) # Initialize an empty 17x17 array of the lattice universes universes = np.empty((17, 17), dtype=openmc.Universe) # Fill the array with the fuel pin and guide tube universes universes[:,:] = fuel_pin_universe universes[template_x, template_y] = guide_tube_universe # Store the array of universes in the lattice assembly.universes = universes """ Explanation: Next, we create a NumPy array of fuel pin and guide tube universes for the lattice. End of explanation """ # Create root Cell root_cell = openmc.Cell(name='root cell') root_cell.fill = assembly # Add boundary planes root_cell.region = +min_x & -max_x & +min_y & -max_y & +min_z & -max_z # Create root Universe root_universe = openmc.Universe(universe_id=0, name='root universe') root_universe.add_cell(root_cell) """ Explanation: OpenMC requires that there is a "root" universe. Let us create a root cell that is filled by the assembly and then assign it to the root universe. End of explanation """ # Create Geometry and set root Universe geometry = openmc.Geometry(root_universe) # Export to "geometry.xml" geometry.export_to_xml() """ Explanation: We now must create a geometry that is assigned a root universe and export it to XML. End of explanation """ # OpenMC simulation parameters batches = 50 inactive = 10 particles = 10000 # Instantiate a Settings object settings_file = openmc.Settings() settings_file.batches = batches settings_file.inactive = inactive settings_file.particles = particles settings_file.output = {'tallies': False} # Create an initial uniform spatial source distribution over fissionable zones bounds = [-10.71, -10.71, -10, 10.71, 10.71, 10.] 
uniform_dist = openmc.stats.Box(bounds[:3], bounds[3:], only_fissionable=True) settings_file.source = openmc.Source(space=uniform_dist) # Export to "settings.xml" settings_file.export_to_xml() """ Explanation: With the geometry and materials finished, we now just need to define simulation parameters. In this case, we will use 10 inactive batches and 40 active batches each with 2500 particles. End of explanation """ # Instantiate a Plot plot = openmc.Plot.from_geometry(geometry) plot.pixels = (250, 250) plot.color_by = 'material' plot.to_ipython_image() """ Explanation: Let us also create a plot to verify that our fuel assembly geometry was created successfully. End of explanation """ # Instantiate a 2-group EnergyGroups object groups = openmc.mgxs.EnergyGroups() groups.group_edges = np.array([0., 0.625, 20.0e6]) """ Explanation: As we can see from the plot, we have a nice array of fuel and guide tube pin cells with fuel, cladding, and water! Create an MGXS Library Now we are ready to generate multi-group cross sections! First, let's define a 2-group structure using the built-in EnergyGroups class. End of explanation """ # Initialize a 2-group MGXS Library for OpenMOC mgxs_lib = openmc.mgxs.Library(geometry) mgxs_lib.energy_groups = groups """ Explanation: Next, we will instantiate an openmc.mgxs.Library for the energy groups with the fuel assembly geometry. End of explanation """ # Specify multi-group cross section types to compute mgxs_lib.mgxs_types = ['nu-transport', 'nu-fission', 'fission', 'nu-scatter matrix', 'chi'] """ Explanation: Now, we must specify to the Library which types of cross sections to compute. 
In particular, the following are the multi-group cross section MGXS subclasses that are mapped to string codes accepted by the Library class: TotalXS ("total") TransportXS ("transport" or "nu-transport with nu set to True) AbsorptionXS ("absorption") CaptureXS ("capture") FissionXS ("fission" or "nu-fission" with nu set to True) KappaFissionXS ("kappa-fission") ScatterXS ("scatter" or "nu-scatter" with nu set to True) ScatterMatrixXS ("scatter matrix" or "nu-scatter matrix" with nu set to True) Chi ("chi") ChiPrompt ("chi prompt") InverseVelocity ("inverse-velocity") PromptNuFissionXS ("prompt-nu-fission") DelayedNuFissionXS ("delayed-nu-fission") ChiDelayed ("chi-delayed") Beta ("beta") In this case, let's create the multi-group cross sections needed to run an OpenMOC simulation to verify the accuracy of our cross sections. In particular, we will define "nu-transport", "nu-fission", '"fission", "nu-scatter matrix" and "chi" cross sections for our Library. Note: A variety of different approximate transport-corrected total multi-group cross sections (and corresponding scattering matrices) can be found in the literature. At the present time, the openmc.mgxs module only supports the "P0" transport correction. This correction can be turned on and off through the boolean Library.correction property which may take values of "P0" (default) or None. End of explanation """ # Specify a "cell" domain type for the cross section tally filters mgxs_lib.domain_type = 'cell' # Specify the cell domains over which to compute multi-group cross sections mgxs_lib.domains = geometry.get_all_material_cells().values() """ Explanation: Now we must specify the type of domain over which we would like the Library to compute multi-group cross sections. The domain type corresponds to the type of tally filter to be used in the tallies created to compute multi-group cross sections. At the present time, the Library supports "material", "cell", "universe", and "mesh" domain types. 
We will use a "cell" domain type here to compute cross sections in each of the cells in the fuel assembly geometry. Note: By default, the Library class will instantiate MGXS objects for each and every domain (material, cell or universe) in the geometry of interest. However, one may specify a subset of these domains to the Library.domains property. In our case, we wish to compute multi-group cross sections in each and every cell since they will be needed in our downstream OpenMOC calculation on the identical combinatorial geometry mesh. End of explanation """ # Compute cross sections on a nuclide-by-nuclide basis mgxs_lib.by_nuclide = True """ Explanation: We can easily instruct the Library to compute multi-group cross sections on a nuclide-by-nuclide basis with the boolean Library.by_nuclide property. By default, by_nuclide is set to False, but we will set it to True here. End of explanation """ # Construct all tallies needed for the multi-group cross section library mgxs_lib.build_library() """ Explanation: Lastly, we use the Library to construct the tallies needed to compute all of the requested multi-group cross sections in each domain and nuclide. End of explanation """ # Create a "tallies.xml" file for the MGXS Library tallies_file = openmc.Tallies() mgxs_lib.add_to_tallies_file(tallies_file, merge=True) """ Explanation: The tallies can now be export to a "tallies.xml" input file for OpenMC. NOTE: At this point the Library has constructed nearly 100 distinct Tally objects. The overhead to tally in OpenMC scales as $O(N)$ for $N$ tallies, which can become a bottleneck for large tally datasets. To compensate for this, the Python API's Tally, Filter and Tallies classes allow for the smart merging of tallies when possible. The Library class supports this runtime optimization with the use of the optional merge paramter (False by default) for the Library.add_to_tallies_file(...) method, as shown below. 
End of explanation """ # Instantiate a tally Mesh mesh = openmc.RegularMesh(mesh_id=1) mesh.dimension = [17, 17] mesh.lower_left = [-10.71, -10.71] mesh.upper_right = [+10.71, +10.71] # Instantiate tally Filter mesh_filter = openmc.MeshFilter(mesh) # Instantiate the Tally tally = openmc.Tally(name='mesh tally') tally.filters = [mesh_filter] tally.scores = ['fission', 'nu-fission'] # Add tally to collection tallies_file.append(tally) # Export all tallies to a "tallies.xml" file tallies_file.export_to_xml() # Run OpenMC openmc.run() """ Explanation: In addition, we instantiate a fission rate mesh tally to compare with OpenMOC. End of explanation """ # Load the last statepoint file sp = openmc.StatePoint('statepoint.50.h5') """ Explanation: Tally Data Processing Our simulation ran successfully and created statepoint and summary output files. We begin our analysis by instantiating a StatePoint object. End of explanation """ # Initialize MGXS Library with OpenMC statepoint data mgxs_lib.load_from_statepoint(sp) """ Explanation: The statepoint is now ready to be analyzed by the Library. We simply have to load the tallies from the statepoint into the Library and our MGXS objects will compute the cross sections for us under-the-hood. End of explanation """ # Retrieve the NuFissionXS object for the fuel cell from the library fuel_mgxs = mgxs_lib.get_mgxs(fuel_cell, 'nu-fission') """ Explanation: Voila! Our multi-group cross sections are now ready to rock 'n roll! Extracting and Storing MGXS Data The Library supports a rich API to automate a variety of tasks, including multi-group cross section data retrieval and storage. We will highlight a few of these features here. First, the Library.get_mgxs(...) method allows one to extract an MGXS object from the Library for a particular domain and cross section type. The following cell illustrates how one may extract the NuFissionXS object for the fuel cell. Note: The MGXS.get_mgxs(...) 
method will accept either the domain or the integer domain ID of interest. End of explanation """ df = fuel_mgxs.get_pandas_dataframe() df """ Explanation: The NuFissionXS object supports all of the methods described previously in the openmc.mgxs tutorials, such as Pandas DataFrames: Note that since so few histories were simulated, we should expect a few division-by-error errors as some tallies have not yet scored any results. End of explanation """ fuel_mgxs.print_xs() """ Explanation: Similarly, we can use the MGXS.print_xs(...) method to view a string representation of the multi-group cross section data. End of explanation """ # Store the cross section data in an "mgxs/mgxs.h5" HDF5 binary file mgxs_lib.build_hdf5_store(filename='mgxs.h5', directory='mgxs') """ Explanation: One can export the entire Library to HDF5 with the Library.build_hdf5_store(...) method as follows: End of explanation """ # Store a Library and its MGXS objects in a pickled binary file "mgxs/mgxs.pkl" mgxs_lib.dump_to_file(filename='mgxs', directory='mgxs') # Instantiate a new MGXS Library from the pickled binary file "mgxs/mgxs.pkl" mgxs_lib = openmc.mgxs.Library.load_from_file(filename='mgxs', directory='mgxs') """ Explanation: The HDF5 store will contain the numerical multi-group cross section data indexed by domain, nuclide and cross section type. Some data workflows may be optimized by storing and retrieving binary representations of the MGXS objects in the Library. This feature is supported through the Library.dump_to_file(...) and Library.load_from_file(...) routines which use Python's pickle module. This is illustrated as follows. 
End of explanation """ # Create a 1-group structure coarse_groups = openmc.mgxs.EnergyGroups(group_edges=[0., 20.0e6]) # Create a new MGXS Library on the coarse 1-group structure coarse_mgxs_lib = mgxs_lib.get_condensed_library(coarse_groups) # Retrieve the NuFissionXS object for the fuel cell from the 1-group library coarse_fuel_mgxs = coarse_mgxs_lib.get_mgxs(fuel_cell, 'nu-fission') # Show the Pandas DataFrame for the 1-group MGXS coarse_fuel_mgxs.get_pandas_dataframe() """ Explanation: The Library class may be used to leverage the energy condensation features supported by the MGXS class. In particular, one can use the Library.get_condensed_library(...) with a coarse group structure which is a subset of the original "fine" group structure as shown below. End of explanation """ # Create an OpenMOC Geometry from the OpenMC Geometry openmoc_geometry = get_openmoc_geometry(mgxs_lib.geometry) """ Explanation: Verification with OpenMOC Of course it is always a good idea to verify that one's cross sections are accurate. We can easily do so here with the deterministic transport code OpenMOC. We first construct an equivalent OpenMOC geometry. End of explanation """ # Load the library into the OpenMOC geometry materials = load_openmc_mgxs_lib(mgxs_lib, openmoc_geometry) """ Explanation: Now, we can inject the multi-group cross sections into the equivalent fuel assembly OpenMOC geometry. The openmoc.materialize module supports the loading of Library objects from OpenMC as illustrated below. End of explanation """ # Generate tracks for OpenMOC track_generator = openmoc.TrackGenerator(openmoc_geometry, num_azim=32, azim_spacing=0.1) track_generator.generateTracks() # Run OpenMOC solver = openmoc.CPUSolver(track_generator) solver.computeEigenvalue() """ Explanation: We are now ready to run OpenMOC to verify our cross-sections from OpenMC. 
End of explanation """ # Print report of keff and bias with OpenMC openmoc_keff = solver.getKeff() openmc_keff = sp.k_combined.nominal_value bias = (openmoc_keff - openmc_keff) * 1e5 print('openmc keff = {0:1.6f}'.format(openmc_keff)) print('openmoc keff = {0:1.6f}'.format(openmoc_keff)) print('bias [pcm]: {0:1.1f}'.format(bias)) """ Explanation: We report the eigenvalues computed by OpenMC and OpenMOC here together to summarize our results. End of explanation """ # Get the OpenMC fission rate mesh tally data mesh_tally = sp.get_tally(name='mesh tally') openmc_fission_rates = mesh_tally.get_values(scores=['nu-fission']) # Reshape array to 2D for plotting openmc_fission_rates.shape = (17,17) # Normalize to the average pin power openmc_fission_rates /= np.mean(openmc_fission_rates[openmc_fission_rates > 0.]) """ Explanation: There is a non-trivial bias between the eigenvalues computed by OpenMC and OpenMOC. One can show that these biases do not converge to <100 pcm with more particle histories. For heterogeneous geometries, additional measures must be taken to address the following three sources of bias: Appropriate transport-corrected cross sections Spatial discretization of OpenMOC's mesh Constant-in-angle multi-group cross sections Flux and Pin Power Visualizations We will conclude this tutorial by illustrating how to visualize the fission rates computed by OpenMOC and OpenMC. First, we extract volume-integrated fission rates from OpenMC's mesh fission rate tally for each pin cell in the fuel assembly. 
End of explanation """ # Create OpenMOC Mesh on which to tally fission rates openmoc_mesh = openmoc.process.Mesh() openmoc_mesh.dimension = np.array(mesh.dimension) openmoc_mesh.lower_left = np.array(mesh.lower_left) openmoc_mesh.upper_right = np.array(mesh.upper_right) openmoc_mesh.width = openmoc_mesh.upper_right - openmoc_mesh.lower_left openmoc_mesh.width /= openmoc_mesh.dimension # Tally OpenMOC fission rates on the Mesh openmoc_fission_rates = openmoc_mesh.tally_fission_rates(solver) openmoc_fission_rates = np.squeeze(openmoc_fission_rates) openmoc_fission_rates = np.fliplr(openmoc_fission_rates) # Normalize to the average pin fission rate openmoc_fission_rates /= np.mean(openmoc_fission_rates[openmoc_fission_rates > 0.]) """ Explanation: Next, we extract OpenMOC's volume-averaged fission rates into a 2D 17x17 NumPy array. End of explanation """ # Ignore zero fission rates in guide tubes with Matplotlib color scheme openmc_fission_rates[openmc_fission_rates == 0] = np.nan openmoc_fission_rates[openmoc_fission_rates == 0] = np.nan # Plot OpenMC's fission rates in the left subplot fig = plt.subplot(121) plt.imshow(openmc_fission_rates, interpolation='none', cmap='jet') plt.title('OpenMC Fission Rates') # Plot OpenMOC's fission rates in the right subplot fig2 = plt.subplot(122) plt.imshow(openmoc_fission_rates, interpolation='none', cmap='jet') plt.title('OpenMOC Fission Rates') """ Explanation: Now we can easily use Matplotlib to visualize the fission rates from OpenMC and OpenMOC side-by-side. End of explanation """
jonathf/chaospy
docs/user_guide/main_usage/pseudo_spectral_projection.ipynb
mit
import chaospy
from problem_formulation import joint

# Step 1: quadrature nodes/weights over the joint input distribution.
# Full tensor-grid Gaussian quadrature rules of increasing order ...
gauss_quads = [
    chaospy.generate_quadrature(order, joint, rule="gaussian")
    for order in range(1, 8)
]
# ... and Smolyak sparse-grid rules mixing Genz-Keister and Clenshaw-Curtis.
sparse_quads = [
    chaospy.generate_quadrature(
        order, joint, rule=["genz_keister_24", "clenshaw_curtis"], sparse=True)
    for order in range(1, 5)
]

from matplotlib import pyplot
pyplot.rc("figure", figsize=[12, 4])

# Visualize one rule of each kind; marker area is scaled by the quadrature weight.
nodes, weights = gauss_quads[5]
pyplot.subplot(121)
pyplot.title("Gaussian")
pyplot.scatter(*nodes, s=weights*2e3)

# Sparse grids can carry negative weights: plot those in grey using |weight|.
nodes, weights = sparse_quads[3]
idx = weights > 0
pyplot.subplot(122)
pyplot.title("sparse-grid")
pyplot.scatter(*nodes[:, idx], s=weights[idx]*2e3)
pyplot.scatter(*nodes[:, ~idx], s=-weights[~idx]*2e3, color="grey")
pyplot.show()
"""
Explanation: Pseudo-spectral projection
Pseudo-spectral projection method is one of two non-intrusive polynomial chaos expansion methods. (The other being point collocation method.) In a nutshell it can be performed as follows:
Generate nodes $Q_1, ..., Q_N$ and weights $W_1, ..., W_N$ from a quadrature integration scheme.
Use nodes to create model evaluations $U_1, ..., U_N$.
Select an expansion of orthogonal polynomials $P_1, ..., P_M$.
Estimate Fourier coefficients $c_1, ..., c_M$, creating model approximation $\hat u(q) = \sum_i c_i P_i$.
Perform model analysis on approximation $\hat u(q)$ as a proxy for the real model.
Let us go through the steps in more detail.
Generate quadrature nodes and weights
In pseudo spectral projection, the nodes used to evaluate the model have to be taken from quadrature integration scheme.
For example, we can choose full tensor-grid with optimal Gaussian quadrature, and Smolyak sparse-grid with Genz-Keister and Clenshaw-Curtis quadrature:
End of explanation
"""
from problem_formulation import model_solver, coordinates
import numpy

# Step 2: evaluate the model once per quadrature node (one response curve per node).
gauss_evals = [
    numpy.array([model_solver(node) for node in nodes.T])
    for nodes, weights in gauss_quads
]
sparse_evals = [
    numpy.array([model_solver(node) for node in nodes.T])
    for nodes, weights in sparse_quads
]

# Ensemble of model responses along the coordinate grid from problem_formulation.
pyplot.subplot(121)
pyplot.plot(coordinates, gauss_evals[6].T, alpha=0.3)
pyplot.title("Gaussian")
pyplot.subplot(122)
pyplot.plot(coordinates, sparse_evals[3].T, alpha=0.3)
pyplot.title("sparse-grid")
pyplot.show()
"""
Explanation: Evaluating model solver
Like in the case of problem formulation again, evaluation is straight forward:
End of explanation
"""
# Step 3: orthogonal polynomial expansions (projection requires orthogonality),
# one expansion per polynomial order, built w.r.t. the joint distribution.
expansions = [chaospy.generate_expansion(order, joint) for order in range(1, 10)]
expansions[0].round(10)
"""
Explanation: Expansion of orthogonal polynomials
Unlike point collocation method, the polynomials in pseudo-spectral projection needs to be orthogonal for the method to work.
This can be achieved by using the chaospy.generate_expansion() function:
End of explanation
"""
# Step 4: estimate Fourier coefficients by quadrature integration, yielding one
# polynomial surrogate per (expansion, quadrature rule, evaluations) triple.
# zip() truncates to the shorter sequence, so there is one surrogate per rule.
gauss_model_approx = [
    chaospy.fit_quadrature(expansion, nodes, weights, evals)
    for expansion, (nodes, weights), evals in zip(expansions, gauss_quads, gauss_evals)
]
sparse_model_approx = [
    chaospy.fit_quadrature(expansion, nodes, weights, evals)
    for expansion, (nodes, weights), evals in zip(expansions, sparse_quads, sparse_evals)
]

# Sanity check: evaluate one surrogate of each kind at its own quadrature nodes.
model_approx = gauss_model_approx[4]
nodes, _ = gauss_quads[4]
evals = model_approx(*nodes)
pyplot.subplot(121)
pyplot.plot(coordinates, evals, alpha=0.3)
pyplot.title("Gaussian")

model_approx = sparse_model_approx[1]
nodes, _ = sparse_quads[1]
evals = model_approx(*nodes)
pyplot.subplot(122)
pyplot.plot(coordinates, evals, alpha=0.3)
pyplot.title("sparse-grid")
pyplot.show()
"""
Explanation: Fourier coefficients
Polynomial chaos expansion assumes that we can model function $u$ using an expansion:
$$ u(q) \approx \hat u(q) = \sum_{i=1}^N c_i P_i $$
Since $P_1, \dots, P_N$ are orthogonal, the optimal selection for $c_1, \dots, c_N$, the Fourier coefficients becomes:
$$ c_i = \frac{\left\langle u, P_i\right\rangle}{ \left\langle P_i, P_i\right\rangle} $$
These coefficients can be estimated in pseudo-spectral projection using quadrature integration. In chaospy we calculate the model approximation directly through the chaospy.fit_quadrature() function:
End of explanation
"""
# Step 5: descriptive statistics are obtained from the surrogate analytically.
expected = chaospy.E(gauss_model_approx[-2], joint)
std = chaospy.Std(gauss_model_approx[-2], joint)
expected[:4].round(4), std[:4].round(4)

# Mean response with a +/- 2 standard deviation band.
pyplot.rc("figure", figsize=[6, 4])
pyplot.xlabel("coordinates")
pyplot.ylabel("model approximation")
pyplot.fill_between(
    coordinates, expected-2*std, expected+2*std, alpha=0.3)
pyplot.plot(coordinates, expected)
pyplot.show()
"""
Explanation: Note that if the Fourier coefficients are needed, then they are still accessible by passing retall=True to chaospy.fit_quadrature().
Descriptive statistics
The expected value and variance is calculated as follows:
End of explanation
"""
# Compare surrogate statistics against the analytical reference solution.
from problem_formulation import error_in_mean, error_in_variance
error_in_mean(expected), error_in_variance(std**2)
"""
Explanation: Error analysis
It is hard to assess how well these models are doing from the final estimation alone. They look about the same. So to compare results, we do error analysis. To do so, we use the reference analytical solution and error function as defined in problem formulation.
End of explanation
"""
# Convergence study: error in mean/variance versus the number of model
# evaluations (quadrature nodes) for each surrogate.
gauss_sizes = [len(weights) for _, weights in gauss_quads]
eps_gauss_mean = [
    error_in_mean(chaospy.E(model, joint))
    for model in gauss_model_approx
]
eps_gauss_var = [
    error_in_variance(chaospy.Var(model, joint))
    for model in gauss_model_approx
]
sparse_sizes = [len(weights) for _, weights in sparse_quads]
eps_sparse_mean = [
    error_in_mean(chaospy.E(model, joint))
    for model in sparse_model_approx
]
eps_sparse_var = [
    error_in_variance(chaospy.Var(model, joint))
    for model in sparse_model_approx
]
pyplot.rc("figure", figsize=[12, 4])
pyplot.subplot(121)
pyplot.title("Error in mean")
pyplot.loglog(gauss_sizes, eps_gauss_mean, "o-", label="Gaussian")
pyplot.loglog(sparse_sizes, eps_sparse_mean, "o--", label="sparse")
pyplot.legend()
pyplot.subplot(122)
pyplot.title("Error in variance")
pyplot.loglog(gauss_sizes, eps_gauss_var, "o-", label="Gaussian")
pyplot.loglog(sparse_sizes, eps_sparse_var, "o--", label="sparse")
pyplot.show()
"""
Explanation: The analysis can be performed as follows:
End of explanation
"""
nholtz/structural-analysis
matrix-methods/frame2d/30-test-Beaufait-9-4-1.ipynb
cc0-1.0
from Frame2D import Frame2D
from Frame2D.Members import Member
# because units are kips, inches
Member.E = 30000. #ksi
Member.G = 11500. #ksi
from IPython import display
# reference figure for the frame geometry of this example
display.Image('data/Beaufait-9-4-1.d/fig1.jpg')
# build the model from the input files under data/Beaufait-9-4-1.d/ and solve it
frame = Frame2D('Beaufait-9-4-1') # Example 9.4.1, p. 460
frame.input_all()
rs = frame.solve()
frame.print_input()
# mult scales the three force components on output; 1/12 converts moments
# from in-kips back to ft-kips for display
frame.print_results(rs,mult=[1.,1.,1./12.])
"""
Explanation: Example 9.4.1, Beaufait 1977
From, Basic Concepts of Structural Analysis, Beaufait, Fred W., Prentice-Hall, 1977.
End of explanation
"""
import pandas as pd
# member end forces as published in the textbook solution (moments in ft-kips)
efs = [('M1',11.77,2.72,33.06,-11.77,-2.72,7.75), # end forces from soln, Beaufait, p 473
       ('M2',9.40,8.85,-7.83,0.60,15.15,-74.11),
       ('M3',14.18,5.27,74.10,-14.18,-5.27,57.81)]
BOOK_MEFS = pd.DataFrame(efs,columns='ID,FXJ,FYJ,MZJ,FXK,FYK,MZK'.split(',')).set_index('ID')
BOOK_MEFS[['MZJ','MZK']] *= 12. # convert ft-kips to in-kips
BOOK_MEFS
# member end forces computed by this solution, in the same column layout
HERE_MEFS = pd.DataFrame(frame.list_member_end_forces(rs),
                         columns='ID,FXJ,FYJ,MZJ,FXK,FYK,MZK'.split(',')).set_index('ID')
HERE_MEFS
# percentage difference of this solution relative to the book values
pdiff = (100*(HERE_MEFS-BOOK_MEFS)/BOOK_MEFS)
pdiff.round(2)
"""
Explanation: Compare Solution Here with that in the Book
End of explanation
"""
eecs445-f16/umich-eecs445-f16
handsOn_lecture00_python_tutorial/lecture00_python_tutorial_exercises_with_solutions.ipynb
mit
# Question 1: sum of all multiples of any number in `l` below `max_`, in 3 ways.

# 1. With loops.
# OK, not very "Pythonic"
def sum_of_multiples_with_loop(l, max_):
    """Sum every i in [1, max_) divisible by at least one divisor in l (nested loops)."""
    total = 0
    # [1, 1000)
    for i in range(1, max_):
        for j in l:
            if i % j == 0:
                total += i
                break  # count each multiple once, even if several divisors match
    return total

# 2. With filter.
# Better (at least more "Pythonic")
def sum_of_multiples_with_loop1(l, max_):
    """Same result as above, expressed as a single filter()/any() pass."""
    return sum(filter(lambda k: any(k % i == 0 for i in l), list(range(max_))))

def sum_of_arithmetic_series(n, u1, d):
    """Sum of the first n terms of an arithmetic series (bonus part of Question 1).

    S_n = n/2 * (2*u1 + (n - 1)*d).  For integer u1 and d the product
    n * (2*u1 + (n - 1)*d) is always even, so floor division is exact.
    """
    return n * (2 * u1 + (n - 1) * d) // 2

# 3. With the inclusion-exclusion principle -- no scan over candidates at all.
def sum_of_multiples_inclusion_exclusion(l, max_):
    """Sum of multiples below max_ of the distinct divisors in l, in closed form.

    The multiples of each divisor subset's lcm form an arithmetic series, so each
    inclusion-exclusion term costs O(1).  Assumes the divisors in l are distinct.
    """
    from itertools import combinations
    from functools import reduce
    from math import gcd
    total = 0
    for r in range(1, len(l) + 1):
        sign = (-1) ** (r + 1)  # add single divisors, subtract pairs, add triples, ...
        for subset in combinations(l, r):
            m = reduce(lambda a, b: a * b // gcd(a, b), subset)  # lcm of the subset
            n = (max_ - 1) // m  # number of multiples of m in [1, max_)
            total += sign * sum_of_arithmetic_series(n, m, m)
    return total

print (sum_of_multiples_with_loop([3, 5], 1000))
print (sum_of_multiples_with_loop1([3, 5], 1000))
print (sum_of_multiples_inclusion_exclusion([3, 5], 1000))
"""
Explanation: Question 1: Numbers and Data Structures
Example: If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
Question: Find the sum of all the multiples of 3 or 5 below 1000 in 3 ways.
*Hint 1: * Loop through all the possible numbers, i.e., $x \in \mathbb{N} \wedge x < 1000$ and simply add up the multiples as you go through them. Use two loops first and then try making your code more concise.
*Bonus: * Use the inclusion-exclusion principle. Write a function that calculates the sum of the first $n$ numbers in an arithmetic series.
Hint: $S_n = \frac{n}{2}(u_1 + u_n) = \frac{n}{2}(2u_1 + (n - 1) \cdot d)$
End of explanation
"""
# Implement part (a) below
def string_repeater(s):
    """Concatenate every prefix of s, e.g. "Code" -> "CCoCodCode"."""
    return ''.join([s[:i] for i in range(len(s) + 1)])

# Implement part (b) below
def string_occurence_remover(a, b, c):
    """Replace every occurrence of a in b with c."""
    return b.replace(a, c)

# Implement part (c) below
def caeser_decipher(s, n):
    """Decipher a lowercase Caesar cipher by shifting each letter by n positions."""
    import string
    return s.translate(str.maketrans(string.ascii_lowercase, string.ascii_lowercase[n:] + string.ascii_lowercase[:n]))

def is_palindrome(s):
    """True if s reads the same both ways, ignoring case, spacing and punctuation."""
    s = ''.join(c for c in s if c.isalpha()).lower()
    return s == s[::-1]

def filter_long_words(l, n):
    """Return the words of l that are longer than n, preserving order."""
    return list(filter(lambda s: len(s) > n, l))

def look_and_say(s, n):
    """First n look-and-say terms starting at s, each paired with a sorted
    OrderedDict of character counts for that term."""
    from collections import OrderedDict
    from itertools import groupby, accumulate
    # accumulate() keeps s as the first term and applies one look-and-say step per slot
    temp = [s for i in range(n)]
    strings = list(accumulate(temp, lambda s_, _: ''.join(str(len(list(g))) + k for k, g in groupby(s_))))
    dictionaries = [OrderedDict(sorted({c : strings[seq_index].count(c) for c in list(set(strings[seq_index]))}.items())) for seq_index in range(len(strings))]
    return list(zip(strings, dictionaries))

# Simple Tests for part (a)
assert(string_repeater("Code") == "CCoCodCode")
assert(string_repeater("EECS445") == "EEEEECEECSEECS4EECS44EECS445")
# Simple Tests for part (b)
assert(string_occurence_remover("Boring", "PythonIsBoring", "Fun") == "PythonIsFun")
assert(string_occurence_remover("12", "Today is September 12 and 12 is my favorite number.", "9") ==
       "Today is September 9 and 9 is my favorite number.")
# Simple Tests for part (c)
assert(caeser_decipher("vjku ku eqfg", -2) == "this is code")
assert(caeser_decipher("h khjd bnlotsdqr", 1) == "i like computers")
# Simple Tests for part (d)
assert(is_palindrome("Rats live on no evil star."))
assert(is_palindrome("On a clover, if alive, erupts a vast pure evil; a fire volcano"))
assert(not is_palindrome("Hello, this is Jupyter Notebook speaking."))
assert(not is_palindrome("I am currently in a hands-on lecture."))
# Simple Tests for part (e)
assert(filter_long_words(['a', '', '0', 'a0', 'a0b02030', 'ee', 'cs', 'eecs', 'eeccss'], 2) == ['a0b02030', 'eecs', 'eeccss'])
assert(filter_long_words(['1', '2'], 0) == ['1', '2'])

# Simple Test for part (f)
from collections import OrderedDict
assert(look_and_say("aabbcc", 5) == [('aabbcc', OrderedDict([('a', 2), ('b', 2), ('c', 2)])),
                                     ('2a2b2c', OrderedDict([('2', 3), ('a', 1), ('b', 1), ('c', 1)])),
                                     ('121a121b121c', OrderedDict([('1', 6), ('2', 3), ('a', 1), ('b', 1), ('c', 1)])),
                                     ('1112111a1112111b1112111c', OrderedDict([('1', 18), ('2', 3), ('a', 1), ('b', 1), ('c', 1)])),
                                     ('3112311a3112311b3112311c', OrderedDict([('1', 12), ('2', 3), ('3', 6), ('a', 1), ('b', 1), ('c', 1)]))])
"""
Explanation: Question 2: Strings and Data Structures
Note: You are free to use any functions and libraries that come with Python. The aim is not to necessarily implement code from scratch, but to get familiar with Python (specifically the syntax and data structures) and, to some extent, write concise readable working code.
(a): Given a non-empty string like "Code" return a string like "CCoCodCode".
(b): Given strings a, b and c, write a function that will replace all occurences of a in b with c.
(c): Given a string s that has been Caeser enciphered with a numeric shift n. Return the deciphered string. For example, s = "vjku ku c eqfg" and n = -2, returns "this is a code"
Note: You can assume the string is all in lowercase without any special characters.
(d): Given a string s, return whether s is a palindrome, i.e, it is spelt the same when read from either direction.
Note: Ignore case, special characters and spacing.
(e): Using the higher order function filter(), define a function filter_long_words() that takes a list of words and an integer n and returns the list of words that are longer than n (in the same order).
(f): Given a string s and a number n, return a list of size n consisting of tuples with two elements, the first being the correct string in the look-and-say-sequence and the second being a dictionary of character: count pairs that is sorted (Hint: Use Ordered Dictionaries and sorted()).
For example, if s = "1" and n = 5, the look-and-say-sequence is as follows:
1 ("1", {"1" : 1})
11 ("11", {"1" : 2})
21 ("21", {"1" : 1, "2" : 1})
1211 ("1211", {"1" : 3, "2" : 1})
111221 ("111221", {"1" : 4, "2" : 2})
correct output: [("1", {"1" : 1}), ("11", {"1" : 2}), ("21", {"1" : 1, "2" : 1}), ("1211", {"1" : 3, "2" : 1}), ("111221", {"1" : 4, "2" : 2})]
End of explanation
"""
class StringUppercaseMaker(object):
    """Reads a string from console input and prints it back in upper case."""

    def __init__(self):
        # FIX: the original assigned a *local* variable (curr_str = None) that
        # was discarded immediately; initialize the instance attribute that
        # getString()/printString() actually use.
        self.x = None

    def getString(self):
        """Read a string from console input and store it on the instance."""
        self.x = input("Please enter a string.")

    def printString(self):
        """Print the most recently read string in upper case."""
        print(self.x.upper())

test_object = StringUppercaseMaker()
test_object.getString()
test_object.printString()
test_object.printString()
test_object.getString()
test_object.printString()
"""
Explanation: Question 3: Classes and Interactive I/O
(a) Define a class which has at least two methods, getString: to get a string from console input and printString: to print the string in upper case. Also write a simple test to check the functionality class methods.
End of explanation
"""
import random

class GuessTheNumber(object):
    """Interactive number-guessing game between min_guess and max_guess."""
    # Create a constructor here (__init__ function) that takes two numbers, a minimum and a maximum for
    # the range that guesses can take. Save these in variable min_guess and max_guess. Create and
    # initialize a Boolean called incorrect to be true. (Note: Python booleans use capitalization, >T<rue or >F<alse)
    def __init__(self, min_guess, max_guess):
        self.min_guess = min_guess
        self.max_guess = max_guess
        self.incorrect = True

    def play(self):
        """Run one round of the game: ask for a name, then loop over guesses."""
        # Write code to get input from the user and save it into a string variable name.
        name = input("Hello! What is your name? ")
        print("Well, " + name + ", I am thinking of a number between " + str(self.min_guess) + " and " + str(self.max_guess) + ".\n")
        answer = random.randrange(self.min_guess, self.max_guess + 1)
        # FIX: reset the flag so play() can be called more than once -- the
        # original left self.incorrect False forever after a win.
        self.incorrect = True
        count = 0
        while self.incorrect:
            # FIX: per the spec, unexpected (non-numeric) input or a guess
            # outside [min_guess, max_guess] prints an error and breaks.
            try:
                x = int(input("Take a guess. "))
            except ValueError:
                print("Error: that is not a whole number.")
                break
            if x < self.min_guess or x > self.max_guess:
                print("Error: your guess is out of range.")
                break
            # FIX: count *every* guess, including the winning one.  The
            # original only incremented on wrong guesses, so the sample
            # transcript's "3 guesses" would have been reported as 2.
            count += 1
            if x == answer:
                self.incorrect = False
                print("Good job, " + name + "! You guessed my number in " + str(count) + " guesses.")
            else:
                if x > answer:
                    print("Your guess is too high.\n")
                elif x < answer:
                    print("Your guess is too low.\n")
                # (the original's trailing `incorrect = True` was a dead local
                # assignment and has been removed)

# Test out your game!
g = GuessTheNumber(1, 20)
g.play()
"""
Explanation: (b) Write a program able to play the "Guess the number"-game, where the number to be guessed is randomly chosen between 1 and 20. (Source: http://inventwithpython.com)
This is how it should work when run in a terminal:
Hello! What is your name?
Valli
Well, Valli, I am thinking of a number between 1 and 20.
Take a guess.
10
Your guess is too low.
Take a guess.
15
Your guess is too low.
Take a guess.
18
Good job, Valli! You guessed my number in 3 guesses!
End of explanation
"""
# Generate some Data for analysis
from sklearn.datasets import make_classification
X, y = make_classification(1000, n_features=5, n_informative=2, n_redundant=2, n_classes=2, random_state=0)
# (a) Get a glimpse of the data by making a Pandas DataFrame from the data and then printing the first few and last
# few rows.
import pandas as pd
import numpy as np
df = pd.DataFrame(np.hstack((X, y[:, None])))
print("First few rows: ")
print(df.head())
print("Last few rows: ")
print(df.tail())
# (b) Plot a boxplot of each column to visualize the distribution of the data column values.
%matplotlib inline
ax = df.boxplot()
# (c) Try using the describe() function of the DataFrame.
df.describe()
# (d) Install Seaborn if it is not already installed and import it below. Then, perform a pairwise plot using the data.
import seaborn as sns
# hue=5: column 5 of df holds the class label appended when the frame was built
sns.pairplot(df, hue=5)
# (e) Now try Seaborn's correlation plot (Heatmap).
sns.heatmap(df.corr())
# (Optional) Using the first 70% of the data as a training set and the last 30% as a test set, construct a classifier
# and see how well it performs. You will be certainly able to do this at the end of the course!
# NOTE(review): this variable name shadows sklearn's train_test_split helper;
# harmless here because that helper is never imported in this cell.
train_test_split = int(len(df) * 0.7)
train_df = df[:train_test_split]
train_X = train_df[train_df.columns[:-1]]
train_y = train_df[train_df.columns[-1]]
test_df = df[train_test_split:]
# NOTE(review): reuses train_df.columns -- identical to test_df.columns, so harmless.
test_X = test_df[train_df.columns[:-1]]
test_y = test_df[train_df.columns[-1]]
from sklearn.linear_model import SGDClassifier
clf = SGDClassifier(loss='hinge', penalty='l2')
clf.fit(train_X, train_y)
y_train_pred = clf.predict(train_X)
# Manual accuracy: the fraction of positions where prediction equals the label.
print("Training Accuracy: " + str(sum(i == j for i, j in zip(list(train_y), list(y_train_pred))) / len(train_y)))
y_test_pred = clf.predict(test_X)
print("Test Accuracy: " + str(sum(i == j for i, j in zip(list(test_y), list(y_test_pred))) / len(test_y)))
"""
Explanation: Question 4: Pandas and Data Exploration
End of explanation
"""
# (a) Write a function that takes in a tuple and a string that can either
# be 'zero', 'one' or 'gaussian' and correspondingly return a NumPy array that contains those elements. For 'gaussian',
# assume sampling with mean = 0, std = 1.
def array_with_shape(shape, type_):
    """Return an array of the given *shape* filled according to *type_*.

    type_ is one of 'zero', 'one' or 'gaussian' (standard-normal samples,
    mean 0 / std 1); any other value raises NotImplementedError.
    """
    if type_ == 'zero':
        return np.zeros(shape)
    elif type_ == 'one':
        return np.ones(shape)
    elif type_ == 'gaussian':
        return np.random.standard_normal(shape)
    else:
        raise NotImplementedError

print(array_with_shape((3, 10), 'zero'))
print(array_with_shape((5, 2, 3), 'one'))
print(array_with_shape((3, 3), 'gaussian'))
# FIX: 'uniform' is deliberately unsupported, so this demo call raises
# NotImplementedError.  In the notebook it was an isolated cell; in this
# merged script an uncaught exception would stop everything below, so the
# demonstration is wrapped.
try:
    print(array_with_shape((4, 4, 1), 'uniform'))
except NotImplementedError:
    print("'uniform' is not implemented")

# (b) Write a function that returns a n x n identity matrix with n as a parameter.
def identity(n):
    """Return the n x n identity matrix."""
    return np.eye(n)

print(identity(1))
print(identity(5))
print(identity(5) * 3.1415926535)

# (c) Write a function that normalizes a matrix to [0, 1] and returns the normalized matrix.
def normalizer(arr):
    """Rescale *arr* linearly onto [0, 1]; constant arrays are returned unchanged."""
    return arr if arr.max() == arr.min() else (arr - arr.min()) / (arr.max() - arr.min())

print(normalizer(array_with_shape((5, 5), 'one')))
# Note: Without the if/else, this returns an array of nan's. Try it!
print(normalizer(array_with_shape((3, 2, 3), 'gaussian')))

# (d) Write code that creates a NumPy array and makes it immutable.
def make_immutable(arr):
    """Mark *arr* read-only in place; subsequent writes raise ValueError."""
    arr.flags.writeable = False

arr = np.array([0, 0, 0])
make_immutable(arr)
# FIX: this write is *meant* to fail (it demonstrates the immutability); wrap
# it so the demo prints the error instead of killing the rest of the script.
try:
    arr[0] = 1
except ValueError as exc:
    print('write rejected:', exc)

# (e) Write a function that finds the closest value to a given scalar s.
def closest_element(arr, scalar):
    """Return the element of *arr* nearest to *scalar* (first match wins ties)."""
    return arr[np.abs(arr - scalar).argmin()]

print(closest_element(np.array([-10, 0, 10]), 40))
print(closest_element(np.array([-10, 0, 10]), -40))
print(closest_element(np.array([-10, 0, 10]), 0))
print(closest_element(np.array([-10, 0, 10]), 5.5))
print(closest_element(np.array([-10, 0, 10]), -5.5))
print(closest_element(np.array([-10, 0, 10]), 3.5))
print(closest_element(np.array([-10, 0, 10]), -3.5))

# (f) Write a function that subtracts the mean of each row from a matrix and returns it.
def subtract_row_means(arr):
    """Return *arr* with each row's own mean subtracted from that row."""
    return arr - arr.mean(axis=1, keepdims=True)

print(subtract_row_means(np.array([[2, 2], [-1, 1]])))
print(subtract_row_means(np.array([[1.3, 2.4, 5.6], [5.6, 7.8, 8.9]])))

# (g) Write a function that sorts an array by the nth column and returns the sorted array.
def sort_by_column(arr, n):
    """Return the rows of *arr* reordered so that column *n* is ascending."""
    return arr[arr[:, n].argsort()]

print(sort_by_column(np.array([[-1, 5, 3], [7, 10, -1], [-1, -2, -5]]), 0))
print(sort_by_column(np.array([[-1, 5, 3], [7, 10, -1], [-1, -2, -5]]), 1))
print(sort_by_column(np.array([[-1, 5, 3], [7, 10, -1], [-1, -2, -5]]), 2))
"""
Explanation: Question 5: Numpy Exercises
End of explanation
"""
# (a) Write a function that implements Ordinary Least Squares given an input matrix X and a vector of targets y.
# We will go over the method in the forthcoming lecture, but the equation is given in
# https://en.wikipedia.org/wiki/Linear_regression#Estimation_methods
# Note: Use NumPy here, but do NOT make use of library functions that do this for you.
def OLS(X_, y_):
    """Return the ordinary-least-squares coefficients for design matrix X_ and targets y_.

    FIX: solves the normal equations (X'X) beta = X'y with np.linalg.solve
    instead of forming the explicit inverse -- mathematically the same
    estimator, but numerically much better conditioned.
    """
    return np.linalg.solve(np.dot(X_.T, X_), np.dot(X_.T, y_))

from sklearn.datasets import make_regression
X, y = make_regression(1000, n_features=1, noise=20, random_state=0)
# (b) Run your function on the above data and plot the data as well as the decision boundary (trendline)
# generated by your classifier using matplotlib.
beta1, beta2 = OLS(np.concatenate((np.ones((len(X), 1)), X.reshape((len(X), 1))), axis=1), y)
from matplotlib import pyplot as plt
plt.plot(X, y, 'o', color='turquoise', linewidth=5)
space = np.linspace(min(X), max(X))
plt.plot(space, (beta1 + beta2 * space).squeeze(), 'r-', linewidth=5)
"""
Explanation: Question 6: Numpy + First ML algorithm!
End of explanation
"""
Epidemium/RAMP-1
epidemium_01_starting_kit.ipynb
bsd-3-clause
%matplotlib inline import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns; sns.set() pd.set_option('display.max_columns', None) """ Explanation: Find this notebook in https://tinyurl.com/epidemium-ramp <div class="page-header"><h1 class="alert alert-info">Epidemium RAMP: Cancer Mortality Prediction<br/> <small>Djalel Benbouzid, Edouard Debonneuil Cancer Baseline contributors: http://wiki.epidemium.cc/wiki/Baseline#Contributors Partner and software provider: <a href="http://www.datascience-paris-saclay.fr">Paris Saclay Center for Data Science</a> </small></h1></div> Introduction Cancer is still a terrible disease. Surprisingly, the rate of cancer incidence and mortality varies substantially across regions worldwide. This raises several questions as of unknown preventive and risk factors. Within the Epidemium initiative, the Cancer Baseline project aims at collecting open-data aggregate cancer mortality risks (y) worldwide and potential explanatory factors (X) to model $y=f(X)$, hence, trying to shed light on new cancer-related factors. For this first RAMP, you will be the first ones to analyze the data collected by more than 30 volunteers over three months and you will compete on the best cancer mortality prediction model $y=f(X$). May you lead to a new generation of solutions against cancer! If you want to join the project after the RAMP http://wiki.epidemium.cc/wiki/Baseline#How_to_start Tools and setup The simple way: Install the Anaconda python distribution https://www.continuum.io/downloads The fine-grained way:. Install each of the following tools - Python - Jupyter - Scikit-learn - Pandas - seaborn Data description Global description $y$: Cancer mortality risk by region, gender, cancer type, year, and origin (only for the USA). $X$: Explanatory variables. They includes the distribution of the population by region, age and gender incidence of cancer by age and gender and other variables, described below. 
Often, these variables are only present for some years (eg. 2000, 2007, and 2012) but they can extrapolated in-between. Note regarding the names of variables g_* : if the variable takes different values for different genders, the name of the variable starts with g_. Exemple: for Males in Corèze, “smoker_prevalence” is the number of smokers in Corèze whereas “g_smoker_prevalence” is the number of male smokers of Crèze a_* : if the variable takes different values depends on age, the name of the variables starts with a_. Exemple: for Males in Corèze, “smoker_prevalence” is the number of smokers in Corèze “g_smoker_prevalence” is the number of male smokers of Crèze ga_* : if both g_ and a_ are combined. Column description alcool_consumption: average alcohol consumption in liters per person and per year (at this stage of the project, slight adjustments are permitted depending on the country. Exemple: Germany: average alcohol consumption for people for sex X aged 16 and over. USA: alcohol consumption in 2009) alcool_consumption_beer: average beer consumption per person in 2009 - litre/person/year alcool_consumption_spirit: The spirit consumption for each person in 2009 - litre/person/year alcool_consumption_wine: The wine consumption for each person in 2009 - litre/person/year alcool_death: deaths per 100,000 population due to alcohol use in Germany 2011 arsenic_concentration: arsenic concentrations in soil. arsenic_emission: Amount of arsenic from emissions benzo(a)pyren_emission: Amount of benzo(a)pyren from emissions benzo(b)fluoranthen_emission: Amount of benzo(b)fluoranthen from emissions benzo(k)fluoranthen_emissio: Amount of benzo(k)fluoranthen from emissions beryllium_emission: level of beryllium in the environment from facilities that manufacture or process Beryllium in 1990 - kg/capita/year bmi_18.5-: underweight (Body Mass Index < 18.5 kg/m2). Type : rate bmi_18.5_25: healthy weight BMI from 18.5 to 25 kg/m2. 
Type : rate bmi_25_30: overweight from 25 to 30 kg/m2. Type : rate bmi_30+: obese BMI >= 30 kg/m2. Type : rate bmi_score: BMI (source for many countries: MRC-HPA Centre for Environment and Health) cadmium_emission: Amount of cadmium from emissions cholesterol_prevalence: The percentage of population with high cholesterin levels in Germany chromium_emission: Amount of chromium from emissions coal_to_electricity: Electricity production in 2013, Percentage of electricity produced from coal copper_emission: Amount of copper from emissions dietary_characteristics_alcohol: Use beer, Wine, Sparkling Wine, spirits, Others (eg. Alcopops, alcoholic cocktails) everyday dietary_characteristics_calcis: Quantity of calcis in daily food of Germany dietary_characteristics_cereals_bread: Use bread, Bakery products, Cereal / -products, Dishes based on cereal / products constitute everyday dietary_characteristics_cheese_milk: Use Milk / cheese and products thereof, Meals based on milk / products constitute everyday dietary_characteristics_cholesterol: Quantity of Cholesterin in daily food of Germany dietary_characteristics_coffee_tea: Use water, Coffee and tea (green / black), Herbal, fruit tea, Fruit juices / nectars, sodas everyday dietary_characteristics_confectionery: Use confectionery everyday dietary_characteristics_eggs: Use Egg, dishes based on eggs everyday dietary_characteristics_fat: Use Fats and oils everyday of men dietary_characteristics_fish: Use Fish / crustaceans and products thereof, Dishes based on fish / shellfish everyday dietary_characteristics_fruit: Use Fruit products (Juice) everyday dietary_characteristics_iodine: Quantity of Iodine in daily food of Germany dietary_characteristics_meat: Use Meat / -products and sausages, Dishes based on meat everyday dietary_characteristics_potatoes: Use potato products, Dishes based on potatoes everyday dietary_characteristics_proteins: Quantity of protein in daily food of Germany dietary_characteristics_snacks: Use snack 
Mixes everyday dietary_characteristics_soup: Use Soups and stews everyday dietary_characteristics_vegetables: Use Vegetables (Juice), mushrooms, legumes, Dishes based on vegetables everyday dietary_characteristics_vitamine_A: Quantity of vitamin A (retinol equivalents) in daily food of Germany dietary_characteristics_vitamine_B1: Quantity of vitamin B1 in daily food of Germany dietary_characteristics_vitamine_B12: Quantity of vitamin B12 in daily food of Germany dietary_characteristics_vitamine_B2: Quantity of vitamin B2 in daily food of Germany dietary_characteristics_vitamine_B6: Quantity of vitamin B6 in daily food of Germany dietary_characteristics_vitamine_C: Quantity of vitamin C in daily food of Germany dietary_characteristics_vitamine_D: Quantity of vitamin D in daily food of Germany dietary_characteristics_vitamine_E: Quantity of vitamin E in daily food of Germany dioxin_emission: Amount of dioxin from emissions diphteria_vacc: The percentage of people vaccinated against diphteria drugs_crack: Crack/cocaine users prevalence estimates pop. Aged 15 to 64. Type : rate drugs_inject: Injecting drugs prevalence estimates pop. Aged 15 to 64. Type : rate drugs_opiates: Opiates users prevalence estimates pop. Aged 15 to 64. 
Type : rate fastfood_spending: The amount of money for fastfood in 2007 - USD/capita/year first_birth_age: The average age at first birth in 2006 first_menstruation_age: the age at first menstruation in Germany gas_to_electricity: Electricity production in 2013, Percentage of electricity produced from natural gas gov_health_spending_perperson: Government health spending per person (international \$), WHO HAV: The number of cases of acute hepatitis A in 2012 - cases/population/year HBV: The number of cases of acute hepatitis A in 2012 - cases/population/year hcb_emission: Amount of HCB from emissions hepb_vacc: The percentage of people vaccinated against Hepatitis B hib_vacc: The percentage of people vaccinated against hib HIV: The number of AIDS cases in Germany from 1982 to 2009 HPV_vacc_1+, 2+, 3+: completion of HPV vaccination among adolescents ages 13-17 for sex in 2013, equal or greater 1,2,3 HPV vaccination respectively Indeno(1|2|3-cd)pyren_emission: Amount of Indeno(1|2|3-cd)pyren from emissions inequality_index: the Gini index by the World Bank lead_emission: Amount of lead from emissions liver_transplant_prevalence: Living donor liver transplants excluded/Million people/number of centers in 2009-2011 life_expectancy: this is used as a measure of aggregate mortality rates at a given year. Some factor may reduce cancer mortality because it increases the risk to die from other aspects before: as a simple benefit/risk check, a preventive factor should not have a negative impact on life expectancy. 
g_long_term_unemployment: Male/Female long term unemployment rate (source: International Labour Organization) measles_vacc_1: The percentage of people vaccinated against measles (the first time) measles_vacc_2: The percentage of people vaccinated against measles (the second time) menC_vacc: The percentage of people vaccinated against Meningokokken C mercury_emission: Amount of mercury from emissions metal_water_concentration: Concentrations of metals in rainwater mumps_vacc_1: The percentage of people vaccinated against mumps (the first time) mumps_vacc_2: The percentage of people vaccinated against mumps (the second time) nickel_emission: Amount of nickel from emissions nmvok_emission: Amount of nmvok from emissions nox_emission: Amount of NOx from emissions nuclear_to_electricity: Electricity production in 2013, Percentage of electricity produced from nuclear energy other_to_electricity: Electricity production in 2013, Percentage of electricity produced from others individual_health_spending: Out-of-pocket share of total health spending (%) (source: Global Health Expenditure Database) pah_emission: Amount of PAH from emissions pcb_emission: Amount of PCB from emissions pertussis_vacc: The percentage of people vaccinated against pertussis pm10_emission: Amount of pm10 from emissions pm2.5_emission: Amount of pm2.5 from emissions pneumo_vacc: The percentage of people vaccinated against pneumococcal population: population (male and females combine) g_population_60+: share of males resp. 
females aged 60+ urban_population: share of persons living in urban areas polio_vacc: The percentage of people vaccinated against Poliomyelitis radon_level: average radon level in 2015 - Bq/m3 renewable_to_electricity: Electricity production in 2013, Percentage of electricity produced from renewable energy rubella_vacc_1: The percentage of people vaccinated against rubella (the first time) rubella_vacc_2: The percentage of people vaccinated against rubella (the second time) shale_oil: Shale Production in 2012 - kg/capita/year smoker_prevalence: The percentage of people smoking. Type: Rate smoking_10_19cigarets: Moderate smoking (10 - 19 cigarets per day). Type : rate smoking_20+cigarets: Heavy smoking (20 cigarets or more per day). Type : rate so2_emission: Amount of SO2 from emissions tetanus_vacc: The percentage of people vaccinated against tetanus transplants_cases: The number of transplants in 2012 - cases/population/year tsp_emission: Amount of tsp from emissions uninsured: The estimated number of uninsured individuals under age 65 in the county in 2006 - cases/population/year varicella_vacc_1: The percentage of people vaccinated against varicella (the first time) varicella_vacc_2: The percentage of people vaccinated against varicella (the second time) Challenge The vast majority of the variables are extremely sparse and part of the challange will be to: - select the right variables to include in the model, - potentialy impute missing values, or extrapolate between different dates. Imports & config End of explanation """ filename = 'data/public/train.csv' df = pd.read_csv(filename) df.shape df.head(3) df.describe() """ Explanation: Meta data analysis Let's start with a meta-data analysis, ie. number of missing values, number of unique categories etc. 
End of explanation """ def meta_dataframe(df, uniq_examples=7): from collections import defaultdict res = defaultdict(list) for i in range(df.shape[1]): res['col_name'].append(df.columns[i]) uniques = df.iloc[:,i].unique() notnull_rate = df.iloc[:,i].dropna().size / df.iloc[:,i].size res['n_uniques'].append(uniques.size) res['n_notnull'].append(notnull_rate) res['dtype'].append(df.iloc[:,i].dtype) for j in range(1, uniq_examples + 1): v = uniques[j-1] if j <= uniques.size else '' res['value_' + str(j)].append(v) return pd.DataFrame(res, columns=sorted(res.keys())).set_index('col_name') meta_df = meta_dataframe(df.dropna(how='all', axis=[0, 1])) meta_df.sort_values('n_notnull', ascending=False) """ Explanation: The following utility function will help visualize the sparsity of the dataset End of explanation """ meta_df.n_notnull.sort_values(ascending=True, inplace=False).plot.barh(figsize=(10, 6), fontsize=1, alpha=.7); meta_df.n_notnull.plot.hist(figsize=(10, 6), bins=30, alpha=.7); """ Explanation: How much sparse is the data End of explanation """ from sklearn.base import BaseEstimator from sklearn.cross_validation import train_test_split from sklearn.linear_model import LinearRegression from sklearn.ensemble import RandomForestRegressor from sklearn.cross_validation import cross_val_score, ShuffleSplit from sklearn.preprocessing import Imputer from sklearn.metrics import mean_squared_error from sklearn.pipeline import make_pipeline df_tmp = df[df['Part of'] == 'France'] df.isnull().any(how='all', axis=0).sum() df = df.drop(df_tmp.index) class FeatureExtractor(object): # The columns you want to include without pre-processing core_cols = ['Year'] # These columns must be discarded. They are only useful in case you would like to # do joins with external data region_cols = ['RegionType', 'Part of', 'Region'] # Categorical columns. 
They must be processed (use pd.get_dummies for the simplest way) categ_cols = ['Gender', 'Age', 'MainOrigin'] # the different factors to include in the model additional_cols = ['HIV_15_49'] def __init__(self): pass def fit(self, X_df, y_array): pass def transform(self, X_df): ret = X_df[self.core_cols].copy() # dummify the categorical variables for col in self.categ_cols: ret = ret.join(pd.get_dummies(X_df[col], prefix=col[:3])) # add extra information for col in self.additional_cols: ret[col] = X_df[col] return ret.values class Regressor(BaseEstimator): def __init__(self): self.clf = make_pipeline( Imputer(strategy='median'), RandomForestRegressor(n_estimators=20, max_depth=None)) def fit(self, X, y): return self.clf.fit(X, y) def predict(self, X): return self.clf.predict(X) df_features = df.drop('target', axis=1) y = df.target.values df_train, df_test, y_train, y_test = train_test_split(df_features, y, test_size=0.5, random_state=42) """ Explanation: Prediction model We are going to follow the scikit-learn API specs. Basically, - inherit from BaseEstimator, - initiate all of the arguments and configurations in the __init__() function, - implement a fit() and a predict() function. More information in the official documentation. End of explanation """ feature_extractor = FeatureExtractor() model = Regressor() """ Explanation: Instanciating our model End of explanation """ X_train = feature_extractor.transform(df_train) model.fit(X_train, y_train) """ Explanation: Feature processing and training End of explanation """ X_test = feature_extractor.transform(df_test) y_pred = model.predict(X_test) print('RMSE = ', np.sqrt(mean_squared_error(y_test, y_pred))) """ Explanation: Testing our model End of explanation """
zhouqifanbdh/liupengyuan.github.io
chapter2/homework/computer/3-29/201611680697-3.29作业.ipynb
mit
def factorial_sum(end):
    """Return end! (the factorial of *end*), computed with a simple loop."""
    i = 0
    factorial_n = 1
    while i < end:
        i = i + 1
        factorial_n = factorial_n * i
    return factorial_n

# Interactive driver.  Guarded so the module can be imported (e.g. by tests)
# without blocking on input(); inside a notebook __name__ is '__main__', so
# the behaviour there is unchanged.
if __name__ == '__main__':
    m = int(input('请输入第1个整数,以回车结束。'))
    n = int(input('请输入第2个整数,以回车结束。'))
    k = int(input('请输入第3个整数,以回车结束。'))
    print('最终的和是:', factorial_sum(m) + factorial_sum(n) + factorial_sum(k))
"""
Explanation: 仿照求$ \sum_{i=1}^mi + \sum_{i=1}^ni + \sum_{i=1}^ki$的完整代码,写程序,可求m!+n!+k!
End of explanation
"""
def number_sum(end):
    """Return the sum of the first *end* terms of 1 - 1/3 + 1/5 - 1/7 ...

    NOTE: this name is re-bound by the m..n summation exercise further down;
    each definition originally lived in its own notebook cell.
    """
    total = 0
    for i in range(1, end + 1):
        term = 1 / (2 * i - 1)
        total = total + term if i % 2 != 0 else total - term
    return total

if __name__ == '__main__':
    n = 1000
    m = 100000
    print('当n=1000时,最终值为:', 4 * number_sum(n))
    print('当n=100000时,最终值为:', 4 * number_sum(m))
"""
Explanation: 写函数可返回1 - 1/3 + 1/5 - 1/7...的前n项的和。在主程序中,分别令n=1000及100000,打印4倍该函数的和。
End of explanation
"""
# FIX: the original if-chain mislabelled every sign from Gemini onwards (e.g.
# late May / June printed 摩羯座) and had no 天蝎座 branch at all.  The table
# below encodes the standard western zodiac date ranges; each entry is
# (exclusive upper bound encoded as month*100 + day, sign).
_ZODIAC = [
    (120, '摩羯座'), (219, '水瓶座'), (321, '双鱼座'), (420, '白羊座'),
    (521, '金牛座'), (622, '双子座'), (723, '巨蟹座'), (823, '狮子座'),
    (923, '处女座'), (1024, '天秤座'), (1123, '天蝎座'), (1222, '射手座'),
    (1232, '摩羯座'),
]

def constellation(name, month, day):
    """Print and return the zodiac sign for the given birth month and day."""
    key = month * 100 + day
    for upper, sign in _ZODIAC:
        if key < upper:
            print(name, '你是' + sign + '!')
            return sign
    # like the original, dates outside any range print nothing
    return None

if __name__ == '__main__':
    a = str(input('your name:'))
    b = int(input('your birth month:'))
    c = int(input('your birth day'))
    constellation(a, b, c)

def change(word):
    """Print and return the plural of *word* (or the y→ies advice string)."""
    if word.endswith(('s', 'sh', 'ch', 'x', 'o')):
        result = word + 'es'
    elif len(word) > 1 and word[-1] == 'y' and word[-2] in 'bcdfghjklmnpqrstvwxz':
        # same cases as the original's long tuple of consonant+y endings
        result = '把y改i再加es'
    else:
        result = word + 's'
    print(result)
    return result

if __name__ == '__main__':
    a = str(input())
    change(a)
"""
Explanation: 将task3中的练习1及练习4改写为函数,并进行调用
写程序,可由键盘读入用户姓名例如Mr. right,让用户输入出生的月份与日期,判断用户星座,假设用户是金牛座,则输出,Mr. right,你是非常有性格的金牛座!。
英文单词单数转复数,要求输入一个英文动词(单数形式),能够得到其复数形式,或给出单数转复数形式的建议(提示,some_string.endswith(some_letter)函数可以判断某字符串结尾字符,可尝试运行:'myname'.endswith('me'),liupengyuan'.endswith('n'))。
End of explanation
"""
# FIX: the original ignored the step k entirely (def number_sum(a, c) summed
# consecutive integers starting at a, and the driver even passed k where n was
# expected).  Rewritten to match the stated task: sum m..n with step k.
def number_sum(m, n, k=1):
    """Return the sum of the integers from m to n (inclusive) with step k."""
    return sum(range(m, n + 1, k))

if __name__ == '__main__':
    m = int(input('请输入m的值,以回车结束。'))
    n = int(input('请输入n的值,以回车结束。'))
    k = int(input('请输入k的值,以回车结束。'))
    print('从整数m到整数n累加和为:', number_sum(m, n, k))
"""
Explanation: 写程序,可以求从整数m到整数n累加的和,间隔为k,求和部分需用函数实现,主程序中由用户输入m,n,k调用函数验证正确性。
End of explanation
"""
xpmanoj/content
HW0.ipynb
mit
x = [10, 20, 30, 40, 50] for item in x: print "Item is ", item """ Explanation: Homework 0 Due Tuesday, September 10 (but no submission is required) Welcome to CS109 / STAT121 / AC209 / E-109 (http://cs109.org/). In this class, we will be using a variety of tools that will require some initial configuration. To ensure everything goes smoothly moving forward, we will setup the majority of those tools in this homework. While some of this will likely be dull, doing it now will enable us to do more exciting work in the weeks that follow without getting bogged down in further software configuration. This homework will not be graded, however it is essential that you complete it timely since it will enable us to set up your accounts. You do not have to hand anything in, with the exception of filling out the online survey. Class Survey, Piazza, and Introduction Class Survey Please complete the mandatory course survey located here. It should only take a few moments of your time. Once you fill in the survey we will sign you up to the course forum on Piazza and the dropbox system that you will use to hand in the homework. It is imperative that you fill out the survey on time as we use the provided information to sign you up for these services. Piazza Go to Piazza and sign up for the class using your Harvard e-mail address. You will use Piazza as a forum for discussion, to find team members, to arrange appointments, and to ask questions. Piazza should be your primary form of communication with the staff. Use the staff e-mail (staff@cs109.org) only for individual requests, e.g., to excuse yourself from a mandatory guest lecture. All readings, homeworks, and project descriptions will be announced on Piazza first. Introduction Once you are signed up to the Piazza course forum, introduce yourself to your classmates and course staff with a follow-up post in the introduction thread. 
Include your name/nickname, your affiliation, why you are taking this course, and tell us something interesting about yourself (e.g., an industry job, an unusual hobby, past travels, or a cool project you did, etc.). Also tell us whether you have experience with data science. Programming expectations All the assignments and labs for this class will use Python and, for the most part, the browser-based IPython notebook format you are currently viewing. Knowledge of Python is not a prerequisite for this course, provided you are comfortable learning on your own as needed. While we have strived to make the programming component of this course straightforward, we will not devote much time to teaching prorgramming or Python syntax. Basically, you should feel comfortable with: How to look up Python syntax on Google and StackOverflow. Basic programming concepts like functions, loops, arrays, dictionaries, strings, and if statements. How to learn new libraries by reading documentation. Asking questions on StackOverflow or Piazza. There are many online tutorials to introduce you to scientific python programming. Here is one that is very nice. Lectures 1-4 are most relevant to this class. Getting Python You will be using Python throughout the course, including many popular 3rd party Python libraries for scientific computing. Anaconda is an easy-to-install bundle of Python and most of these libraries. We recommend that you use Anaconda for this course. Please visit this page and follow the instructions to set up Python <hline> Hello, Python The IPython notebook is an application to build interactive computational notebooks. You'll be using them to complete labs and homework. 
Once you've set up Python, please <a href=https://raw.github.com/cs109/content/master/HW0.ipynb download="HW0.ipynb">download this page</a>, and open it with IPython by typing ipython notebook &lt;name_of_downloaded_file&gt; For the rest of the assignment, use your local copy of this page, running on IPython. Notebooks are composed of many "cells", which can contain text (like this one), or code (like the one below). Double click on the cell below, and evaluate it by clicking the "play" button above, for by hitting shift + enter Note: This HW was completed in Python 3. Run time errors are expected upon running with Python 2.x. End of explanation """ #IPython is what you are using now to run the notebook import IPython print ("IPython version: %6.6s (need at least 1.0)" % IPython.__version__) # Numpy is a library for working with Arrays import numpy as np print ("Numpy version: %6.6s (need at least 1.7.1)" % np.__version__) # SciPy implements many different numerical algorithms import scipy as sp print ("SciPy version: %6.6s (need at least 0.12.0)" % sp.__version__) # Pandas makes working with data tables easier import pandas as pd print ("Pandas version: %6.6s (need at least 0.11.0)" % pd.__version__) # Module for plotting import matplotlib print ("Mapltolib version: %6.6s (need at least 1.2.1)" % matplotlib.__version__) # SciKit Learn implements several Machine Learning algorithms import sklearn print ("Scikit-Learn version: %6.6s (need at least 0.13.1)" % sklearn.__version__) # Requests is a library for getting data from the Web import requests print ("requests version: %6.6s (need at least 1.2.3)" % requests.__version__) # Networkx is a library for working with networks import networkx as nx print ("NetworkX version: %6.6s (need at least 1.7)" % nx.__version__) #BeautifulSoup is a library to parse HTML and XML documents #import beautifulsoup4 import bs4 print ("BeautifulSoup version:%6.6s (need at least 3.2)" % bs4.__version__) #MrJob is a library to run map 
reduce jobs on Amazon's computers import mrjob print ("Mr Job version: %6.6s (need at least 0.4)" % mrjob.__version__) #Pattern has lots of tools for working with data from the internet import pattern print ("Pattern version: %6.6s (need at least 2.6)" % pattern.__version__) """ Explanation: Python Libraries We will be using a several different libraries throughout this course. If you've successfully completed the installation instructions, all of the following statements should run. End of explanation """ #this line prepares IPython for working with matplotlib %matplotlib inline # this actually imports matplotlib import matplotlib.pyplot as plt x = np.linspace(0, 10, 30) #array of 30 points from 0 to 10 y = np.sin(x) z = y + np.random.normal(size=30) * .2 plt.plot(x, y, 'ro-', label='A sine wave') plt.plot(x, z, 'b-', label='Noisy sine') plt.legend(loc = 'lower right') plt.xlabel("X axis") plt.ylabel("Y axis") """ Explanation: If any of these libraries are missing or out of date, you will need to install them and restart IPython Hello matplotlib The notebook integrates nicely with Matplotlib, the primary plotting package for python. This should embed a figure of a sine wave: End of explanation """ print ("Make a 3 row x 4 column array of random numbers") x = np.random.random((3, 4)) print (x) print print ("Add 1 to every element") x = x + 1 print (x) print print ("Get the element at row 1, column 2") print (x[1, 2]) print # The colon syntax is called "slicing" the array. print ("Get the first row") print (x[0, :]) print print ("Get every 2nd column of the first row") print (x[0, ::2]) print x = np.random """ Explanation: If that last cell complained about the %matplotlib line, you need to update IPython to v1.0, and restart the notebook. See the installation page Hello Numpy The Numpy array processing library is the basis of nearly all numerical computing in Python. Here's a 30 second crash course. 
For more details, consult Chapter 4 of Python for Data Analysis, or the Numpy User's Guide End of explanation """ #your code here print (x.max()) print (x.min()) print (x.mean()) """ Explanation: Print the maximum, minimum, and mean of the array. This does not require writing a loop. In the code cell below, type x.m&lt;TAB&gt;, to find built-in operations for common array statistics like this End of explanation """ #your code here x.max(axis = 1) """ Explanation: Call the x.max function again, but use the axis keyword to print the maximum of each row in x. End of explanation """ x = np.random.binomial(500, .5) x = np.random.binomial? print ("number of heads:", x) x = np.random.binomial """ Explanation: Here's a way to quickly simulate 500 coin "fair" coin tosses (where the probabily of getting Heads is 50%, or 0.5) End of explanation """ #your code here heads = [] for i in range(1,500): heads.append(np.random.binomial(500, .5)) plt.hist(heads, bins =500) """ Explanation: Repeat this simulation 500 times, and use the plt.hist() function to plot a histogram of the number of Heads (1s) in each simulation End of explanation """ """ Function -------- simulate_prizedoor Generate a random array of 0s, 1s, and 2s, representing hiding a prize between door 0, door 1, and door 2 Parameters ---------- nsim : int The number of simulations to run Returns ------- sims : array Random array of 0s, 1s, and 2s Example ------- >>> print simulate_prizedoor(3) array([0, 0, 2]) """ def simulate_prizedoor(nsim): #compute here answer =np.random.random_integers(0,2,nsim) return answer #your code here print (simulate_prizedoor(10)) """ Explanation: The Monty Hall Problem Here's a fun and perhaps surprising statistical riddle, and a good way to get some practice writing python functions In a gameshow, contestants try to guess which of 3 closed doors contain a cash prize (goats are behind the other two doors). Of course, the odds of choosing the correct door are 1 in 3. 
As a twist, the host of the show occasionally opens a door after a contestant makes his or her choice. This door is always one of the two the contestant did not pick, and is also always one of the goat doors (note that it is always possible to do this, since there are two goat doors). At this point, the contestant has the option of keeping his or her original choice, or swtiching to the other unopened door. The question is: is there any benefit to switching doors? The answer surprises many people who haven't heard the question before. We can answer the problem by running simulations in Python. We'll do it in several parts. First, write a function called simulate_prizedoor. This function will simulate the location of the prize in many games -- see the detailed specification below: End of explanation """ """ Function -------- simulate_guess Return any strategy for guessing which door a prize is behind. This could be a random strategy, one that always guesses 2, whatever. Parameters ---------- nsim : int The number of simulations to generate guesses for Returns ------- guesses : array An array of guesses. Each guess is a 0, 1, or 2 Example ------- >>> print simulate_guess(5) array([0, 0, 0, 0, 0]) """ #your code here def simulate_guess(nsim): #compute here answer =np.random.random_integers(0,1,nsim) return answer #your code here print (simulate_guess(10)) """ Explanation: Next, write a function that simulates the contestant's guesses for nsim simulations. Call this function simulate_guess. The specs: End of explanation """ """ Function -------- goat_door Simulate the opening of a "goat door" that doesn't contain the prize, and is different from the contestants guess Parameters ---------- prizedoors : array The door that the prize is behind in each simulation guesses : array THe door that the contestant guessed in each simulation Returns ------- goats : array The goat door that is opened for each simulation. 
Each item is 0, 1, or 2, and is different from both prizedoors and guesses Examples -------- >>> print goat_door(np.array([0, 1, 2]), np.array([1, 1, 1])) >>> array([2, 2, 0]) """ def goat_door(prizedoors, guesses): #strategy: generate random answers, and #keep updating until they satisfy the rule #that they aren't a prizedoor or a guess result = np.random.randint(0, 3, prizedoors.size) while True: bad = (result == prizedoors) | (result == guesses) if not bad.any(): return result result[bad] = np.random.randint(0, 3, bad.sum()) print (goat_door(np.array([0, 1, 2,1]), np.array([1, 1, 1,1]))) """ Explanation: Next, write a function, goat_door, to simulate randomly revealing one of the goat doors that a contestant didn't pick. End of explanation """ """ Function -------- switch_guess The strategy that always switches a guess after the goat door is opened Parameters ---------- guesses : array Array of original guesses, for each simulation goatdoors : array Array of revealed goat doors for each simulation Returns ------- The new door after switching. Should be different from both guesses and goatdoors Examples -------- >>> print switch_guess(np.array([0, 1, 2]), np.array([1, 2, 1])) >>> array([2, 0, 0]) """ #your code here def switch_guess(guesses, goatdoors): #strategy: generate random answers, and #keep updating until they satisfy the rule #that they aren't a guess or a goatdoor result = np.random.randint(0, 3, guesses.size) while True: bad = (result == guesses) | (result == goatdoors) if not bad.any(): return result result[bad] = np.random.randint(0, 3, bad.sum()) print (switch_guess(np.array([0, 1, 2]), np.array([1, 2, 1]))) """ Explanation: Write a function, switch_guess, that represents the strategy of always switching a guess after the goat door is opened. 
End of explanation """ """ Function -------- win_percentage Calculate the percent of times that a simulation of guesses is correct Parameters ----------- guesses : array Guesses for each simulation prizedoors : array Location of prize for each simulation Returns -------- percentage : number between 0 and 100 The win percentage Examples --------- >>> print win_percentage(np.array([0, 1, 2]), np.array([0, 0, 0])) 33.333 """ #your code here def win_percentage(guesses, prizedoors): correct_guesses = guesses == prizedoors answer = correct_guesses.sum()/guesses.size*100 return answer print (win_percentage(np.array([0, 1, 2]), np.array([0, 2, 2]))) """ Explanation: Last function: write a win_percentage function that takes an array of guesses and prizedoors, and returns the percent of correct guesses End of explanation """ #your code here #simulate prizedoors, guesses, goatdoors prizedoors = simulate_prizedoor(10000) guesses = simulate_guess(10000) goatdoors = goat_door(prizedoors, guesses) #compute win percent if keep original print("Win Percentage - keep original") print(win_percentage(guesses,prizedoors)) #compute win percent if switch guess switchedguesses = switch_guess(guesses,goatdoors) print("Win Percentage - switch guess") print(win_percentage(switchedguesses,prizedoors)) """ Explanation: Now, put it together. Simulate 10000 games where contestant keeps his original guess, and 10000 games where the contestant switches his door after a goat door is revealed. Compute the percentage of time the contestant wins under either strategy. Is one strategy better than the other? End of explanation """
Kaggle/learntools
notebooks/feature_engineering/raw/ex2.ipynb
apache-2.0
# Set up code checking # This can take a few seconds from learntools.core import binder binder.bind(globals()) from learntools.feature_engineering.ex2 import * """ Explanation: Introduction In this exercise you'll apply more advanced encodings to encode the categorical variables ito improve your classifier model. The encodings you will implement are: Count Encoding Target Encoding CatBoost Encoding You'll refit the classifier after each encoding to check its performance on hold-out data. Begin by running the next code cell to set up the notebook. End of explanation """ import numpy as np import pandas as pd from sklearn import preprocessing, metrics import lightgbm as lgb clicks = pd.read_parquet('../input/feature-engineering-data/baseline_data.pqt') """ Explanation: The next code cell repeats the work that you did in the previous exercise. End of explanation """ def get_data_splits(dataframe, valid_fraction=0.1): """Splits a dataframe into train, validation, and test sets. First, orders by the column 'click_time'. Set the size of the validation and test sets with the valid_fraction keyword argument. 
""" dataframe = dataframe.sort_values('click_time') valid_rows = int(len(dataframe) * valid_fraction) train = dataframe[:-valid_rows * 2] # valid size == test size, last two sections of the data valid = dataframe[-valid_rows * 2:-valid_rows] test = dataframe[-valid_rows:] return train, valid, test def train_model(train, valid, test=None, feature_cols=None): if feature_cols is None: feature_cols = train.columns.drop(['click_time', 'attributed_time', 'is_attributed']) dtrain = lgb.Dataset(train[feature_cols], label=train['is_attributed']) dvalid = lgb.Dataset(valid[feature_cols], label=valid['is_attributed']) param = {'num_leaves': 64, 'objective': 'binary', 'metric': 'auc', 'seed': 7} num_round = 1000 bst = lgb.train(param, dtrain, num_round, valid_sets=[dvalid], early_stopping_rounds=20, verbose_eval=False) valid_pred = bst.predict(valid[feature_cols]) valid_score = metrics.roc_auc_score(valid['is_attributed'], valid_pred) print(f"Validation AUC score: {valid_score}") if test is not None: test_pred = bst.predict(test[feature_cols]) test_score = metrics.roc_auc_score(test['is_attributed'], test_pred) return bst, valid_score, test_score else: return bst, valid_score """ Explanation: Next, we define a couple functions that you'll use to test the encodings that you implement in this exercise. End of explanation """ print("Baseline model") train, valid, test = get_data_splits(clicks) _ = train_model(train, valid) """ Explanation: Run this cell to get a baseline score. End of explanation """ # Check your answer (Run this code cell to receive credit!) q_1.solution() """ Explanation: 1) Categorical encodings and leakage These encodings are all based on statistics calculated from the dataset like counts and means. Considering this, what data should you be using to calculate the encodings? Specifically, can you use the validation data? Can you use the test data? Run the following line after you've decided your answer. 
End of explanation """ import category_encoders as ce cat_features = ['ip', 'app', 'device', 'os', 'channel'] train, valid, test = get_data_splits(clicks) """ Explanation: 2) Count encodings Begin by running the next code cell to get started. End of explanation """ # Create the count encoder count_enc = ____ # Learn encoding from the training set ____ # Apply encoding to the train and validation sets as new columns # Make sure to add `_count` as a suffix to the new columns train_encoded = ____ valid_encoded = ____ # Check your answer q_2.check() # Uncomment if you need some guidance # q_2.hint() # q_2.solution() #%%RM_IF(PROD)%% # Create the count encoder count_enc = ce.CountEncoder(cols=cat_features) # Learn encoding from the training set count_enc.fit(train[cat_features]) # Apply encoding to the train and validation sets train_encoded = train.join(count_enc.transform(train[cat_features]).add_suffix('_count')) valid_encoded = valid.join(count_enc.transform(valid[cat_features]).add_suffix('_count')) q_2.assert_check_passed() """ Explanation: Next, encode the categorical features ['ip', 'app', 'device', 'os', 'channel'] using the count of each value in the data set. - Using CountEncoder from the category_encoders library, fit the encoding using the categorical feature columns defined in cat_features. - Then apply the encodings to the train and validation sets, adding them as new columns with names suffixed "_count". End of explanation """ # Train the model on the encoded datasets # This can take around 30 seconds to complete _ = train_model(train_encoded, valid_encoded) """ Explanation: Run the next code cell to see how count encoding changes the results. End of explanation """ # Check your answer (Run this code cell to receive credit!) q_3.solution() """ Explanation: Count encoding improved our model's score! 3) Why is count encoding effective? At first glance, it could be surprising that count encoding helps make accurate models. 
Why do you think count encoding is a good idea, or how does it improve the model score?
Run the following line after you've decided your answer.
End of explanation
"""
# Create the target encoder. You can find this easily by using tab completion.
# Start typing ce. then press Tab to bring up a list of classes and functions.
target_enc = ____

# Learn encoding from the training set. Use the 'is_attributed' column as the target.
____

# Apply encoding to the train and validation sets as new columns
# Make sure to add `_target` as a suffix to the new columns
train_encoded = ____
valid_encoded = ____

# Check your answer
q_4.check()

# Uncomment these if you need some guidance
#q_4.hint()
#q_4.solution()

#%%RM_IF(PROD)%%
# Solution: fit the target encoder on the training fold only (avoids
# leaking label statistics from validation/test), then join the encoded
# columns onto each split.
target_enc = ce.TargetEncoder(cols=cat_features)
target_enc.fit(train[cat_features], train['is_attributed'])

train_encoded = train.join(target_enc.transform(train[cat_features]).add_suffix('_target'))
valid_encoded = valid.join(target_enc.transform(valid[cat_features]).add_suffix('_target'))

q_4.assert_check_passed()
"""
Explanation: 4) Target encoding
Here you'll try some supervised encodings that use the labels (the targets) to transform categorical features. The first one is target encoding.
- Create the target encoder from the category_encoders library.
- Then, learn the encodings from the training dataset, apply the encodings to all the datasets, and retrain the model.
End of explanation
"""
_ = train_model(train_encoded, valid_encoded)
"""
Explanation: Run the next cell to see how target encoding affects your results.
End of explanation
"""
# Check your answer (Run this code cell to receive credit!)
q_5.solution()
"""
Explanation: 5) Try removing IP encoding
If you leave ip out of the encoded features and retrain the model with target encoding, you should find that the score increases and is above the baseline score! Why do you think the score is below baseline when we encode the IP address but above baseline when we don't?
Run the following line after you've decided your answer. End of explanation """ # Remove IP from the encoded features cat_features = ['app', 'device', 'os', 'channel'] # Create the CatBoost encoder cb_enc = ce.CatBoostEncoder(cols=cat_features, random_state=7) # Learn encoding from the training set ____ # Apply encoding to the train and validation sets as new columns # Make sure to add `_cb` as a suffix to the new columns train_encoded = ____ valid_encoded = ____ # Check your answer q_6.check() # Uncomment these if you need some guidance #q_6.hint() #q_6.solution() #%%RM_IF(PROD)%% cat_features = ['app', 'device', 'os', 'channel'] cb_enc = ce.CatBoostEncoder(cols=cat_features, random_state=7) # Learn encodings on the train set cb_enc.fit(train[cat_features], train['is_attributed']) # Apply encodings to each set train_encoded = train.join(cb_enc.transform(train[cat_features]).add_suffix('_cb')) valid_encoded = valid.join(cb_enc.transform(valid[cat_features]).add_suffix('_cb')) q_6.assert_check_passed() """ Explanation: 6) CatBoost Encoding The CatBoost encoder is supposed to work well with the LightGBM model. Encode the categorical features with CatBoostEncoder and train the model on the encoded data again. End of explanation """ _ = train_model(train_encoded, valid_encoded) """ Explanation: Run the next code cell to see how the CatBoost encoder changes your results. End of explanation """
liganega/Gongsu-DataSci
previous/y2017/GongSu05_Flow_Control.ipynb
gpl-3.0
def sum_if_3(k, m): if (m % 3 == 0) or (str(m).endswith('3')): return k + m else: return k """ Explanation: 흐름 제어: 조건문과 반복문(루프) 활용 수정 사항 gcd 함수 위주로 반복문 작성 가능여부 확인 좀 더 실용적인 수학함수 활용 가능 요약 조건문 활용 if문: 불리언 값을 이용하여 조건을 제시하는 방법 반복문(루프) 활용 while 반복문(루프): 특정 조건이 만족되는 동안 동일한 과정을 반복하는 방법 for 반복문(루프): 특정 구간 내에서 동일한 과정을 반복하는 방법. range 함수의 활용 최종 목표 두 정수의 최대공약수를 구하는 함수: gcd() gcd(6, 8) = 2 gcd(14, 21) = 3 자연수 n이 주어졌을 때, 1부터 n까지의 자연수 중에서 3의 배수이거나 숫자 3을 포함하는 숫자들의 합을 구하는 함수 sum_of_3s() 구현하기: sum_of_3s(10) = 3 + 6 + 9 sum_of_3s(15) = 3 + 6 + 9 + 12 + 13 + 15 조건문 어떤 일은 특정 조건 하에서만 할 수 있는 경우가 있다. 예를 들어, 숫자 k가 3의 배수이거나 3으로 끝나는 경우에만 그 값을 다른 값에 더하라고 할 수 있다. 위 문장을 코드로 나타내려면 아래 요소들이 필요하다. 이런저런 경우에만 __무엇무엇__을 해라. 숫자 k가 3의 배수이다. 숫자 k가 숫자 3으로 끝난다. "이런저런 경우에만 무엇, 무엇을 해라"는 if문으로 나타낸다. if 이런저런: 무엇 무엇 "숫자 k가 3의 배수이다"는 아래 수식으로 표현된다. k % 3 == 0 "숫자 k가 숫자 3으로 끝난다"는 좀 더 어렵지만, 앞서 배운 문자열 메소드를 활용하면 된다. str(k).endswith('3') 여기서 str() 함수는 숫자를 문자열로 형변환 시키는 함수이다. int() 또는 float() 함수와 반대의 일을 한다. 예제 두 개의 숫자 k, m이 주어졌을 때, 만약 m이 3의 배수이거나 3으로 끝나는 숫자일 경우에만 k와 m을 더하는 함수 sum_if_3()를 구현하라. 견본답안: End of explanation """ sum_if_3(5, 18) sum_if_3(4, 7) """ Explanation: 주의: else문은 if문에서 다루는 경우가 성립하지 않을 때 무슨 일을 해야할지를 정한다. End of explanation """ if 'bc' in 'abcde': print("'bc'가 'abcde'의 부분문자열이다.") """ Explanation: 예제 두 개의 숫자 k, m이 주어졌을 때, 만약 m이 3의 배수이거나 숫자 3을 포함하는 경우에만 k와 m을 더하는 함수 sum_if_3s()를 구현하라. 이 문제를 풀기 위해서는 문자열에 특정 문자열이 부분문자열로 포함되어 있는지를 판단해야 하는데 아래 예제와 같이 in 함수를 이용할 수 있다. End of explanation """ def sum_if_3s(k, m): if (m % 3 == 0) or ('3' in str(m)): return k + m else: return k sum_if_3s(2, 31) sum_if_3s(3, 15) sum_if_3s(13, 28) """ Explanation: 견본답안: End of explanation """ num1 = 5 num2 = 10 if num1 < num2: print("num1이 num2 보다 작다.") else: if num1 == num2: print("num1이 num2와 같다.") else: print("num1이 num2 보다 크다.") """ Explanation: 중첩 조건문과 일반화된 조건문 if ... else ... 문은 두 가지 경우를 처리할 때 사용한다. 반면에, 예를 들어, 크거나, 같거나, 작거나 등 세 가지 이상의 경우를 처리하려면 if ... else ...문을 중첩해서 사용하거나 if ... elif ... elif ... else ... 처럼 다중 조건문을 사용할 수 있다. 
중첩 조건문(중첩 if문) 활용 예제 End of explanation """ num1 = 5 num2 = 10 if num1 < num2: print("num1이 num2 보다 작다.") elif num1 == num2: print("num1이 num2와 같다.") else: print("num1이 num2 보다 크다.") """ Explanation: 다중 조건문(다중 if문) 활용 예제 End of explanation """ number = 43 divisor = 7 answer = 0 # While 루프 while number > 0: number = number - divisor # 음수가 아니라면 빼주는 횟수를 1회 늘린다. if number > 0: answer += 1 # 이제 answer를 출력하면 된다. print('몫은', answer, '이다') """ Explanation: 주의: if문의 중첩 정도는 임의로 복잡해질 수 있다. 따라서 가능하면 일반화된 조건문을 사용하면 다루기가 보다 쉬워진다. 반복문(루프) 반복문(루프)은 동일한 코드를 반복해서 실행시킬 때 사용한다. 루프를 만들기 위해 for문과 while문을 사용한다. for 반복문: 반복을 몇 번 할지 미리 알 수 있는 경우 사용 while 반복문: 특정 조건이 만족되는 동안 반복하고자 할 경우 여기서는 먼저 while 반복문을 살펴보고 이후에 for 반복문을 살펴본다. while 반복문 while 반복문은 항상 아래 모양을 갖는다: python while 조건: 본문코드1 본문코드2 ... __조건__이 참이 되는 동안 본문코드들이 실행된다. 예제 정수들을 나누어 몫을 구하는 코드를 작성해보자. 몫을 어떻게 구현할까? 먼저 몫이 어떤 의미인가를 알아야 한다. 그 다음에 그 의미를 구현하는 코드를 작성한다. 어떤 정수 a를 다른 정수 b로 나누었을 때의 몫은 a에서 b를 몇 번 뺄 수 있는가와 동일한 의미를 갖는다. 즉, a에서 b를 반복해서 빼주는 과정이 필요하고 이 과정을 음수가 되지 않을 때까지만 반복해야 한다. 예를 들어 43을 7로 나누었을 때의 몫은 다음과 같이 구할 수 있다. End of explanation """ def gcd(a, b): if a < b: # 이 경우에는 a와 b의 값을 서로 바꾼다. a, b = b, a while b != 0: a, b = b, a % b return a """ Explanation: 'while' 루프를 작성할 때 조건문이 언젠가는 만족되지 않아서 더 이상 루프가 돌지 않도록 코드를 작성하는 것이 가장 중요하다. 연습 두 정수의 최대공약수(gcd)를 리턴하는 함수를 구현하라. 힌트: 유클리드 호제법을 활용하라. 아래 사이트 참조: http://tibyte.kr/224 견본답안: End of explanation """ gcd(6, 8) gcd(14, 21) """ Explanation: 주의: 파이썬에서 두 변수에 할당된 값을 맞교환 하는 방법이 매우 간단하다. 하지만 C 또는 자바에서는 다르게 처리해야 한다. 예를 들어, 아래와 같은 방식을 이용할 수 있다. ```C int a = 3 int b = 5 int temp temp = a a = b b = temp ``` End of explanation """ for char in "adam": print(char) """ Explanation: for 루프 while 반복문과는 달리 몇 번 반복되어야 하는지를 아는 경우 for 반복문을 사용할 수 있으며, 아래 형식을 따른다. python for 항목변수 in 컬렉션 자료형 값: 코드1 코드2 ... 컬렉션 자료형: 리스트, 튜플, 문자열, 어레이 등 여러 개의 값을 동시에 다룰 수 있는 자료형을 의미하며, 다음 시간에 보다 자세히 다룬다. 여기서는 문자열과 range() 함수를 이용하여 for 반복문을 사용하는 법을 익힌다. 문자열과 for 문 예제 아래 코드는 문자열에 포함된 문자 각각을 출력한다. 
End of explanation """ a_word = 'aardvarks' new_word = '' for char in a_word: if char == 'a': new_word = new_word + 'A' else: new_word = new_word + char print(new_word) """ Explanation: 연습 문자열에 있는 소문자 a를 대문자 A로 변경하여 새로운 문자열을 생성하는 코드를 작성하라. 예를 들어, "aardvarks"를 이용하여 "AArdvArks"를 생성하는 코드를 작성하라. 견본답안1: End of explanation """ a_word = ' n o r t h w e s t e r n' temp_word = '' for char in a_word: if char != ' ': temp_word = temp_word + char new_word = temp_word.title() print(new_word) """ Explanation: 연습 아래 문자열 ' n o r t h w e s t e r n' 을 이용하여 아래 문자열을 생성하는 코드를 구현하라: 'Northwestern' End of explanation """ a_range = range(10) print(a_range) """ Explanation: range() 함수와 for 문 range() 함수는 일정한 규칙에 따라 나열된 수열을 생성한다. End of explanation """ type(a_range) """ Explanation: range() 함수의 리턴값의 자료형은 리스트이다. 주의: 파이썬3에서 range() 함수의 리턴값은 range라는 자료형이다. 리스트와 거의 비슷하지만 용도가 좀 다르다는 정도만 기억하고 넘어가도 좋다. End of explanation """ a_range_1 = range(3, 10) a_range_1 a_range_2 = range(3, 10, 2) a_range_2 """ Explanation: range() 함수는 인자를 최대 세 개까지 받을 수 있다. 각 인자들의 역할은 슬라이싱에 사용되는 세 개의 인자들의 역할과 동일하다. range([start,] stop [, step]) start의 경우 주어지지 않으면 0을 기본값으로 갖는다. step의 경우 주어지지 않으면 1을 기본값으로 갖느다. End of explanation """ for i in range(6): print(i,"의 제곱은", i ** 2, "이다.") for i in range(0, 6, 2): print(i,"의 제곱은", i ** 2, "이다.") """ Explanation: range 함수는 for문에서 유용하게 활용된다. End of explanation """ for i in range(5): print("다섯 번 출력합니다.") """ Explanation: 단순한 카운트 역할을 수행하는 용도로 range함수를 활용할 수도 있다. 즉, 어떤 일을 특정 횟수만큼 반복하고자 할 때 사용한다. End of explanation """ a_word = 'hamster' for i in range(7): print(a_word[i]) a_word = 'hamster' for i in a_word: print(i) """ Explanation: range() 함수와 문자열 인덱싱을 활용하면 문자열에 대해 for문을 직접 활용하는 것과 동일한 일을 할 수 있다. 예를 들어, 문자열의 길이와 range() 함수를 다음처럼 활용할 수 있다. End of explanation """ for i in range(8): print(a_word[i]) """ Explanation: 주의: 문자열의 길이가 range() 함수에 사용되는 인자보다 작으면 오류가 발생한다. 이유는 문자열의 길이보다 긴 인덱스가 사용되기 때문이다. 
End of explanation """ def sum_of_3s(n): sum = 0 for i in range(1, n+1): if i % 3 == 0: sum = sum + i elif '3' in str(i): sum = sum + i return sum sum_of_3s(10) sum_of_3s(15) """ Explanation: 이제 아래 문제를 해결할 수 있다. 연습 자연수 n이 주어졌을 때, 1부터 n까지의 자연수 중에서 3의 배수이거나 숫자 3을 포함하는 숫자들의 합을 구하는 함수 sum_of_3s() 구현하기: sum_of_3s(10) = 3 + 6 + 9 = 18 sum_of_3s(15) = 3 + 6 + 9 + 12 + 13 + 15 = 58 견본답안: End of explanation """ def gcd(a, b): if a < b : a, b = b, a while b != 0: a, b = b, a % b return a gcd(10, 25) gcd(124, 36) """ Explanation: 연습 두 정수의 최대공약수(gcd)를 리턴하는 함수를 구현하라. 견본답안: End of explanation """ def lcm(a, b): g = gcd(a, b) c = a/g return c*b lcm(10, 25) lcm(124, 36) """ Explanation: 연습 두 정수의 최소공배수(lcm)를 리턴하는 함수를 구현하라. 견본답안: End of explanation """ song = "When you are smiling, the whole world smiles with you" """ Explanation: 연습 아래 노래 가사를 활용하는 문제이다. End of explanation """ count_a = 0 for word in song: if word == 'a': count_a += 1 print(count_a) """ Explanation: (1) 위 문자열에서 a가 등장하는 횟수를 구하는 코드를 작성하라. 견본답안: End of explanation """ count_w = 0 for word in song.lower(): if word == 'w': count_w += 1 print(count_w) """ Explanation: (2) 위 문자열에서 대소문자 구별없이 w가 등장하는 횟수를 구하는 코드를 작성하라. 견본답안: End of explanation """ new_song = '' for word in song.lower(): if word not in 'aeiou, ': new_song += word print(new_song) """ Explanation: (3) 다음 문자열을 이용하여, whnyrsmlngthwhlwrldsmlswthyu를 생성하는 코드를 작성하라. (힌트: 모음(aeiou)와 공백 제거) 견본답안: End of explanation """
sdpython/ensae_teaching_cs
_doc/notebooks/exams/td_note_2022.ipynb
mit
from jyquickhelper import add_notebook_menu add_notebook_menu() """ Explanation: 1A - Enoncé 3 novembre 2021 Correction de l'examen du 3 novembre 2021. End of explanation """ import time def mesure_temps_fonction(fct, N=100): begin = time.perf_counter() for i in range(N): fct() return (time.perf_counter() - begin) / N mesure_temps_fonction(lambda: time.sleep(0.1), N=10) """ Explanation: Exercice 1 : multiplication de matrices On a besoin d'une fonction qui mesure le temps d'exécution d'une fonction. End of explanation """ print(997 * 93 * 1003 + 997 * 1003 * 97, 93 * 1003 * 97 + 997 * 93 * 97) import numpy m1 = numpy.random.randn(997, 93) m2 = numpy.random.randn(93, 1003) m3 = numpy.random.randn(1003, 97) mesure_temps_fonction(lambda: m1 @ m2 @ m3) mesure_temps_fonction(lambda: (m1 @ m2) @ m3) mesure_temps_fonction(lambda: m1 @ (m2 @ m3)) """ Explanation: Q1 : Pourquoi (m1 @ m2) @ m3 est-il plus lent que m1 @ (m2 @ m3) ? (2 points) Il y a deux options possible. Il suffit de compter le nombre d'opérations dans chaque option. Le coût d'une multiplication $M_{ab} \times m_{bc}$ est de l'ordre de $O(abc)$. Donc : cout((m1 @ m2) @ m3) ~ O(997 * 93 * 1003 + 997 * 1003 * 97) = 189998290 cout(m1 @ (m2 @ m3)) ~ O(93 * 1003 * 97 + 997 * 93 * 97) = 18042000 La seconde option est dix fois plus rapide. 
End of explanation """ def n_ops(m1_shape, m2_shape): return m1_shape[0] * m2_shape[1] * m1_shape[1] * 2 n_ops(m1.shape, m2.shape) """ Explanation: Q2 : Ecrire une fonction qui calcule le nombre d'operations dans une multiplication de deux matrices (2 points) End of explanation """ def n_ops_3(sh1, sh2, sh3): m1_m2m3 = n_ops(sh1, (sh2[0], sh3[1])) + n_ops(sh2, sh3) m1m2_m3 = n_ops(sh1, sh2) + n_ops((sh1[0], sh2[1]), sh3) if m1m2_m3 < m1_m2m3: return m1m2_m3, 2 else: return m1_m2m3, 1 n_ops_3(m1.shape, m2.shape, m3.shape) """ Explanation: Q3 : Ecrire une fonction qui retourne le meilleur coût d'une multiplication de deux matrices et la meilleure option (2 points) End of explanation """ from numpy.testing import assert_almost_equal def produit3(m1, m2, m3): cout, meilleur = n_ops_3(m1.shape, m2.shape, m3.shape) if meilleur == 2: return (m1 @ m2) @ m3 else: return m1 @ (m2 @ m3) assert_almost_equal(produit3(m1, m2, m3), m1 @ (m2 @ m3)) """ Explanation: Q4 : Ecrire une fonction qui effectue le produit de trois matrices le plus rapidement possible (2 points) End of explanation """ mesure_temps_fonction(lambda: produit3(m1, m2, m3)) """ Explanation: Q5 : Vérifiez que vous retrouvez les mêmes résultats avec la fonction mesure_temps (2 points) End of explanation """ mesure_temps_fonction(lambda: m1 @ (m2 @ m3)) """ Explanation: On vérifie que c'est égal à : End of explanation """ m4 = numpy.random.randn(97, 20) def n_ops_4(sh1, sh2, sh3, sh4): m1_m2m3m4 = n_ops(sh1, (sh2[0], sh4[1])) + n_ops_3(sh2, sh3, sh4)[0] m1m2_m3m4 = n_ops(sh1, sh2) + n_ops((sh1[0], sh2[1]), (sh3[0], sh4[1])) + n_ops(sh3, sh4) m1m2m3_m4 = n_ops_3(sh1, sh2, sh3)[0] + n_ops((sh1[0], sh3[1]), sh4) m = min(m1_m2m3m4, m1m2_m3m4, m1m2m3_m4) if m == m1_m2m3m4: return m, 1 if m == m1m2_m3m4: return m, 2 return m, 3 n_ops_4(m1.shape, m2.shape, m3.shape, m4.shape) """ Explanation: Ici, vous avez le choix entre faire les questions 6 à 9 ou les questions 9 et 10. 
Q6 : Ecrire une fonction qui retourne le meilleur coût d'une multiplication de 4 matrices et la meilleure option (3 points) End of explanation """ def produit4(m1, m2, m3, m4): cout, meilleur = n_ops_4(m1.shape, m2.shape, m3.shape, m4.shape) if meilleur == 1: return m1 @ produit3(m2, m3, m4) if meilleur == 2: return (m1 @ m2) @ (m3 @ m4) return produit3(m1, m2, m3) @ m4 mesure_temps_fonction(lambda: produit4(m1, m2, m3, m4)) """ Explanation: Q7 : Ecrire une fonction qui effectue le produit de 4 matrices le plus rapidement possible (3 points) End of explanation """ mesure_temps_fonction(lambda: ((m1 @ m2) @ m3) @ m4) mesure_temps_fonction(lambda: (m1 @ m2) @ (m3 @ m4)) mesure_temps_fonction(lambda: m1 @ (m2 @ (m3 @ m4))) mesure_temps_fonction(lambda: produit4(m1, m2, m3, m4)) """ Explanation: Q8 : Vérifiez que vous retrouvez les mêmes résultats avec la fonction mesure_temps et la matrice m4. (2 points) End of explanation """ def n_ops_N(shapes): if len(shapes) <= 1: raise RuntimeError("Unexpected list of shapes: %r." % shapes) if len(shapes) == 2: return n_ops(*shapes), 1 if len(shapes) == 3: return n_ops_3(*shapes) best_cost = None best_pos = None for i in range(1, len(shapes)): if i == 1: cost = n_ops(shapes[0], (shapes[1][0], shapes[-1][1])) + n_ops_N(shapes[1:])[0] best_cost = cost best_pos = i elif i == len(shapes)-1: cost = n_ops_N(shapes[:-1])[0] + n_ops((shapes[0][0], shapes[-2][1]), shapes[-1]) if cost < best_cost: best_cost = cost best_pos = i else: cost = (n_ops_N(shapes[:i])[0] + n_ops_N(shapes[i:])[0] + n_ops((shapes[0][0], shapes[i-1][1]), (shapes[i][0], shapes[-1][1]))) if cost < best_cost: best_cost = cost best_pos = i if best_pos is None: raise RuntimeError(shapes) return best_cost, best_pos n_ops_N([m1.shape, m2.shape, m3.shape, m4.shape]) n_ops_4(m1.shape, m2.shape, m3.shape, m4.shape) def product_N(inputs): if len(inputs) <= 1: raise RuntimeError( "List inputs must contain at least two elements bot has %d." 
% len(inputs)) cost, pos = n_ops_N([i.shape for i in inputs]) if len(inputs) == 2: return inputs[0] @ inputs[1] if pos == 1: right = product_N(inputs[1:]) return inputs[0] @ right if pos == len(shapes) - 1: left = product_N(inputs[:-1]) return left @ inputs[-1] else: left = product_N(inputs[:pos + 1]) right = product_N(inputs[pos + 1:]) return left @ right assert_almost_equal(m1 @ m2 @ m3 @ m4, product_N([m1, m2, m3, m4])) mesure_temps_fonction(lambda: produit4(m1, m2, m3, m4)) mesure_temps_fonction(lambda: product_N([m1, m2, m3, m4])) """ Explanation: Q9 : On se penche sur le cas à une multiplication de N matrices, combien y a-t-il de multiplications de 2 matrices ? (2 points) Il y a en toujours N-1. On considère le produit $M_1 \times... \times M_n$. La multiplication commence toujours par une multiplication de deux matrices consécutives quelles qu'elles soient. On les suppose aux positions $(i, i+1)$. On note le résultat $MM_i$. Après ce produit, il faudra faire : $(M_1 \times ... \times M_{i-1} \times MM_i \times M_{i+2} \times ... \times M_n$, soit une multiplication de $N-2$ matrices. On obtient le résultat par récurrence. Ici s'arrête l'énoncé pour ceux qui ont choisit de répondre aux question 6 à 9. Q10 : Résoudre l'optimisation de multiplication de N matrices. On l'envisage de façon récursive. La première solution effectue plein de calculs en double mais nous verront comment la modifier. End of explanation """ def n_ops_N(shapes, verbose=False): if verbose: print("n_ops_N(%r)" % shapes) if len(shapes) <= 1: raise RuntimeError("Unexpected list of shapes: %r." 
% shapes) if len(shapes) == 2: return n_ops(*shapes), 1 if len(shapes) == 3: return n_ops_3(*shapes) best_cost = None best_pos = None for i in range(1, len(shapes)): if i == 1: cost = (n_ops(shapes[0], (shapes[1][0], shapes[-1][1])) + n_ops_N(shapes[1:], verbose=verbose)[0]) best_cost = cost best_pos = i elif i == len(shapes)-1: cost = (n_ops_N(shapes[:-1], verbose=verbose)[0] + n_ops((shapes[0][0], shapes[-2][1]), shapes[-1])) if cost < best_cost: best_cost = cost best_pos = i else: cost = (n_ops_N(shapes[:i], verbose=verbose)[0] + n_ops_N(shapes[i:], verbose=verbose)[0] + n_ops((shapes[0][0], shapes[i-1][1]), (shapes[i][0], shapes[-1][1]))) if cost < best_cost: best_cost = cost best_pos = i if best_pos is None: raise RuntimeError(shapes) return best_cost, best_pos m5 = numpy.random.randn(20, 17) n_ops_N([m1.shape, m2.shape, m3.shape, m4.shape, m5.shape], verbose=True) """ Explanation: Ici s'arrête ce qui est attendu comme réponse à la question 10. Les calculs en double... On vérifie en ajoutant une ligne pour afficher tous les appels à n_ops_N. End of explanation """ def n_ops_N_opt(shapes, cache=None, verbose=False): if cache is None: cache = {} key = tuple(shapes) if key in cache: # On s'arrête, déjà calculé. return cache[key] if verbose: print("n_ops_N(%r)" % shapes) if len(shapes) <= 1: raise RuntimeError("Unexpected list of shapes: %r." 
% shapes) if len(shapes) == 2: res = n_ops(*shapes), 1 cache[key] = res return res if len(shapes) == 3: res = n_ops_3(*shapes) cache[key] = res return res best_cost = None best_pos = None for i in range(1, len(shapes)): if i == 1: cost = (n_ops(shapes[0], (shapes[1][0], shapes[-1][1])) + n_ops_N_opt(shapes[1:], verbose=verbose, cache=cache)[0]) best_cost = cost best_pos = i elif i == len(shapes)-1: cost = (n_ops_N_opt(shapes[:-1], verbose=verbose, cache=cache)[0] + n_ops((shapes[0][0], shapes[-2][1]), shapes[-1])) if cost < best_cost: best_cost = cost best_pos = i else: cost = (n_ops_N_opt(shapes[:i], verbose=verbose, cache=cache)[0] + n_ops_N_opt(shapes[i:], verbose=verbose, cache=cache)[0] + n_ops((shapes[0][0], shapes[i-1][1]), (shapes[i][0], shapes[-1][1]))) if cost < best_cost: best_cost = cost best_pos = i if best_pos is None: raise RuntimeError(shapes) res = best_cost, best_pos cache[key] = res return res n_ops_N_opt([m1.shape, m2.shape, m3.shape, m4.shape, m5.shape], verbose=True) """ Explanation: On voit deux appels identiques n_ops_N([(97, 20), (20, 17)]) et n_ops_N([(93, 1003), (1003, 97), (97, 20)]). Ce n'est pas trop problématique pour un petit nombre de matrices mais cela pourrait le devenir si ce même algorithme était appliquée à autre chose. Plutôt que de réécrire l'algorithme différemment, on se propose d'ajouter un paramètre pour garder la trace des résultats déjà retournés. End of explanation """ def product_N_opt(inputs, cache=None): if len(inputs) <= 1: raise RuntimeError( "List inputs must contain at least two elements bot has %d." 
% len(inputs)) cost, pos = n_ops_N_opt([i.shape for i in inputs], cache=cache) if len(inputs) == 2: return inputs[0] @ inputs[1] if pos == 1: right = product_N_opt(inputs[1:], cache=cache) return inputs[0] @ right if pos == len(shapes) - 1: left = product_N_opt(inputs[:-1], cache=cache) return left @ inputs[-1] else: left = product_N_opt(inputs[:pos + 1], cache=cache) right = product_N_opt(inputs[pos + 1:], cache=cache) return left @ right assert_almost_equal(m1 @ m2 @ m3 @ m4, product_N([m1, m2, m3, m4])) mesure_temps_fonction(lambda: product_N([m1, m2, m3, m4, m5])) mesure_temps_fonction(lambda: product_N_opt([m1, m2, m3, m4, m5])) mesure_temps_fonction(lambda: m1 @ m2 @ m3 @ m4 @ m5) """ Explanation: La liste est moins longue et tous les appels sont uniques. On met à jour la fonction product_N. End of explanation """
colour-science/colour-ipython
notebooks/colorimetry/luminance.ipynb
bsd-3-clause
import colour colour.utilities.filter_warnings(True, False) sorted(colour.LUMINANCE_METHODS.keys()) """ Explanation: !!! D . R . A . F . T !!! Luminance The Luminance $L_v$ is the quantity defined by the formula: <a name="back_reference_1"></a><a href="#reference_1">[1]</a> $$ \begin{equation} L_v=\cfrac{d\Phi_v}{dAcos\theta d\Omega} \end{equation} $$ where $d\Phi_v$ is the luminous flux transmitted by an elementary beam passing through the given point and propagating in the solid angle, $d\Omega$, containing the given direction. $dA$ is the area of a section of that beam containing the given point. $\theta$ is the angle between the normal to that section and the direction of the beam. $L_v$ unit is candela per square metre (or nits) $cd\cdot m^{-2}=lm\cdot m^{-2}\cdot sr^{-1}$. Colour defines the following luminance computation methods: End of explanation """ colour.colorimetry.luminance_Newhall1943(3.74629715382) """ Explanation: Note: 'astm2008' and 'cie1976' are convenient aliases for respectively 'ASTM D1535-08' and 'CIE 1976'. Newhall, Nickerson, and Judd (1943) Method Newhall, Nickerson, and Judd (1943) fitted a quintic-parabola function to the adjusted Munsell-Sloan-Godlove reflectances, the resulting equation computing luminance $R_Y$ as function of Munsell value $V$ is expressed as follows: <a name="back_reference_2"></a><a href="#reference_2">[2]</a> $$ \begin{equation} R_Y=1.2219V-0.23111V^2+0.23951V^3-0.021009V^4+0.0008404V^5 \end{equation} $$ See Also: The Munsell Renotation System notebook for in-depth information about the Munsell Renotation System. The colour.luminance_Newhall1943 definition is used to compute luminance $R_Y$: End of explanation """ colour.colorimetry.luminance(3.74629715382, method='Newhall 1943') """ Explanation: Note: Input Munsell value $V$ is in domain [0, 10], output luminance $R_Y$ is in domain [0, 100]. 
The colour.luminance definition is implemented as a wrapper for various luminance computation methods: End of explanation """ colour.colorimetry.luminance_ASTMD153508(3.74629715382) """ Explanation: ASTM D1535-08$^{\epsilon 1}$ (2008) Method Since 1943, the reference white used for the Munsell Renotation System has changed. As a result the quintic-parabola function from Newhall, Nickerson, and Judd (1943) has been adjusted: Each coefficient of the function has been multiplied by 0.975, the reflectance factor of magnesium oxide with respect to the perfect reflecting diffuser and then rounded to five digits. The updated equation for computing luminance $Y$ as function of the Munsell value $V$ is expressed as follows: <a name="back_reference_3"></a><a href="#reference_3">[3]</a> $$ \begin{equation} Y=1.1914V-0.22533V^2+0.23352V^3-0.020484V^4+0.00081939V^5 \end{equation} $$ See Also: The Munsell Renotation System notebook for in-depth information about the Munsell Renotation System. The colour.luminance_ASTMD153508 definition is used to compute luminance $Y$: End of explanation """ colour.luminance(3.74629715382, method='ASTM D1535-08') colour.luminance(3.74629715382, method='astm2008') """ Explanation: Note: Input Munsell value $V$ is in domain [0, 10], output luminance $Y$ is in domain [0, 100]. Using the colour.luminance wrapper definition: End of explanation """ colour.colorimetry.luminance_CIE1976(37.9856290977) """ Explanation: CIE 1976 Method The CIE $L^a^b^$ approximately uniform colourspace defined in 1976 computes the luminance* $Y$ quantity as follows: <a name="back_reference_4"></a><a href="#reference_4">[4]</a> $$ \begin{equation} Y=\begin{cases}Y_n\biggl(\cfrac{L^+16}{116}\biggr)^3 & for\ L^>\kappa\epsilon\ Y_n\biggl(\cfrac{L^}{\kappa}\biggr) & for\ L^<=\kappa\epsilon \end{cases} \end{equation} $$ where $Y_n$ is the reference white luminance. 
with $$ \begin{equation} \begin{aligned} \epsilon&\ =\begin{cases}0.008856 & Actual\ CIE\ Standard\ 216\ /\ 24389 & Intent\ of\ the\ CIE\ Standard \end{cases}\ \kappa&\ =\begin{cases}903.3 & Actual\ CIE\ Standard\ 24389\ /\ 27 & Intent\ of\ the\ CIE\ Standard \end{cases} \end{aligned} \end{equation} $$ The original $\epsilon$ and $\kappa$ constants values have been shown to exhibit discontinuity at the junction point of the two functions grafted together to create the Lightness $L^*$ function. <a name="back_reference_5"></a><a href="#reference_5">[5]</a> Colour uses the rational values instead of the decimal values for these constants. See Also: The CIE $L^a^b^*$ Colourspace notebook for in-depth information about the CIE $L^a^b^$* colourspace. The colour.luminance_CIE1976 definition is used to compute Luminance $Y$: End of explanation """ colour.luminance(37.9856290977, method='CIE 1976') colour.luminance(37.9856290977, method='cie1976') """ Explanation: Note: Input Lightness $L^$ and and $Y_n$ are in domain [0, 100], output luminance* $Y$ is in domain [0, 100]. Using the colour.luminance wrapper definition: End of explanation """ colour.colorimetry.luminance_Fairchild2010(24.902290269546651, 1.836) colour.luminance(24.902290269546651, method='Fairchild 2010', epsilon=1.836) """ Explanation: Fairchild and Wyble (2010) Method End of explanation """ colour.colorimetry.luminance_Fairchild2011(26.459509817572265, 0.710) colour.luminance(26.459509817572265, method='Fairchild 2011', epsilon=0.710) """ Explanation: Fairchild and Chen (2011) Method End of explanation """
BrownDwarf/ApJdataFrames
notebooks/Luhman2004c.ipynb
mit
import warnings
warnings.filterwarnings("ignore")

"""
Explanation: ApJdataFrames 003: Luhman2004c Title: New Brown Dwarfs and an Updated Initial Mass Function in Taurus Authors: Luhman K.L. Data is from this paper: http://iopscience.iop.org/0004-637X/617/2/1216/ End of explanation
"""

import pandas as pd

# Column names for Table 3 of Luhman (2004), ApJ 617, 1216.
names = [
    "Name_2MASS", "RA", "Dec", "Spectral Type", "Membership",
    "Teff", "AJ", "Lbol", "I", "I-zp", "J-H", "H-Ks", "Ks",
    "inIMF", "Night",
]

# Read the machine-readable, tab-separated table straight from the journal.
tbl3 = pd.read_csv(
    "http://iopscience.iop.org/0004-637X/617/2/1216/fulltext/60509.tb3.txt",
    sep='\t', names=names)
tbl3

"""
Explanation: Table 3- New Members of Taurus End of explanation
"""

# Write a local tab-separated copy of the table.
tbl3.to_csv("../data/Luhman2004c/tbl3.csv", sep="\t")

"""
Explanation: Save the data tables locally. !mkdir ../data/Luhman2004c End of explanation
"""
vipmunot/Data-Science-Course
Data Visualization/Lab 6/w06_Vipul_Munot.ipynb
mit
import matplotlib.pyplot as plt import numpy as np import seaborn as sns import pandas as pd sns.set_style('white') %matplotlib inline """ Explanation: W6 Lab Assignment Deep dive into Histogram and boxplot. End of explanation """ bins = [0, 1, 3, 5, 10, 24] data = {0.5: 4300, 2: 6900, 4: 4900, 7: 2000, 15: 2100} """ Explanation: Histogram Let's revisit the table from the class | Hours | Frequency | |-------|-----------| | 0-1 | 4,300 | | 1-3 | 6,900 | | 3-5 | 4,900 | | 5-10 | 2,000 | | 10-24 | 2,100 | You can draw a histogram by just providing bins and counts instead of a list of numbers. So, let's do that for convenience. End of explanation """ # TODO: draw a histogram with pre-counted data. #plt.xlabel("Hours") val, weight = zip(*[(k, v) for k,v in data.items()]) plt.hist(val, weights=weight, bins = bins) plt.xlabel("Hours") """ Explanation: Draw histogram using this data. Useful query: Google search: matplotlib histogram pre-counted End of explanation """ # TODO: fix it with normed option. plt.hist(val, weights=weight, bins = bins, normed = True) """ Explanation: As you can see, the default histogram does not normalize with binwidth and simply shows the counts! This can be very misleading if you are working with variable bin width. One simple way to fix this is using the option normed. End of explanation """ # TODO: Load IMDB data into movie_df using pandas movie_df = pd.read_csv('imdb.csv', delimiter='\t') movie_df.head() """ Explanation: IMDB data How does matplotlib decide the bin width? Let's try with the IMDb data. End of explanation """ plt.hist(movie_df['Rating']) """ Explanation: Plot the histogram of movie ratings using the plt.hist() function. End of explanation """ n_raw, bins_raw, patches = plt.hist(movie_df['Rating']) print(n_raw) print(bins_raw) """ Explanation: Have you noticed that this function returns three objects? Take a look at the documentation here to figure out what they are. 
To get the returned three objects: End of explanation """ # TODO: test whether the sum of the numbers in n_raw is equal to the number of movies. sum(n_raw)==len(movie_df) """ Explanation: Actually, n_raw contains the values of histograms, i.e., the number of movies in each of the 10 bins. Thus, the sum of the elements in n_raw should be equal to the total number of movies: End of explanation """ # TODO: calculate the width of each bin and print them. for i in range(len(bins_raw)-1): print (bins_raw[i+1] - bins_raw[i]) """ Explanation: The second returned object (bins_raw) is a list containing the edges of the 10 bins: the first bin is [1.0,1.89], the second [1.89,2.78], and so on. We can calculate the width of each bin. End of explanation """ [ j-i for i,j in zip(bins_raw[:-1],bins_raw[1:]) ] """ Explanation: The above for loop can be conveniently rewritten as the following, using list comprehension and the zip() function. Can you explain what's going on inside the zip? End of explanation """ min_rating = min(movie_df['Rating']) max_rating = max(movie_df['Rating']) print(min_rating, max_rating) print( (max_rating-min_rating) / 10 ) """ Explanation: Noticed that the width of each bin is the same? This is equal-width binning. We can calculate the width as: End of explanation """ n, bins, patches = plt.hist(movie_df['Rating'], normed=True) print(n) print(bins) """ Explanation: Now, let's plot the histogram where the y axis is normed. End of explanation """ # TODO: verify that it is properly normalized. normalizeList = [] for i in range(len(bins)): try: Moviesbins = movie_df[(movie_df['Rating'] >= bins[i]) & (movie_df['Rating'] <= bins[i+1])] normalizeList.append(round(len(Moviesbins)/len(movie_df), 4)) except IndexError: pass print("Bin widths", normalizeList) print("Data from histogram", n) """ Explanation: In this case, the edges of the 10 bins do not change. But now n represents the heights of the bins. 
Can you verify that matplotlib has correctly normed the heights of the bins? Hint: the area of each bin should be equal to the fraction of movies in that bin. End of explanation """ plt.figure(figsize=(10,5)) plt.subplot(1,2,1) movie_df['Rating'].hist(bins=3) plt.subplot(1,2,2) movie_df['Rating'].hist(bins=100) """ Explanation: Selecting binsize A nice to way to explore this is using the "small multiples" with a set of sample bin sizes. In other words, pick some binsizes that you want to see and draw many plots within a single "figure". Read about subplot. For instance, you can do something like: End of explanation """ binsizes = [2, 3, 5, 10, 30, 40, 60, 100 ] plt.figure(1, figsize=(18,8)) for i, bins in enumerate(binsizes): # TODO: use subplot and hist() function to draw 8 plots plt.subplot(2, 4, i + 1) movie_df['Rating'].hist(bins = bins) plt.title("Bin size " + str(bins)) """ Explanation: What does the argument in plt.subplot(1,2,1) mean? http://stackoverflow.com/questions/3584805/in-matplotlib-what-does-the-argument-mean-in-fig-add-subplot111 Ok, so create 8 subplots (2 rows and 4 columns) with the given binsizes. End of explanation """ N = len(movie_df['Rating']) # TODO: plot three histograms based on three formulae plt.figure(figsize=(12,4)) # Sqrt nbins = int(np.sqrt(N)) plt.subplot(1,3,1) plt.hist(movie_df['Rating'], bins = nbins) plt.title("SQRT, {0} bins".format(nbins)) # Sturge's formula plt.subplot(1,3,2) nbins = int(np.ceil(np.log2(N) + 1)) plt.hist(movie_df['Rating'], bins = nbins) plt.title("Sturge's, {0} bins".format(nbins)) # Freedman-Diaconis plt.subplot(1,3,3) data = movie_df['Rating'].order() iqr = np.percentile(data, 75) - np.percentile(data, 25) width = 2*iqr/np.power(N, 1/3) nbins = int((max(data) - min(data)) / width) plt.hist(movie_df['Rating'], bins = nbins) plt.title("Freedman-Diaconis, {0} bins".format(nbins)) """ Explanation: Do you notice weird patterns that emerge from bins=40? Can you guess why do you see such patterns? 
What are the peaks and what are the empty bars? What do they tell you about choosing the binsize in histograms? Now, let's try to apply several algorithms for finding the number of bins. End of explanation """ # TODO: draw the histogram with 120 bins n, bins, patches = plt.hist(movie_df['Rating'], bins = 120) plt.title("Histogram with bins 120") plt.xlabel("Rating") plt.ylabel("Frequency") """ Explanation: Investigating the anomalies in the histogram Let's investigate the anormalies in the histogram. End of explanation """ # TODO: print out bins that doesn't contain any values. Check whether they fall into range like [1.8XX, 1.8XX] # useful zip: zip(bins[:-1], bins[1:], n) what does this do? zip_values = zip(bins[:-1], bins[1:], n) print("Range with value zero's are as follows") for i in zip_values: if i[2] == 0: print([i[0], i[1]]) if str(i[0])[:3] == str(i[1])[:3]: print("They fall in range") # TODO: draw the histogram with 120 bins n, bins, patches = plt.hist(movie_df['Rating'], bins = 120) plt.title("Histogram with bins 120") plt.xlabel("Rating") plt.ylabel("Frequency") """ Explanation: We can locate where the empty bins are, by checking whether the value in the n is zero or not. End of explanation """ # TODO: identify peaks and print the bins with the peaks # e.g. # [1.0, 1.1] # [1.3, 1.4] # [1.6, 1.7] # ... # # you can use zip again like zip(bins[:-1], bins[1:] ... ) to access the data in two adjacent bins. values = list(zip(bins[:-1], bins[1:], n)) print("Bin with peaks are as follows") for i in range(1, len(values)): try: if ((values[i][2] > values[i-1][2]) and (values[i][2] > values[i+1][2])): print([values[i][0], values[i][1]]) except IndexError: pass """ Explanation: One way to identify the peak is comparing the number to the next bin and see whether it is much higher than the next bin. End of explanation """ movie_df.describe() """ Explanation: Ok. They doesn't necessarilly cover the integer values. Let's see the minimum number of votes. 
End of explanation """ # TODO: plot the histogram only with ratings that have the minimum number of votes. df = movie_df[movie_df['Votes'] == 5] plt.hist(df['Rating'], bins = 30) plt.xlabel("Rating") plt.ylabel("Frequency") plt.title("Histogram of rating with min no of votes") """ Explanation: Ok, the minimum number of votes is 5 not 1. IMDB may only keep the rating information for movies with at least 5 votes. This may explain why the most frequent ratings are like 6.4 and 6.6. Let's plot the histogram with only the rows with 5 votes. Set the binsize 30. End of explanation """ # TODO: filter out the rows with the min number of votes (5) and then `value_counts()` them. # sort the result to see what are the most common numbers. df['Rating'].value_counts() # As you can see in the following output that 6.4 is most common rating. """ Explanation: Then, print out what are the most frequent rating values. Use value_counts() function for dataframe. End of explanation """ # Plot the CDF of votes. """ Explanation: So, the most frequent values are not "x.0". Let's see the CDF. End of explanation """ # TODO: plot the same thing but limit the xrange (xlim) to [0, 100]. """ Explanation: What's going on? The number of votes is heavily skewed and most datapoints are at the left end. End of explanation """ # TODO: set the xlim to [0, 10] adjust ylim and bins so that # we can see how many datapoints are there for each # of votes. """ Explanation: Draw a histogram focused on the range [0, 10] to just see how many datapoints are there. 
End of explanation """ #list(product([5,6,7,8], repeat=5))[:10] from itertools import product from collections import Counter c = Counter() for x in product([5,6,7,8], repeat=5): c[str(round(np.mean(x), 1))]+=1 sorted(c.items(), key=lambda x: x[1], reverse=True) # or sorted(Counter(str(round(np.mean(x), 1)) for x in product([5,6,7,8], repeat=5)).items(), key=lambda x: x[1], reverse=True) """ Explanation: Let's assume that most 5 ratings are from 5 to 8 and see what we'll get. You can use itertools.product function to generate the fake ratings. End of explanation """ data = [-1, 3, 3, 4, 15, 16, 16, 17, 23, 24, 24, 25, 35, 36, 37, 46] """ Explanation: Boxplot Let's look at the example data that we looked at during the class. End of explanation """ print(np.percentile(data, 25)) print(np.percentile(data, 50), np.median(data)) print(np.percentile(data, 75)) """ Explanation: The numpy.percentile() function provides a way to calculate the percentiles. Note that using the option interpolation, you can specify which value to take when the percentile value lies in between numbers. The default is linear. End of explanation """ # TODO: draw a boxplot of the data plt.boxplot(data) """ Explanation: Can you explain why do you get those first and third quartile values? The first quantile value is not 4, not 15, and not 9.5. Why? Let's draw a boxplot with matplotlib. End of explanation """
rpmunoz/topicos_ingenieria_1
clase_1/02 - Lectura de datos con Pandas.ipynb
gpl-3.0
import numpy as np from __future__ import print_function import pandas as pd pd.__version__ """ Explanation: Lectura y manipulación de datos con Pandas Autor: Roberto Muñoz <br /> E-mail: &#114;&#109;&#117;&#110;&#111;&#122;&#64;&#117;&#99;&#46;&#99;&#108; This notebook shows how to create Series and Dataframes with Pandas. Also, how to read CSV files and creaate pivot tables. The first part is based on the chapter 3 of the <a href=" http://nbviewer.jupyter.org/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/03.01-Introducing-Pandas-Objects.ipynb">Python Data Science Handbook</a>. End of explanation """ data = pd.Series([0.25, 0.5, 0.75, 1.0]) data """ Explanation: 1. The Pandas Series Object A Pandas Series is a one-dimensional array of indexed data. It can be created from a list or array as follows: End of explanation """ data.values """ Explanation: As we see in the output, the Series wraps both a sequence of values and a sequence of indices, which we can access with the values and index attributes. The values are simply a familiar NumPy array: End of explanation """ data.index """ Explanation: The index is an array-like object of type pd.Index, which we'll discuss in more detail momentarily. End of explanation """ data[1] """ Explanation: Like with a NumPy array, data can be accessed by the associated index via the familiar Python square-bracket notation: End of explanation """ data = pd.Series([0.25, 0.5, 0.75, 1.0], index=['a', 'b', 'c', 'd']) data """ Explanation: Series as generalized NumPy array From what we've seen so far, it may look like the Series object is basically interchangeable with a one-dimensional NumPy array. The essential difference is the presence of the index: while the Numpy Array has an implicitly defined integer index used to access the values, the Pandas Series has an explicitly defined index associated with the values. 
End of explanation """ data['b'] """ Explanation: And the item access works as expected: End of explanation """ population_dict = {'Arica y Parinacota': 243149, 'Antofagasta': 631875, 'Metropolitana de Santiago': 7399042, 'Valparaiso': 1842880, 'Bíobío': 2127902, 'Magallanes y Antártica Chilena': 165547} population = pd.Series(population_dict) population """ Explanation: Series as specialized dictionary In this way, you can think of a Pandas Series a bit like a specialization of a Python dictionary. A dictionary is a structure that maps arbitrary keys to a set of arbitrary values, and a Series is a structure which maps typed keys to a set of typed values. This typing is important: just as the type-specific compiled code behind a NumPy array makes it more efficient than a Python list for certain operations, the type information of a Pandas Series makes it much more efficient than Python dictionaries for certain operations. End of explanation """ population['Arica y Parinacota'] """ Explanation: You can notice the indexes were sorted lexicographically. That's the default behaviour in Pandas End of explanation """ population['Metropolitana':'Valparaíso'] """ Explanation: Unlike a dictionary, though, the Series also supports array-style operations such as slicing: End of explanation """ # Area in km^2 area_dict = {'Arica y Parinacota': 16873.3, 'Antofagasta': 126049.1, 'Metropolitana de Santiago': 15403.2, 'Valparaiso': 16396.1, 'Bíobío': 37068.7, 'Magallanes y Antártica Chilena': 1382291.1} area = pd.Series(area_dict) area """ Explanation: 2. The Pandas DataFrame Object The next fundamental structure in Pandas is the DataFrame. Like the Series object discussed in the previous section, the DataFrame can be thought of either as a generalization of a NumPy array, or as a specialization of a Python dictionary. We'll now take a look at each of these perspectives. 
DataFrame as a generalized NumPy array If a Series is an analog of a one-dimensional array with flexible indices, a DataFrame is an analog of a two-dimensional array with both flexible row indices and flexible column names. End of explanation """ regions = pd.DataFrame({'population': population, 'area': area}) regions regions.index regions.columns """ Explanation: Now that we have this along with the population Series from before, we can use a dictionary to construct a single two-dimensional object containing this information: End of explanation """ regions['area'] """ Explanation: DataFrame as specialized dictionary Similarly, we can also think of a DataFrame as a specialization of a dictionary. Where a dictionary maps a key to a value, a DataFrame maps a column name to a Series of column data. For example, asking for the 'area' attribute returns the Series object containing the areas we saw earlier: End of explanation """ pd.DataFrame(population, columns=['population']) """ Explanation: Constructing DataFrame objects A Pandas DataFrame can be constructed in a variety of ways. Here we'll give several examples. 
From a single Series object¶ A DataFrame is a collection of Series objects, and a single-column DataFrame can be constructed from a single Series: End of explanation """ pd.DataFrame({'population': population, 'area': area}, columns=['population', 'area']) """ Explanation: From a dictionary of Series objects As we saw before, a DataFrame can be constructed from a dictionary of Series objects as well: End of explanation """ regiones_file='data/chile_regiones.csv' provincias_file='data/chile_provincias.csv' comunas_file='data/chile_comunas.csv' regiones=pd.read_csv(regiones_file, header=0, sep=',') provincias=pd.read_csv(provincias_file, header=0, sep=',') comunas=pd.read_csv(comunas_file, header=0, sep=',') print('regiones table: ', regiones.columns.values.tolist()) print('provincias table: ', provincias.columns.values.tolist()) print('comunas table: ', comunas.columns.values.tolist()) regiones.head() provincias.head() comunas.head() regiones_provincias=pd.merge(regiones, provincias, how='outer') regiones_provincias.head() provincias_comunas=pd.merge(provincias, comunas, how='outer') provincias_comunas.head() regiones_provincias_comunas=pd.merge(regiones_provincias, comunas, how='outer') regiones_provincias_comunas.index.name='ID' regiones_provincias_comunas.head() #regiones_provincias_comunas.to_csv('chile_regiones_provincia_comuna.csv', index=False) """ Explanation: 3. Reading a CSV file and doing common Pandas operations End of explanation """ data_file='data/chile_demographic.csv' data=pd.read_csv(data_file, header=0, sep=',') data data.sort_values('Poblacion') data.sort_values('Poblacion', ascending=False) (data.groupby(['Region'])['Poblacion','Superficie'].sum()) (data.groupby(['Region'])['Poblacion','Superficie'].sum()).sort_values('Poblacion', ascending=False) data.sort_values(['RegionID']).groupby(['RegionID','Region'])['Poblacion','Superficie'].sum() """ Explanation: 4. Loading ful dataset End of explanation """
quantopian/research_public
notebooks/data/quandl.fred_usdontd156n/notebook.ipynb
apache-2.0
# import the dataset from quantopian.interactive.data.quandl import fred_usdontd156n as libor # Since this data is public domain and provided by Quandl for free, there is no _free version of this # data set, as found in the premium sets. This import gets you the entirety of this data set. # import data operations from odo import odo # import other libraries we will use import pandas as pd import matplotlib.pyplot as plt libor.sort('asof_date') """ Explanation: Quandl: Overnight LIBOR In this notebook, we'll take a look at data set available on Quantopian. This dataset spans from 2001 through the current day. It contains the value for the London Interbank Borrowing Rate (LIBOR). We access this data via the API provided by Quandl. More details on this dataset can be found on Quandl's website. Blaze Before we dig into the data, we want to tell you about how you generally access Quantopian partner data sets. These datasets are available using the Blaze library. Blaze provides the Quantopian user with a convenient interface to access very large datasets. Some of these sets (though not this one) are many millions of records. Bringing that data directly into Quantopian Research directly just is not viable. So Blaze allows us to provide a simple querying interface and shift the burden over to the server side. To learn more about using Blaze and generally accessing Quantopian partner data, clone this tutorial notebook. With preamble in place, let's get started: End of explanation """ libor.count() """ Explanation: The data goes all the way back to 2001 and is updated daily. Blaze provides us with the first 10 rows of the data for display. 
Just to confirm, let's just count the number of rows in the Blaze expression: End of explanation """ libor_df = odo(libor, pd.DataFrame) libor_df.plot(x='asof_date', y='value') plt.xlabel("As Of Date (asof_date)") plt.ylabel("LIBOR") plt.title("London Interbank Offered Rate") plt.legend().set_visible(False) """ Explanation: Let's go plot it for fun. This data set is definitely small enough to just put right into a Pandas DataFrame End of explanation """
karst87/ml
dev/pyml/datacamp/intro-to-python-for-data-science/02_python-lists.ipynb
mit
fmz = [1.65, 1.45, 1.76] fmz fmz2 = [1, 3, 1.2, 'Hello'] fmz2 fmz3= [[23,12], [99, 1]] fmz3 """ Explanation: Python Lists https://campus.datacamp.com/courses/intro-to-python-for-data-science/chapter-2-python-lists?ex=1 Learn to store, access and manipulate data in lists: the first step towards efficiently working with huge amounts of data. 1. Lists, what are they? https://campus.datacamp.com/courses/intro-to-python-for-data-science/chapter-2-python-lists?ex=1 End of explanation """ # area variables (in square meters) hall = 11.25 kit = 18.0 liv = 20.0 bed = 10.75 bath = 9.50 # Create list areas areas = [hall, kit, liv, bed, bath] # Print areas print(areas) """ Explanation: 2. Create a list https://campus.datacamp.com/courses/intro-to-python-for-data-science/chapter-2-python-lists?ex=2 As opposed to int, bool etc, a list is a compound data type: you can group values together: a = "is" b = "nice" my_list = ["my", "list", a, b] After measuring the height of your family, you decide to collect some information on the house you're living in. The areas of the different parts of your house are stored in separate variables for now, as shown in the script. Instructions Create a list, areas, that contains the area of the hallway (hall), kitchen (kit), living room (liv), bedroom (bed) and bathroom (bath), in this order. Use the predefined variables. Print areas with the print() function. End of explanation """ # area variables (in square meters) hall = 11.25 kit = 18.0 liv = 20.0 bed = 10.75 bath = 9.50 # Adapt list areas areas = ['hallway', hall, 'kitchen', kit, "living room", liv, 'bedroom', bed, "bathroom", bath] # Print areas print(areas) """ Explanation: 3. Create list with different types https://campus.datacamp.com/courses/intro-to-python-for-data-science/chapter-2-python-lists?ex=3 A list can contain any Python type. Although it's not really common, a list can also contain a mix of Python types including strings, floats, booleans, etc. 
The printout of the previous exercise wasn't really satisfying. It's just a list of numbers representing the areas, but you can't tell which area corresponds to which part of your house. The code on the right is the start of a solution. For some of the areas, the name of the corresponding room is already placed in front. Pay attention here! "bathroom" is a string, while bath is a variable that represents the float 9.50 you specified earlier. Instructions Finish the line of code that creates the areas list such that the list first contains the name of each room as a string, and then its area. More specifically, add the strings "hallway", "kitchen" and "bedroom" at the appropriate locations. Print areas again; is the printout more informative this time? End of explanation """ [1, 3, 4, 2] [[1, 2, 3], [4, 5, 7]] [1 + 2, "a" * 5, 3] """ Explanation: 4. Select the valid list https://campus.datacamp.com/courses/intro-to-python-for-data-science/chapter-2-python-lists?ex=4 A list can contain any Python type. But a list itself is also a Python type. That means that a list can also contain a list! Python is getting funkier by the minute, but fear not, just remember the list syntax: my_list = [el1, el2, el3] Can you tell which ones of the following lines of Python code are valid ways to build a list? A. [1, 3, 4, 2] B. [[1, 2, 3], [4, 5, 7]] C. [1 + 2, "a" * 5, 3] Possible Answers A, B and C B B and C C End of explanation """ # area variables (in square meters) hall = 11.25 kit = 18.0 liv = 20.0 bed = 10.75 bath = 9.50 # house information as list of lists house = [["hallway", hall], ["kitchen", kit], ["living room", liv], ["bedroom", bed], ["bathroom", bath]] # Print out house print(house) # Print out the type of house print(type(house)) """ Explanation: 5. 
List of lists https://campus.datacamp.com/courses/intro-to-python-for-data-science/chapter-2-python-lists?ex=5 As a data scientist, you'll often be dealing with a lot of data, and it will make sense to group some of this data. Instead of creating a flat list containing strings and floats, representing the names and areas of the rooms in your house, you can create a list of lists. The script on the right can already give you an idea. Don't get confused here: "hallway" is a string, while hall is a variable that represents the float 11.25 you specified earlier. Instructions Finish the list of lists so that it also contains the bedroom and bathroom data. Make sure you enter these in order! Print out house; does this way of structuring your data make more sense? Print out the type of house. Are you still dealing with a list? End of explanation """ fam = ['liz', 1.73, 'emma', 1.68, 'mom', 1.71, 'dad', 1.89] fam fam[3] fam[6] fam[-1] fam[-2] fam[3:5] fam[1:4] fam[:4] """ Explanation: 6. Subsetting lists https://campus.datacamp.com/courses/intro-to-python-for-data-science/chapter-2-python-lists?ex=6 End of explanation """ # Create the areas list areas = ["hallway", 11.25, "kitchen", 18.0, "living room", 20.0, "bedroom", 10.75, "bathroom", 9.50] # Print out second element from areas print(areas[2]) # Print out last element from areas print(areas[-1]) # Print out the area of the living room print(areas[5]) # Create the areas list areas = ["hallway", 11.25, "kitchen", 18.0, "living room", 20.0, "bedroom", 10.75, "bathroom", 9.50] # Print out second element from areas print(areas[1]) # Print out last element from areas print(areas[-1]) # Print out the area of the living room print(areas[5]) """ Explanation: 7. Subset and conquer https://campus.datacamp.com/courses/intro-to-python-for-data-science/chapter-2-python-lists?ex=7 Subsetting Python lists is a piece of cake. Take the code sample below, which creates a list x and then selects "b" from it. 
Remember that this is the second element, so it has index 1. You can also use negative indexing. x = list["a", "b", "c", "d"] x[1] x[-3] # same result! Remember the areas list from before, containing both strings and floats? Its definition is already in the script. Can you add the correct code to do some Python subsetting? Instructions Print out the second element from the areas list, so 11.25. Subset and print out the last element of areas, being 9.50. Using a negative index makes sense here! Select the number representing the area of the living room and print it out. End of explanation """ # Create the areas list areas = ["hallway", 11.25, "kitchen", 18.0, "living room", 20.0, "bedroom", 10.75, "bathroom", 9.50] # Sum of kitchen and bedroom area: eat_sleep_area eat_sleep_area = areas[3] + areas[-3] # Print the variable eat_sleep_area print(eat_sleep_area) """ Explanation: 8. Subset and calculate https://campus.datacamp.com/courses/intro-to-python-for-data-science/chapter-2-python-lists?ex=8 After you've extracted values from a list, you can use them to perform additional calculations. Take this example, where the second and fourth element of a list x are extracted. The strings that result are pasted together using the + operator: x = ["a", "b", "c", "d"] print(x[1] + x[3]) Instructions Using a combination of list subsetting and variable assignment, create a new variable, eat_sleep_area, that contains the sum of the area of the kitchen and the area of the bedroom. Print this new variable eat_sleep_area. End of explanation """ # Create the areas list areas = ["hallway", 11.25, "kitchen", 18.0, "living room", 20.0, "bedroom", 10.75, "bathroom", 9.50] # Use slicing to create downstairs downstairs = areas[0:6] # Use slicing to create upstairs upstairs = areas[6:10] # Print out downstairs and upstairs print(downstairs) print(upstairs) """ Explanation: 9. 
Slicing and dicing https://campus.datacamp.com/courses/intro-to-python-for-data-science/chapter-2-python-lists?ex=9 Selecting single values from a list is just one part of the story. It's also possible to slice your list, which means selecting multiple elements from your list. Use the following syntax: my_list[start:end] The start index will be included, while the end index is not. The code sample below shows an example. A list with "b" and "c", corresponding to indexes 1 and 2, are selected from a list x: x = ["a", "b", "c", "d"] x[1:3] The elements with index 1 and 2 are included, while the element with index 3 is not. Instructions Use slicing to create a list, downstairs, that contains the first 6 elements of areas. Do a similar thing to create a new variable, upstairs, that contains the last 4 elements of areas. Print both downstairs and upstairs using print(). End of explanation """ # Create the areas list areas = ["hallway", 11.25, "kitchen", 18.0, "living room", 20.0, "bedroom", 10.75, "bathroom", 9.50] # Use slicing to create downstairs downstairs = areas[:6] # Use slicing to create upstairs upstairs = areas[-4:] # Print out downstairs and upstairs print(downstairs) print(upstairs) """ Explanation: 10. Slicing and dicing (2) https://campus.datacamp.com/courses/intro-to-python-for-data-science/chapter-2-python-lists?ex=10 In the video, Filip only discussed the syntax where you specify both where to begin and end the slice of your list: my_list[begin:end] However, it's also possible not to specify these indexes. If you don't specify the begin index, Python figures out that you want to start your slice at the beginning of your list. If you don't specify the end index, the slice will go all the way to the last element of your list. 
To experiment with this, try the following commands in the IPython Shell: x = ["a", "b", "c", "d"] x[:2] x[2:] x[:] Instructions Use slicing to create the lists downstairs and upstairs again, but this time without using indexes if it's not necessary. Remember downstairs is the first 6 elements of areas and upstairs is the last 4 elements of areas. End of explanation """ house[-1] house[-1][1] """ Explanation: 11. Subsetting lists of lists https://campus.datacamp.com/courses/intro-to-python-for-data-science/chapter-2-python-lists?ex=11 You saw before that a Python list can contain practically anything; even other lists! To subset lists of lists, you can use the same technique as before: square brackets. Try out the commands in the following code sample in the IPython Shell: x = [["a", "b", "c"], ["d", "e", "f"], ["g", "h", "i"]] x[2][0] x[2][:2] x[2] results in a list, that you can subset again by adding additional square brackets. What will house[-1][1] return? house, the list of lists that you created before, is already defined for you in the workspace. You can experiment with it in the IPython Shell. Possible Answers A float: the kitchen area A string: "kitchen" A float: the bathroom area A string: "bathroom" End of explanation """ fam = ['liz', 1.73, 'emma', 1.68, 'mom', 1.71, 'dad', 1.89] fam fam[7] = 1.86 fam fam[0:2] = ['lisa', 1.74] fam fam + ['me', 1.79] fam_ext = fam + ['me', 1.79] fam_ext del(fam[2]) fam del(fam[2]) fam x = ['a', 'b', 'c'] x y = x y[1] = 'z' y x z = list(x) z = x[:] z[1] = 'm' z x """ Explanation: 12. List Manipulation https://campus.datacamp.com/courses/intro-to-python-for-data-science/chapter-2-python-lists?ex=12 End of explanation """ # Create the areas list areas = ["hallway", 11.25, "kitchen", 18.0, "living room", 20.0, "bedroom", 10.75, "bathroom", 9.50] # Correct the bathroom area areas[-1] = 10.50 # Change "living room" to "chill zone" areas[4] = 'chill zone' areas """ Explanation: 13. 
Replace list elements https://campus.datacamp.com/courses/intro-to-python-for-data-science/chapter-2-python-lists?ex=13 Replacing list elements is pretty easy. Simply subset the list and assign new values to the subset. You can select single elements or you can change entire list slices at once. Use the IPython Shell to experiment with the commands below. Can you tell what's happening and why? x = ["a", "b", "c", "d"] x[1] = "r" x[2:] = ["s", "t"] For this and the following exercises, you'll continue working on the areas list that contains the names and areas of different rooms in a house. Instructions You did a miscalculation when determining the area of the bathroom; it's 10.50 square meters instead of 9.50. Can you make the changes? Make the areas list more trendy! Change "living room" to "chill zone". End of explanation """ x = ["a", "b", "c", "d"] y = x + ["e", "f"] y # Create the areas list and make some changes areas = ["hallway", 11.25, "kitchen", 18.0, "chill zone", 20.0, "bedroom", 10.75, "bathroom", 10.50] # Add poolhouse data to areas, new list is areas_1 areas_1 = areas + ["poolhouse", 24.5] # Add garage data to areas_1, new list is areas_2 areas_2 = areas_1 + ["garage", 15.45] areas_2 """ Explanation: 14. Extend a list https://campus.datacamp.com/courses/intro-to-python-for-data-science/chapter-2-python-lists?ex=14 If you can change elements in a list, you sure want to be able to add elements to it, right? You can use the + operator: x = ["a", "b", "c", "d"] y = x + ["e", "f"] You just won the lottery, awesome! You decide to build a poolhouse and a garage. Can you add the information to the areas list? Instructions Use the + operator to paste the list ["poolhouse", 24.5] to the end of the areas list. Store the resulting list as areas_1. Further extend areas_1 by adding data on your garage. Add the string "garage" and float 15.45. Name the resulting list areas_2. 
End of explanation """ x = ["a", "b", "c", "d"] del(x[1]) x areas = ["hallway", 11.25, "kitchen", 18.0, "chill zone", 20.0, "bedroom", 10.75, "bathroom", 10.50, "poolhouse", 24.5, "garage", 15.45] del(areas[-3]) areas """ Explanation: 15. Delete list elements https://campus.datacamp.com/courses/intro-to-python-for-data-science/chapter-2-python-lists?ex=15 Finally, you can also remove elements from your list. You can do this with the del statement: x = ["a", "b", "c", "d"] del(x[1]) Pay attention here: as soon as you remove an element from a list, the indexes of the elements that come after the deleted element all change! The updated and extended version of areas that you've built in the previous exercises is coded below. You can copy and paste this into the IPython Shell to play around with the result. areas = ["hallway", 11.25, "kitchen", 18.0, "chill zone", 20.0, "bedroom", 10.75, "bathroom", 10.50, "poolhouse", 24.5, "garage", 15.45] There was a mistake! The amount you won with the lottery is not that big after all and it looks like the poolhouse isn't going to happen. You decide to remove the corresponding string and float from the areas list. The ; sign is used to place commands on the same line. The following two code chunks are equivalent: Same line command1; command2 Separate lines command1 command2 Which of the code chunks will do the job for us? Possible Answers del(areas[10]); del(areas[11]) del(areas[10:11]) del(areas[-4:-2]) del(areas[-3]); del(areas[-4]) End of explanation """ # Create list areas areas = [11.25, 18.0, 20.0, 10.75, 9.50] # Create areas_copy areas_copy = areas # Change areas_copy areas_copy[0] = 5.0 # Print areas print(areas) # Create list areas areas = [11.25, 18.0, 20.0, 10.75, 9.50] # Create areas_copy areas_copy = list(areas) areas_copy = areas[:] # Change areas_copy areas_copy[0] = 5.0 # Print areas print(areas) """ Explanation: 16. 
Inner workings of lists https://campus.datacamp.com/courses/intro-to-python-for-data-science/chapter-2-python-lists?ex=16 At the end of the video, Filip explained how Python lists work behind the scenes. In this exercise you'll get some hands-on experience with this. The Python code in the script already creates a list with the name areas and a copy named areas_copy. Next, the first element in the areas_copy list is changed and the areas list is printed out. If you hit Submit Answer you'll see that, although you've changed areas_copy, the change also takes effect in the areas list. That's because areas and areas_copy point to the same list. If you want to prevent changes in areas_copy to also take effect in areas, you'll have to do a more explicit copy of the areas list. You can do this with list() or by using [:]. Watch out! list() and [:] might not copy properly if you put complex things in your lists. Instructions Change the second command, that creates the variable areas_copy, such that areas_copy is an explicit copy of areas Now, changes made to areas_copy shouldn't affect areas. Hit Submit Answer to check this. End of explanation """
bjshaw/phys202-project
galaxy_project/F) Plotting_function.ipynb
mit
def plotter(ic,sol,n=0): """Plots the positions of the stars and disrupting galaxy at each t in the time array Parameters -------------- ic : initial conditions sol : solution array n : integer Returns ------------- """ plt.figure(figsize=(10,10)) y = np.linspace(-150,150,100) plt.plot(-.01*y**2+25,y,color='k',label='S path') plt.scatter(0,0,color='y',label='Galaxy M') plt.scatter(sol[n][0],sol[n][1],color='b',label='Galaxy S') for i in range(1,int(len(ic)/4)): a = plt.scatter(sol[n][4*i],sol[n][4*i+1],color='r') a.set_label('Star') plt.legend() plt.ylim(-50,50) plt.xlim(-50,50) """ Explanation: I also define a plotting function to use with the interact function to visualize the behavior of the stars when the disrupting galaxy orbits close to the main galaxy End of explanation """ def static_plot(ic,sol,n=0): """Plots the positions of the stars and disrupting galaxy at a certain t in the time array Parameters -------------- ic : initial conditions sol : solution array n : integer Returns ------------- """ plt.scatter(0,0,color='y',label='Galaxy M') plt.scatter(sol[n][0],sol[n][1],color='b',label='Galaxy S') for i in range(1,int(len(ic)/4)): a = plt.scatter(sol[n][4*i],sol[n][4*i+1],color='r') plt.ylim(-50,50) plt.xlim(-50,50) plt.tick_params(right=False,left=False,top=False,bottom=False) ax=plt.gca() ax.spines['top'].set_visible(False) ax.spines['bottom'].set_visible(False) ax.spines['left'].set_visible(False) ax.spines['right'].set_visible(False) plt.tick_params(axis='x',labelbottom='off') plt.tick_params(axis='y',labelleft='off') """ Explanation: Defining a plotting function that will help with plotting static images at certain times: End of explanation """ def com_plot(ic,sol,M,S,n=0): """Plots the positions of the stars, main, and disrupting galaxy relative to the center of mass at a certain t in the time array Parameters --------------- ic : initial condition sol : solution array M : mass of main galaxy S : mass of disrupting galaxy n : integer Returns 
-------------- """ plt.figure(figsize=(10,10)) cm_x = (S*sol[n][0])/(M+S) cm_y = (S*sol[n][1])/(M+S) plt.scatter(0,0,color='g',label='Center of Mass') plt.scatter(0-cm_x,0-cm_y,color='y',label='Galaxy M') plt.scatter(sol[n][0]-cm_x,sol[n][1]-cm_y,color='b',label='Galaxy S') for i in range(1,int(len(ic)/4)): a = plt.scatter(sol[n][4*i]-cm_x,sol[n][4*i+1]-cm_y,color='r') a.set_label('Star') plt.legend() plt.ylim(-100,100) plt.xlim(-100,100) """ Explanation: Defining a plotting function that will help with plotting positions relative to the center of mass between the two galaxies: End of explanation """ def static_plot_com(ic,sol,M,S,n=0): """Plots the positions of the stars, main, and disrupting galaxy relative to the center of mass at a certain t in the time array Parameters -------------- ic : initial conditions sol : solution array M : mass of main galaxy S : mass of disrupting galaxy n : integer Returns ------------- """ cm_x = (S*sol[n][0])/(M+S) cm_y = (S*sol[n][1])/(M+S) plt.scatter(0,0,color='g',label='Center of Mass') plt.scatter(0-cm_x,0-cm_y,color='y',label='Galaxy M') plt.scatter(sol[n][0]-cm_x,sol[n][1]-cm_y,color='b',label='Galaxy S') for i in range(1,int(len(ic)/4)): a = plt.scatter(sol[n][4*i]-cm_x,sol[n][4*i+1]-cm_y,color='r') plt.ylim(-100,100) plt.xlim(-100,100) plt.tick_params(right=False,left=False,top=False,bottom=False) ax=plt.gca() ax.spines['top'].set_visible(False) ax.spines['bottom'].set_visible(False) ax.spines['left'].set_visible(False) ax.spines['right'].set_visible(False) plt.tick_params(axis='x',labelbottom='off') plt.tick_params(axis='y',labelleft='off') """ Explanation: Static plotting function around center of mass: End of explanation """
synthicity/synthpop
demos/census_api.ipynb
bsd-3-clause
c = Census(os.environ["CENSUS"]) """ Explanation: The census api needs a key - you can register for can sign up http://api.census.gov/data/key_signup.html End of explanation """ income_columns = ['B19001_0%02dE'%i for i in range(1, 18)] vehicle_columns = ['B08201_0%02dE'%i for i in range(1, 7)] workers_columns = ['B08202_0%02dE'%i for i in range(1, 6)] families_columns = ['B11001_001E', 'B11001_002E'] block_group_columns = income_columns + families_columns tract_columns = vehicle_columns + workers_columns h_acs = c.block_group_and_tract_query(block_group_columns, tract_columns, "06", "075", merge_columns=['tract', 'county', 'state'], block_group_size_attr="B11001_001E", tract_size_attr="B08201_001E", tract="030600") h_acs """ Explanation: Here we get aggregate information on households from ACS - note some variables are associated with block groups and others with tracts End of explanation """ population = ['B01001_001E'] sex = ['B01001_002E', 'B01001_026E'] race = ['B02001_0%02dE'%i for i in range(1,11)] male_age_columns = ['B01001_0%02dE'%i for i in range(3,26)] female_age_columns = ['B01001_0%02dE'%i for i in range(27,50)] all_columns = population + sex + race + male_age_columns + female_age_columns p_acs = c.block_group_query(all_columns, "06", "075", tract="030600") p_acs """ Explanation: And here is aggregate information on people from ACS End of explanation """ puma = c.tract_to_puma("06", "075", "030600") puma puma10 = puma[0] puma00 = puma[1] """ Explanation: Get the puma for our test tracts - this actually downloads the mapping file from the census website so it might take a few seconds End of explanation """ p_pums = c.download_population_pums("06", puma10=puma10, puma00=puma00) p_pums.head(5) """ Explanation: Download PUMS for people records for a PUMA from our server (we processed the large files into smaller ones for you) End of explanation """ h_pums = c.download_household_pums("06", puma10=puma10, puma00=puma00) h_pums.head(5) """ Explanation: 
Download PUMS for household records for a PUMA End of explanation """ h_acs_cat = cat.categorize(h_acs, { ("households", "total"): "B11001_001E", ("children", "yes"): "B11001_002E", ("children", "no"): "B11001_001E - B11001_002E", ("income", "lt35"): "B19001_002E + B19001_003E + B19001_004E + " "B19001_005E + B19001_006E + B19001_007E", ("income", "gt35-lt100"): "B19001_008E + B19001_009E + " "B19001_010E + B19001_011E + B19001_012E" "+ B19001_013E", ("income", "gt100"): "B19001_014E + B19001_015E + B19001_016E" "+ B19001_017E", ("cars", "none"): "B08201_002E", ("cars", "one"): "B08201_003E", ("cars", "two or more"): "B08201_004E + B08201_005E + B08201_006E", ("workers", "none"): "B08202_002E", ("workers", "one"): "B08202_003E", ("workers", "two or more"): "B08202_004E + B08202_005E" }, index_cols=['NAME']) h_acs_cat assert np.all(cat.sum_accross_category(h_acs_cat) < 2) """ Explanation: Now the job is to categorize acs and pums into the same categories - we start with the household acs data End of explanation """ p_acs_cat = cat.categorize(p_acs, { ("population", "total"): "B01001_001E", ("age", "19 and under"): "B01001_003E + B01001_004E + B01001_005E + " "B01001_006E + B01001_007E + B01001_027E + " "B01001_028E + B01001_029E + B01001_030E + " "B01001_031E", ("age", "20 to 35"): "B01001_008E + B01001_009E + B01001_010E + " "B01001_011E + B01001_012E + B01001_032E + " "B01001_033E + B01001_034E + B01001_035E + " "B01001_036E", ("age", "35 to 60"): "B01001_013E + B01001_014E + B01001_015E + " "B01001_016E + B01001_017E + B01001_037E + " "B01001_038E + B01001_039E + B01001_040E + " "B01001_041E", ("age", "above 60"): "B01001_018E + B01001_019E + B01001_020E + " "B01001_021E + B01001_022E + B01001_023E + " "B01001_024E + B01001_025E + B01001_042E + " "B01001_043E + B01001_044E + B01001_045E + " "B01001_046E + B01001_047E + B01001_048E + " "B01001_049E", ("race", "white"): "B02001_002E", ("race", "black"): "B02001_003E", ("race", "asian"): "B02001_005E", ("race", 
"other"): "B02001_004E + B02001_006E + B02001_007E + " "B02001_008E", ("sex", "male"): "B01001_002E", ("sex", "female"): "B01001_026E" }, index_cols=['NAME']) p_acs_cat assert np.all(cat.sum_accross_category(p_acs_cat) < 2) """ Explanation: And the same for ACS population - the output of the categorization is the MARGINALS for each variable category End of explanation """ p_acs_cat.iloc[0].transpose() """ Explanation: To get the marginals a series for one geography do this End of explanation """ def age_cat(r): if r.AGEP <= 19: return "19 and under" elif r.AGEP <= 35: return "20 to 35" elif r.AGEP <= 60: return "35 to 60" return "above 60" def race_cat(r): if r.RAC1P == 1: return "white" elif r.RAC1P == 2: return "black" elif r.RAC1P == 6: return "asian" return "other" def sex_cat(r): if r.SEX == 1: return "male" return "female" _, jd_persons = cat.joint_distribution( p_pums, cat.category_combinations(p_acs_cat.columns), {"age": age_cat, "race": race_cat, "sex": sex_cat} ) jd_persons """ Explanation: Now categorize the PUMS population data into the same categories End of explanation """ def cars_cat(r): if r.VEH == 0: return "none" elif r.VEH == 1: return "one" return "two or more" def children_cat(r): if r.NOC > 0: return "yes" return "no" def income_cat(r): if r.FINCP > 100000: return "gt100" elif r.FINCP > 35000: return "gt35-lt100" return "lt35" def workers_cat(r): if r.WIF == 3: return "two or more" elif r.WIF == 2: return "two or more" elif r.WIF == 1: return "one" return "none" _, jd_households = cat.joint_distribution( h_pums, cat.category_combinations(h_acs_cat.columns), {"cars": cars_cat, "children": children_cat, "income": income_cat, "workers": workers_cat} ) jd_households """ Explanation: Do the same for households - the output of this step is the JOINT DISTRIBUTIONS for the cross product of all possible categories End of explanation """ "TBD" """ Explanation: With marginals (aggregate, from ACS) and joint distribution (disaggregate, from PUMS) we're 
ready for some synthesis End of explanation """
IsaacLab/LaboratorioIntangible
T4/T4.5-Prisoner's-dilemma.ipynb
agpl-3.0
from pydilemma.game_play import * play_with('Nice', 'TitForTat') # These 2 guys get along very well... #play_with('Nice', 'Naive') # Naive tries to get advantage of what works... #play_with('Nice', 'NaiveProber') # And Naive Prober tries aggressively... #play_with('NaiveProber', 'Majority') # But the NaiveProber can't compete against a Majority! #play_with('SmoothTitForTat', 'TitForTwoTats') # On the other hand, Tit For Tat cousins destroy each other... #play_with('Selfish', 'Crazy') # A Selfish guy gets a good chance against someone who doesn't know too much about the game... #play_with('Alternate', 'Majority') # Some really simple strategies can get good results sometimes... """ Explanation: Prisoner's dilemma The prisoner's dilemma is a standard example of a game analyzed in game theory that shows why two completely "rational" individuals might not cooperate, even if it appears that it is in their best interests to do so. Two members of a criminal gang are arrested and imprisoned. Each prisoner is in solitary confinement with no means of communicating with the other. The prosecutors lack sufficient evidence to convict the pair on the principal charge. They hope to get both sentenced to a year in prison on a lesser charge. Simultaneously, the prosecutors offer each prisoner a bargain. Each prisoner is given the opportunity either to: betray the other by testifying that the other committed the crime, or to cooperate with the other by remaining silent. The offer is: - If A and B each betray the other, each of them serves 2 years in prison - If A betrays B but B remains silent, A will be set free and B will serve 3 years in prison (and vice versa) - If A and B both remain silent, both of them will only serve 1 year in prison (on the lesser charge) It is assumed that both understand the nature of the game, and that despite being members of the same gang, they have no loyalty to each other and will have no opportunity for retribution or reward outside the game. 
Because defection always results in a better payoff than cooperation, regardless of the other player's choice, it is a dominant strategy. The dilemma then is that mutual cooperation yields a better outcome than mutual defection but it is not the rational outcome because from a self-interested perspective, the choice to cooperate, at the individual level, is irrational. All purely rational self-interested prisoners would betray the other because betraying a partner offers a greater reward than cooperating with them, and so the only possible outcome for two purely rational prisoners is for them to betray each other. The interesting part of this result is that pursuing individual reward logically leads both of the prisoners to betray, when they would get a better reward if they both kept silent. In reality, humans display a systemic bias towards cooperative behavior in this and similar games, much more so than predicted by simple models of "rational" self-interested action. The prisoner's dilemma game can be used as a model for many real world situations involving cooperative behaviour. In casual usage, the label "prisoner's dilemma" may be applied to situations not strictly matching the formal criteria of the classic or iterative games: for instance, those in which two entities could gain important benefits from cooperating or suffer from the failure to do so, but find it merely difficult or expensive, not necessarily impossible, to coordinate their activities to achieve cooperation. Exercise The purpose is to show how the game evolves with different strategies. Lets kick some matches with a Prissoner's Dilemma matrix and 100 rounds per match. Strategies: Nice: Cooperates with 70% chance. Crazy: Cooperates with 50% chance. Alternate: Alternates cooperate / defeat. AlternateCCD: Cooperates 2 of every 3 times. Naive: Repeats the last play if the opponent cooperated. NaiveProber: Defeats with 20% chance, else repeats the last play if the opponent cooperated. 
TitForTat: Plays what the opponent played last time. TitForTwoTats: Cooperates if the opponent cooperated the last 2 times. SmoothTitForTat: Cooperates with 10% chance, othewise like tit for tat. Selfish: Cooperates with 50% chance only if the opponent cooperated the last time. Majority: Defects when the opponent defected more that 50% of the times. Spiteful: Cooperates until opponent defeats. Then defeats always. Pair the strategies as shoen in the code and look for the better results. End of explanation """ from pydilemma.generational import * Generational('PrisonerMatrix', ['TitForTat', 'Alternate', 'Majority', 'Naive', 'Nice'], 200).start() """ Explanation: Exercise 2 We can simulate a population of strategies, put them to play, and see how the population evolve. These are the rules: There is a initial queue filled randomly with strategies passed as input. It pulls the first 2 players of the queue and performs a game with 50 rounds per game. If one player reaches 70 points, 2 players with the same strategy will be added to the queue. At that time, if the other one has more than 50 points, 1 player with the same strategy will be added to the queue. Both scores reset. Repeat until queue is empty or number of generations limit has reached. Lets do some tests with 200 generations each, and try to get the best strategies. End of explanation """
tcstewar/testing_notebooks
semd/sEMD.ipynb
gpl-2.0
# the facilitation spikes def stim_1_func(t): index = int(t/0.001) if index in [100, 1100, 2100]: return 1000 else: return 0 # the trigger spikes def stim_2_func(t): index = int(t/0.001) if index in [90, 1500, 2150]: return 1000 else: return 0 # the operation we're going to do on the two different inputs to the sEMD neuron def dendrite_func(t, x): return x[0]*x[1] # the trigger weight (w_e2 in the paper) w = 2.0 model = nengo.Network() with model: stim1 = nengo.Node(stim_1_func) stim2 = nengo.Node(stim_2_func) # this will handle the non-linearity we need for the input dendrite = nengo.Node(dendrite_func, size_in=2) # the facilitation input gets a low-pass filter of 10ms but the trigger is unfiltered nengo.Connection(stim1, dendrite[0], synapse=0.01) nengo.Connection(stim2, dendrite[1], transform=w, synapse=None) # one simple leaky integrate-and-fire neuron ens = nengo.Ensemble(n_neurons=1, dimensions=1, gain=np.ones(1), bias=np.zeros(1)) # a low-pass filter of 5 ms for the output from the dendritic nonlinearity nengo.Connection(dendrite, ens.neurons, synapse=0.005) # now let's probe a bunch of data so we can plot things pd = nengo.Probe(dendrite, synapse=0.005) p1_n = nengo.Probe(stim1, synapse=None) p1 = nengo.Probe(stim1, synapse=0.01) p2 = nengo.Probe(stim2, synapse=None) pn = nengo.Probe(ens.neurons) sim = nengo.Simulator(model) with sim: sim.run(3) """ Explanation: This notebook gives a Nengo implementation of the Spiking Elementary Motion Detector (sEMD) from doi:10.1162/neco_a_01112 First, let's try to replicate Figure 2. 
End of explanation """ plt.figure(figsize=(14,5)) plt.subplot(3,1,1) import nengo.utils.matplotlib nengo.utils.matplotlib.rasterplot(sim.trange(), np.hstack([sim.data[p1_n], sim.data[p2]])) plt.xlim(0, sim.trange()[-1]) plt.ylim(0.5,2.5) plt.subplot(3, 1, 2) plt.plot(sim.trange(), sim.data[p1]) plt.plot(sim.trange(), sim.data[pd]) plt.xlim(0, sim.trange()[-1]) plt.subplot(3, 1, 3) plt.plot(sim.trange(), sim.data[pn]) plt.xlim(0, sim.trange()[-1]) plt.show() """ Explanation: Now let's re-create Figure 2 End of explanation """ import pytry class SEMDTrial(pytry.PlotTrial): def params(self): self.param('trigger weight', w_trig=1.0) self.param('facilitation weight', w_fac=1.0) self.param('time delay between facilitation spike and trigger spike', dt=0) self.param('facilitation synapse', syn_fac=0.01) self.param('trigger synapse', syn_trig=0.005) def evaluate(self, p, plt): model = nengo.Network() with model: stim1 = nengo.Node(lambda t: 1000 if int(t/0.001)==100 else 0) stim2 = nengo.Node(lambda t: 1000 if int((t-p.dt)/0.001)==100 else 0) dendrite = nengo.Node(lambda t, x: x[0]*x[1], size_in=2) nengo.Connection(stim1, dendrite[0], transform=p.w_fac, synapse=p.syn_fac) nengo.Connection(stim2, dendrite[1], transform=p.w_trig, synapse=None) ens = nengo.Ensemble(n_neurons=1, dimensions=1, gain=np.ones(1), bias=np.zeros(1)) nengo.Connection(dendrite, ens.neurons, synapse=p.syn_trig) pn = nengo.Probe(ens.neurons) sim = nengo.Simulator(model, progress_bar=False) with sim: sim.run(0.1+p.dt+0.2) if plt: plt.plot(sim.trange(), sim.data[pn]) # neuron output plt.axvline(0.1, color='g') # facilitation spike plt.axvline(0.1+p.dt, color='b') # trigger spike spike_count = np.sum(sim.data[pn])/1000 return dict(spike_count=spike_count) SEMDTrial().run(plt=True, dt=0.02) """ Explanation: Now let's see what the performance is as we vary different parameters. To do this, I'm using pytry, a simple Python package for running experiments and gathering data. 
(You can install it with pip install pytry) End of explanation """ dts = (np.arange(99)+1)*0.001 for dt in dts: SEMDTrial().run(verbose=False, dt=dt, data_dir='exp2') """ Explanation: Now let's see how the spike count varies as we adjust dt. We run the experiment varying dt and it will save data in a directory called exp2. End of explanation """ df = pandas.DataFrame(pytry.read('exp2')) seaborn.lineplot('dt', 'spike_count', data=df) """ Explanation: And we can now plot the data. End of explanation """ dts = (np.arange(0,100,5)+1)*0.001 ws = [0.1, 0.2, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0] for dt in dts: for w_fac in ws: SEMDTrial().run(verbose=False, dt=dt, w_fac=w_fac, data_dir='exp3') df = pandas.DataFrame(pytry.read('exp3')) plt.figure(figsize=(14,7)) seaborn.pointplot('dt', 'spike_count', hue='w_fac', data=df) plt.xticks(range(len(dts)), ['%g'%x for x in dts], rotation='vertical') plt.show() """ Explanation: That looks great! Now let's try varying w_fac (the weight for the facilitation input). End of explanation """ dts = (np.arange(0,100,5)+1)*0.001 ws = [0.1, 0.2, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0] for dt in dts: for w_trig in ws: SEMDTrial().run(verbose=False, dt=dt, w_trig=w_trig, data_dir='exp4') df = pandas.DataFrame(pytry.read('exp4')) plt.figure(figsize=(14,7)) seaborn.pointplot('dt', 'spike_count', hue='w_trig', data=df) plt.xticks(range(len(dts)), ['%g'%x for x in dts], rotation='vertical') plt.show() """ Explanation: And let's also check varying w_trig. This should give the identical results as varying w_fac, since they are just multiplied together. 
End of explanation """ dts = (np.arange(0,100,5)+1)*0.001 syns = [0.001, 0.002, 0.005, 0.1, 0.2] syns = [0.01, 0.02, 0.05] for dt in dts: for syn_trig in syns: SEMDTrial().run(verbose=False, dt=dt, syn_trig=syn_trig, data_dir='exp5') df = pandas.DataFrame(pytry.read('exp5')) plt.figure(figsize=(14,7)) seaborn.pointplot('dt', 'spike_count', hue='syn_trig', data=df) plt.xticks(range(len(dts)), ['%g'%x for x in dts], rotation='vertical') plt.show() """ Explanation: Now let's vary the time constant for the trigger synapse. End of explanation """ dts = (np.arange(0,100,5)+1)*0.001 syns = [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2] for dt in dts: for syn_fac in syns: SEMDTrial().run(verbose=False, dt=dt, syn_fac=syn_fac, data_dir='exp6') df = pandas.DataFrame(pytry.read('exp6')) plt.figure(figsize=(14,7)) seaborn.pointplot('dt', 'spike_count', hue='syn_fac', data=df) plt.xticks(range(len(dts)), ['%g'%x for x in dts], rotation='vertical') plt.show() """ Explanation: And finally, let's very the time constant for the facilitation synapse. End of explanation """
GoogleCloudPlatform/asl-ml-immersion
notebooks/tfx_pipelines/guided_projects/guided_project_1.ipynb
apache-2.0
import os """ Explanation: Guided Project 1 Learning Objectives: Learn how to generate a standard TFX template pipeline using tfx template Learn how to modify and run a templated TFX pipeline Note: This guided project is adapted from Create a TFX pipeline using templates). End of explanation """ PATH = %env PATH %env PATH={PATH}:/home/jupyter/.local/bin %%bash LOCAL_BIN="/home/jupyter/.local/bin" SKAFFOLD_URI="https://storage.googleapis.com/skaffold/releases/latest/skaffold-linux-amd64" test -d $LOCAL_BIN || mkdir -p $LOCAL_BIN which skaffold || ( curl -Lo skaffold $SKAFFOLD_URI && chmod +x skaffold && mv skaffold $LOCAL_BIN ) """ Explanation: Step 1. Environment setup skaffold tool setup End of explanation """ !which skaffold """ Explanation: Modify the PATH environment variable so that skaffold is available: At this point, you shoud see the skaffold tool with the command which: End of explanation """ shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null GOOGLE_CLOUD_PROJECT = shell_output[0] %env GOOGLE_CLOUD_PROJECT={GOOGLE_CLOUD_PROJECT} """ Explanation: Environment variable setup In AI Platform Pipelines, TFX is running in a hosted Kubernetes environment using Kubeflow Pipelines. Let's set some environment variables to use Kubeflow Pipelines. First, get your GCP project ID. End of explanation """ ENDPOINT = # Enter your ENDPOINT here. """ Explanation: We also need to access your KFP cluster. You can access it in your Google Cloud Console under "AI Platform > Pipeline" menu. The "endpoint" of the KFP cluster can be found from the URL of the Pipelines dashboard, or you can get it from the URL of the Getting Started page where you launched this notebook. Let's create an ENDPOINT environment variable and set it to the KFP cluster endpoint. ENDPOINT should contain only the hostname part of the URL. 
For example, if the URL of the KFP dashboard is <a href="https://1e9deb537390ca22-dot-asia-east1.pipelines.googleusercontent.com/#/start">https://1e9deb537390ca22-dot-asia-east1.pipelines.googleusercontent.com/#/start</a>, ENDPOINT value becomes 1e9deb537390ca22-dot-asia-east1.pipelines.googleusercontent.com. End of explanation """ # Docker image name for the pipeline image. CUSTOM_TFX_IMAGE = "gcr.io/" + GOOGLE_CLOUD_PROJECT + "/tfx-pipeline" CUSTOM_TFX_IMAGE """ Explanation: Set the image name as tfx-pipeline under the current GCP project: End of explanation """ PIPELINE_NAME = "guided_project_1" PROJECT_DIR = os.path.join(os.path.expanduser("."), PIPELINE_NAME) PROJECT_DIR """ Explanation: Step 2. Copy the predefined template to your project directory. In this step, we will create a working pipeline project directory and files by copying additional files from a predefined template. You may give your pipeline a different name by changing the PIPELINE_NAME below. This will also become the name of the project directory where your files will be put. End of explanation """ !tfx template copy \ --pipeline-name={PIPELINE_NAME} \ --destination-path={PROJECT_DIR} \ --model=taxi %cd {PROJECT_DIR} """ Explanation: TFX includes the taxi template with the TFX python package. If you are planning to solve a point-wise prediction problem, including classification and regresssion, this template could be used as a starting point. The tfx template copy CLI command copies predefined template files into your project directory. End of explanation """ !python -m models.features_test !python -m models.keras.model_test """ Explanation: Step 3. Browse your copied source files The TFX template provides basic scaffold files to build a pipeline, including Python source code, sample data, and Jupyter Notebooks to analyse the output of the pipeline. The taxi template uses the Chicago Taxi dataset. 
Here is brief introduction to each of the Python files: pipeline - This directory contains the definition of the pipeline * configs.py — defines common constants for pipeline runners * pipeline.py — defines TFX components and a pipeline models - This directory contains ML model definitions. * features.py, features_test.py — defines features for the model * preprocessing.py, preprocessing_test.py — defines preprocessing jobs using tf::Transform models/estimator - This directory contains an Estimator based model. * constants.py — defines constants of the model * model.py, model_test.py — defines DNN model using TF estimator models/keras - This directory contains a Keras based model. * constants.py — defines constants of the model * model.py, model_test.py — defines DNN model using Keras beam_dag_runner.py, kubeflow_dag_runner.py — define runners for each orchestration engine Running the tests: You might notice that there are some files with _test.py in their name. These are unit tests of the pipeline and it is recommended to add more unit tests as you implement your own pipelines. You can run unit tests by supplying the module name of test files with -m flag. You can usually get a module name by deleting .py extension and replacing / with .. For example: End of explanation """ !tail -26 models/features_test.py """ Explanation: Let's quickly go over the structure of a test file to test Tensorflow code: End of explanation """ GCS_BUCKET_NAME = GOOGLE_CLOUD_PROJECT + "-kubeflowpipelines-default" GCS_BUCKET_NAME !gsutil mb gs://{GCS_BUCKET_NAME} """ Explanation: First of all, notice that you start by importing the code you want to test by importing the corresponding module. 
Here we want to test the code in features.py so we import the module features: python from models import features To implement test cases start by defining your own test class inheriting from tf.test.TestCase: python class FeaturesTest(tf.test.TestCase): Wen you execute the test file with bash python -m models.features_test the main method python tf.test.main() will parse your test class (here: FeaturesTest) and execute every method whose name starts by test. Here we have two such methods for instance: python def testNumberOfBucketFeatureBucketCount(self): def testTransformedNames(self): So when you want to add a test case, just add a method to that test class whose name starts by test. Now inside the body of these test methods is where the actual testing takes place. In this case for instance, testTransformedNames test the function features.transformed_name and makes sure it outputs what is expected. Since your test class inherits from tf.test.TestCase it has a number of helper methods you can use to help you create tests, as for instance python self.assertEqual(expected_outputs, obtained_outputs) that will fail the test case if obtained_outputs do the match the expected_outputs. Typical examples of test case you may want to implement for machine learning code would comprise test insurring that your model builds correctly, your preprocessing function preprocesses raw data as expected, or that your model can train successfully on a few mock examples. When writing tests make sure that their execution is fast (we just want to check that the code works not actually train a performant model when testing). For that you may have to create synthetic data in your test files. For more information, read the tf.test.TestCase documentation and the Tensorflow testing best practices. Step 4. Run your first TFX pipeline Components in the TFX pipeline will generate outputs for each run as ML Metadata Artifacts, and they need to be stored somewhere. 
You can use any storage which the KFP cluster can access, and for this example we will use Google Cloud Storage (GCS). Let us create this bucket. Its name will be &lt;YOUR_PROJECT&gt;-kubeflowpipelines-default. End of explanation """ !gsutil cp data/data.csv gs://{GCS_BUCKET_NAME}/tfx-template/data/data.csv """ Explanation: Let's upload our sample data to GCS bucket so that we can use it in our pipeline later. End of explanation """ !tfx pipeline create \ --pipeline-path=kubeflow_dag_runner.py \ --endpoint={ENDPOINT} \ --build-target-image={CUSTOM_TFX_IMAGE} """ Explanation: Let's create a TFX pipeline using the tfx pipeline create command. Note: When creating a pipeline for KFP, we need a container image which will be used to run our pipeline. And skaffold will build the image for us. Because skaffold pulls base images from the docker hub, it will take 5~10 minutes when we build the image for the first time, but it will take much less time from the second build. End of explanation """ !tfx run create --pipeline-name={PIPELINE_NAME} --endpoint={ENDPOINT} """ Explanation: While creating a pipeline, Dockerfile and build.yaml will be generated to build a Docker image. Don't forget to add these files to the source control system (for example, git) along with other source files. A pipeline definition file for argo will be generated, too. The name of this file is ${PIPELINE_NAME}.tar.gz. For example, it will be guided_project_1.tar.gz if the name of your pipeline is guided_project_1. It is recommended NOT to include this pipeline definition file into source control, because it will be generated from other Python files and will be updated whenever you update the pipeline. For your convenience, this file is already listed in .gitignore which is generated automatically. Now start an execution run with the newly created pipeline using the tfx run create command. Note: You may see the following error Error importing tfx_bsl_extension.coders. Please ignore it. 
Debugging tip: If your pipeline run fails, you can see detailed logs for each TFX component in the Experiments tab in the KFP Dashboard. One of the major sources of failure is permission related problems. Please make sure your KFP cluster has permissions to access Google Cloud APIs. This can be configured when you create a KFP cluster in GCP, or see Troubleshooting document in GCP. End of explanation """ # Update the pipeline !tfx pipeline update \ --pipeline-path=kubeflow_dag_runner.py \ --endpoint={ENDPOINT} # You can run the pipeline the same way. !tfx run create --pipeline-name {PIPELINE_NAME} --endpoint={ENDPOINT} """ Explanation: Or, you can also run the pipeline in the KFP Dashboard. The new execution run will be listed under Experiments in the KFP Dashboard. Clicking into the experiment will allow you to monitor progress and visualize the artifacts created during the execution run. However, we recommend visiting the KFP Dashboard. You can access the KFP Dashboard from the Cloud AI Platform Pipelines menu in Google Cloud Console. Once you visit the dashboard, you will be able to find the pipeline, and access a wealth of information about the pipeline. For example, you can find your runs under the Experiments menu, and when you open your execution run under Experiments you can find all your artifacts from the pipeline under Artifacts menu. Step 5. Add components for data validation. In this step, you will add components for data validation including StatisticsGen, SchemaGen, and ExampleValidator. If you are interested in data validation, please see Get started with Tensorflow Data Validation. Double-click to change directory to pipeline and double-click again to open pipeline.py. Find and uncomment the 3 lines which add StatisticsGen, SchemaGen, and ExampleValidator to the pipeline. (Tip: search for comments containing TODO(step 5):). Make sure to save pipeline.py after you edit it. 
You now need to update the existing pipeline with modified pipeline definition. Use the tfx pipeline update command to update your pipeline, followed by the tfx run create command to create a new execution run of your updated pipeline. End of explanation """ print("https://" + ENDPOINT) """ Explanation: Check pipeline outputs Visit the KFP dashboard to find pipeline outputs in the page for your pipeline run. Click the Experiments tab on the left, and All runs in the Experiments page. You should be able to find the latest run under the name of your pipeline. See link below to access the dashboard: End of explanation """ !tfx pipeline update \ --pipeline-path=kubeflow_dag_runner.py \ --endpoint={ENDPOINT} print("https://" + ENDPOINT) !tfx run create --pipeline-name {PIPELINE_NAME} --endpoint={ENDPOINT} """ Explanation: Step 6. Add components for training In this step, you will add components for training and model validation including Transform, Trainer, ResolverNode, Evaluator, and Pusher. Double-click to open pipeline.py. Find and uncomment the 5 lines which add Transform, Trainer, ResolverNode, Evaluator and Pusher to the pipeline. (Tip: search for TODO(step 6):) As you did before, you now need to update the existing pipeline with the modified pipeline definition. The instructions are the same as Step 5. Update the pipeline using tfx pipeline update, and create an execution run using tfx run create. Verify that the pipeline DAG has changed accordingly in the Kubeflow UI: End of explanation """ !tfx pipeline update \ --pipeline-path=kubeflow_dag_runner.py \ --endpoint={ENDPOINT} !tfx run create --pipeline-name {PIPELINE_NAME} --endpoint={ENDPOINT} """ Explanation: When this execution run finishes successfully, you have now created and run your first TFX pipeline in AI Platform Pipelines! Step 7. Try BigQueryExampleGen BigQuery is a serverless, highly scalable, and cost-effective cloud data warehouse. BigQuery can be used as a source for training examples in TFX. 
In this step, we will add BigQueryExampleGen to the pipeline. Double-click to open pipeline.py. Comment out CsvExampleGen and uncomment the line which creates an instance of BigQueryExampleGen. You also need to uncomment the query argument of the create_pipeline function. We need to specify which GCP project to use for BigQuery, and this is done by setting --project in beam_pipeline_args when creating a pipeline. Double-click to open configs.py. Uncomment the definition of GOOGLE_CLOUD_REGION, BIG_QUERY_WITH_DIRECT_RUNNER_BEAM_PIPELINE_ARGS and BIG_QUERY_QUERY. You should replace the region value in this file with the correct values for your GCP project. Note: You MUST set your GCP region in the configs.py file before proceeding Change directory one level up. Click the name of the directory above the file list. The name of the directory is the name of the pipeline which is guided_project_1 if you didn't change. Double-click to open kubeflow_dag_runner.py. Uncomment two arguments, query and beam_pipeline_args, for the create_pipeline function. Now the pipeline is ready to use BigQuery as an example source. Update the pipeline as before and create a new execution run as we did in step 5 and 6. End of explanation """ !tfx pipeline update \ --pipeline-path=kubeflow_dag_runner.py \ --endpoint={ENDPOINT} !tfx run create --pipeline-name {PIPELINE_NAME} --endpoint={ENDPOINT} """ Explanation: Step 8. Try Dataflow with KFP Several TFX Components uses Apache Beam to implement data-parallel pipelines, and it means that you can distribute data processing workloads using Google Cloud Dataflow. In this step, we will set the Kubeflow orchestrator to use dataflow as the data processing back-end for Apache Beam. Double-click pipeline to change directory, and double-click to open configs.py. Uncomment the definition of GOOGLE_CLOUD_REGION, and DATAFLOW_BEAM_PIPELINE_ARGS. Double-click to open pipeline.py. Change the value of enable_cache to False. Change directory one level up. 
Click the name of the directory above the file list. The name of the directory is the name of the pipeline which is guided_project_1 if you didn't change. Double-click to open kubeflow_dag_runner.py. Uncomment beam_pipeline_args. (Also make sure to comment out current beam_pipeline_args that you added in Step 7.) Note that we deliberately disabled caching. Because we have already run the pipeline successfully, we will get cached execution result for all components if cache is enabled. Now the pipeline is ready to use Dataflow. Update the pipeline and create an execution run as we did in step 5 and 6. End of explanation """ !tfx pipeline update \ --pipeline-path=kubeflow_dag_runner.py \ --endpoint={ENDPOINT} !tfx run create --pipeline-name {PIPELINE_NAME} --endpoint={ENDPOINT} """ Explanation: You can find your Dataflow jobs in Dataflow in Cloud Console. Please reset enable_cache to True to benefit from caching execution results. Double-click to open pipeline.py. Reset the value of enable_cache to True. Step 9. Try Cloud AI Platform Training and Prediction with KFP TFX interoperates with several managed GCP services, such as Cloud AI Platform for Training and Prediction. You can set your Trainer component to use Cloud AI Platform Training, a managed service for training ML models. Moreover, when your model is built and ready to be served, you can push your model to Cloud AI Platform Prediction for serving. In this step, we will set our Trainer and Pusher component to use Cloud AI Platform services. Before editing files, you might first have to enable AI Platform Training & Prediction API. Double-click pipeline to change directory, and double-click to open configs.py. Uncomment the definition of GOOGLE_CLOUD_REGION, GCP_AI_PLATFORM_TRAINING_ARGS and GCP_AI_PLATFORM_SERVING_ARGS. 
We will use our custom built container image to train a model in Cloud AI Platform Training, so we should set masterConfig.imageUri in GCP_AI_PLATFORM_TRAINING_ARGS to the same value as CUSTOM_TFX_IMAGE above. Change directory one level up, and double-click to open kubeflow_dag_runner.py. Uncomment ai_platform_training_args and ai_platform_serving_args. Update the pipeline and create an execution run as we did in step 5 and 6. End of explanation """
jmitz/daymetDataExtraction
daymetDataDownload.ipynb
unlicense
import urllib import os from datetime import date as dt """ Explanation: <h1>Daymet Data Download</h1> Daymet data can be extracted/downloaded in two ways. The nationwide or localized grid can be downloaded; alternately, the data for particular grid cells can be extracted through a web interface. <h2>Daymet Data Download - Nationwide Dataset</h2> <h3>Required Python libraries</h3> End of explanation """ startYear = 2016 # First year of data extraction endYear = dt.today().year # Last year of data extraction Defaults to current year. """ Explanation: <h3>Parameters</h3> Range of years to download datasets. - startYear is the first year of data - endYear is the last year of data - the default value for the last year is the current year. End of explanation """ dataDir = "..\daymet" """ Explanation: Local data file location - This is the base directory of the Daymet data which contains the Daymet Data File Structure. End of explanation """ urlBase = "http://thredds.daac.ornl.gov/thredds/fileServer/ornldaac" """ Explanation: Set up the URL template information - This information is determined by the URL structure of the Oak Ridge National Laboratory (ORLN) file server. This is how we determine the file structure. - Go to the "Daymet Data Sets List" page - In the Daymet Data Sets List there is a THREDDS column, the URL for each of the types of data (Annual, Daily, and Monthly) can be discovered. Annual - https://thredds.daac.ornl.gov/thredds/catalog/ornldaac/1343/catalog.html Daily - https://thredds.daac.ornl.gov/thredds/catalog/ornldaac/1328/catalog.html Monthly - https://thredds.daac.ornl.gov/thredds/catalog/ornldaac/1345/catalog.html One important portion of these URLs is "https://thredds.daac.ornl.gov/thredds/catalog/ornldaac/". The other important portion is the number which follows (current values Annual - 1343, Daily 1328, and Monthly - 1345.) These will need to be checked and updated as they change. 
End of explanation """ !conda --version import daymetFileDownload as dfd """ Explanation: <h2>Daymet Data Parameters and Time Frames</h2> <table> <tr> <th>Parameter Abbr</th><th>Data Type</th><th>Annual</th><th>Daily</th><th>Monthly</th> </tr><tr> <td>dayl</td><td>day length (s)</td><td></td><td>X</td><td></td> </tr><tr> <td>prcp</td><td>precipitation (mm/day)</td><td>X</td><td>X</td><td>X</td> </tr><tr> <td>srad</td><td>shortwave radiation (W/m<sup>2</sup>)</td><td></td><td>X</td><td></td> </tr><tr> <td>swe</td><td>snow water equivalent (kg/m<sup>2</sup>)</td><td></td><td>X</td><td></td> </tr><tr> <td>tmax</td><td>maximum temp (&deg;C)</td><td>X</td><td>X</td><td>X</td> </tr><tr> <td>tmin</td><td>minimum temp (&deg;C)</td><td>X</td><td>X</td><td>X</td> </tr><tr> <td>vp</td><td>humidity as water vapor pressure (Pa)</td><td>X</td><td>X</td><td>X</td> </tr> </table> <h2><a id='daymetDataStructure'>Daymet Data File Structure</a></h2> The following is a representation of the data structure for the Daymet data directory. The annual, monthly, and daily directories each contain directories which hold the parametric data for their identified type. Daymet Annual prcp tmax tmin vp Daily dayl prcp srad swe tmax tmin vp Monthly prcp tmax tmin vp <h2>Execute Data Download</h2> This script should only need to be executed once a year. The data files being downloaded are HUGE. A single years worth of data is about 21.5 GB. The best idea for running this script is overnight or over a weekend. This should minimize limiting Internet access for other users. The script should be executed from the python directory of the Daymet external drive. To do this: - Plug in Daymet external drive - Check which drive letter the external drive is assigned in Windows Explorer (for this example we will use F:) - Open a command prompt - Check that the appropriate version of Python is installed. - The response should look like <code>conda 4.3.21</code>. 
- An error will look like <code>'conda' is not recognized ....</code> - If an error occurs you will need to install Anaconda try looking in the G:\Software\Python directory for installation instructions. End of explanation """ dfd.downloadDaymet(startYear, endYear) """ Explanation: Change the begin/end date values on the following to allow for download of data. Remember each year of data requires about 21.5 GB of storage and bandwidth. Do everyone a favor and run this over a weekend or at night. If a data file has already been downloaded the system will skip to the next file. Currently the system has all the data from 1980 to 2015. End of explanation """
JakeColtman/BayesianSurvivalAnalysis
PyMC Done.ipynb
mit
running_id = 0 output = [[0]] with open("E:/output.txt") as file_open: for row in file_open.read().split("\n"): cols = row.split(",") if cols[0] == output[-1][0]: output[-1].append(cols[1]) output[-1].append(True) else: output.append(cols) output = output[1:] for row in output: if len(row) == 6: row += [datetime(2016, 5, 3, 20, 36, 8, 92165), False] output = output[1:-1] def convert_to_days(dt): day_diff = dt / np.timedelta64(1, 'D') if day_diff == 0: return 23.0 else: return day_diff df = pd.DataFrame(output, columns=["id", "advert_time", "male","age","search","brand","conversion_time","event"]) df["lifetime"] = pd.to_datetime(df["conversion_time"]) - pd.to_datetime(df["advert_time"]) df["lifetime"] = df["lifetime"].apply(convert_to_days) df["male"] = df["male"].astype(int) df["search"] = df["search"].astype(int) df["brand"] = df["brand"].astype(int) df["age"] = df["age"].astype(int) df["event"] = df["event"].astype(int) df = df.drop('advert_time', 1) df = df.drop('conversion_time', 1) df = df.set_index("id") df = df.dropna(thresh=2) df.median() ###Parametric Bayes #Shout out to Cam Davidson-Pilon ## Example fully worked model using toy data ## Adapted from http://blog.yhat.com/posts/estimating-user-lifetimes-with-pymc.html ## Note that we've made some corrections N = 2500 ##Generate some random data lifetime = pm.rweibull( 2, 5, size = N ) birth = pm.runiform(0, 10, N) censor = ((birth + lifetime) >= 10) lifetime_ = lifetime.copy() lifetime_[censor] = 10 - birth[censor] alpha = pm.Uniform('alpha', 0, 20) beta = pm.Uniform('beta', 0, 20) @pm.observed def survival(value=lifetime_, alpha = alpha, beta = beta ): return sum( (1-censor)*(log( alpha/beta) + (alpha-1)*log(value/beta)) - (value/beta)**(alpha)) mcmc = pm.MCMC([alpha, beta, survival ] ) mcmc.sample(50000, 30000) pm.Matplot.plot(mcmc) mcmc.trace("alpha")[:] """ Explanation: The first step in any data analysis is acquiring and munging the data Our starting data set can be found here: http://jakecoltman.com in 
the pyData post It is designed to be roughly similar to the output from DCM's path to conversion Download the file and transform it into something with the columns: id,lifetime,age,male,event,search,brand where lifetime is the total time that we observed someone not convert for and event should be 1 if we see a conversion and 0 if we don't. Note that all values should be converted into ints It is useful to note that end_date = datetime.datetime(2016, 5, 3, 20, 36, 8, 92165) End of explanation """ censor = np.array(df["event"].apply(lambda x: 0 if x else 1).tolist()) alpha = pm.Uniform("alpha", 0,50) beta = pm.Uniform("beta", 0,50) @pm.observed def survival(value=df["lifetime"], alpha = alpha, beta = beta ): return sum( (1-censor)*(np.log( alpha/beta) + (alpha-1)*np.log(value/beta)) - (value/beta)**(alpha)) mcmc = pm.MCMC([alpha, beta, survival ] ) mcmc.sample(10000) def weibull_median(alpha, beta): return beta * ((log(2)) ** ( 1 / alpha)) plt.hist([weibull_median(x[0], x[1]) for x in zip(mcmc.trace("alpha"), mcmc.trace("beta"))]) """ Explanation: Problems: 1 - Try to fit your data from section 1 2 - Use the results to plot the distribution of the median Note that the media of a Weibull distribution is: $$β(log 2)^{1/α}$$ End of explanation """ censor = np.array(df["event"].apply(lambda x: 0 if x else 1).tolist()) alpha = pm.Uniform("alpha", 0,50) beta = pm.Uniform("beta", 0,50) @pm.observed def survival(value=df["lifetime"], alpha = alpha, beta = beta ): return sum( (1-censor)*(np.log( alpha/beta) + (alpha-1)*np.log(value/beta)) - (value/beta)**(alpha)) mcmc = pm.MCMC([alpha, beta, survival ] ) mcmc.sample(10000, burn = 3000, thin = 20) pm.Matplot.plot(mcmc) #Solution to Q5 ## Adjusting the priors impacts the overall result ## If we give a looser, less informative prior then we end up with a broader, shorter distribution ## If we give much more informative priors, then we get a tighter, taller distribution censor = np.array(df["event"].apply(lambda x: 0 if x else 
1).tolist()) ## Note the narrowing of the prior alpha = pm.Normal("alpha", 1.7, 10000) beta = pm.Normal("beta", 18.5, 10000) ####Uncomment this to see the result of looser priors ## Note this ends up pretty much the same as we're already very loose #alpha = pm.Uniform("alpha", 0, 30) #beta = pm.Uniform("beta", 0, 30) @pm.observed def survival(value=df["lifetime"], alpha = alpha, beta = beta ): return sum( (1-censor)*(np.log( alpha/beta) + (alpha-1)*np.log(value/beta)) - (value/beta)**(alpha)) mcmc = pm.MCMC([alpha, beta, survival ] ) mcmc.sample(10000, burn = 5000, thin = 20) pm.Matplot.plot(mcmc) #plt.hist([weibull_median(x[0], x[1]) for x in zip(mcmc.trace("alpha"), mcmc.trace("beta"))]) """ Explanation: Problems: 4 - Try adjusting the number of samples for burning and thinnning 5 - Try adjusting the prior and see how it affects the estimate End of explanation """ medians = [weibull_median(x[0], x[1]) for x in zip(mcmc.trace("alpha"), mcmc.trace("beta"))] testing_value = 14.9 number_of_greater_samples = sum([x >= testing_value for x in medians]) 100 * (number_of_greater_samples / len(medians)) """ Explanation: Problems: 7 - Try testing whether the median is greater than a different values End of explanation """ ### Fit a cox proprtional hazards model """ Explanation: If we want to look at covariates, we need a new approach. We'll use Cox proprtional hazards, a very popular regression model. To fit in python we use the module lifelines: http://lifelines.readthedocs.io/en/latest/ End of explanation """ #### Plot baseline hazard function #### Predict #### Plot survival functions for different covariates #### Plot some odds """ Explanation: Once we've fit the data, we need to do something useful with it. 
Try to do the following things: 1 - Plot the baseline survival function 2 - Predict the functions for a particular set of features 3 - Plot the survival function for two different set of features 4 - For your results in part 3 caculate how much more likely a death event is for one than the other for a given period of time End of explanation """ #### BMA Coefficient values #### Different priors """ Explanation: Model selection Difficult to do with classic tools (here) Problem: 1 - Calculate the BMA coefficient values 2 - Try running with different priors End of explanation """
spulido99/Programacion
Camilo/Taller 2 - Archivos y Bases de Datos.ipynb
mit
import pandas as pd DF = pd.read_csv('../data/alternative.tsv', sep='\t') DF """ Explanation: Archivos y Bases de datos La idea de este taller es manipular archivos (leerlos, parsearlos y escribirlos) y hacer lo mismo con bases de datos estructuradas. Ejercicio 1 Baje el archivo de "All associations with added ontology annotations" del GWAS Catalog. + https://www.ebi.ac.uk/gwas/docs/file-downloads Describa las columnas del archivo (que información estamos mirando? Para qué sirve? Por qué la hicieron?) End of explanation """ import mysql.connector conn = mysql.connector.Connect(host='127.0.0.1',user='root',\ password='gaspar',database='programacion') c = conn.cursor() c.execute("""insert into enfermedad values (3, "Psoriasis", "psoriasis", "http://www.ebi.ac.uk/efo/EFO_0000676" )""") conn.commit() c.execute ("select * from enfermedad") for row in c: print (row) c.close() conn.close() """ Explanation: Qué Entidades (tablas) puede definir? -Entidades intermedias -Modelos de entidad y relación -llaves foraneas (lineas que conectan entidades) -como desde python meter datos en mysql End of explanation """
zambzamb/zpic
python/O-X Waves.ipynb
agpl-3.0
import em1ds as zpic electrons = zpic.Species( "electrons", -1.0, ppc = 64, uth=[0.005,0.005,0.005]) sim = zpic.Simulation( nx = 1000, box = 100.0, dt = 0.05, species = electrons ) #Bz0 = 0.5 Bz0 = 1.0 #Bz0 = 4.0 sim.emf.set_ext_fld('uniform', B0= [0.0, 0.0, Bz0]) """ Explanation: Waves in magnetized Plasmas: O-waves and X-waves To study electromagnetic waves in a magnetized plasma, in particular polarized either along the applied magnetic fields (O-waves) or perpendicular to it (X-waves) we initialize the simulation with a uniform thermal plasma, effectively injecting waves of all possible wavelengths into the simulation. The external magnetic field is applied along the z direction, and can be controlled through the Bz0 variable: End of explanation """ import numpy as np niter = 1000 Ey_t = np.zeros((niter,sim.nx)) Ez_t = np.zeros((niter,sim.nx)) print("\nRunning simulation up to t = {:g} ...".format(niter * sim.dt)) while sim.n < niter: print('n = {:d}, t = {:g}'.format(sim.n,sim.t), end = '\r') Ey_t[sim.n,:] = sim.emf.Ey Ez_t[sim.n,:] = sim.emf.Ez sim.iter() print("\nDone.") """ Explanation: We run the simulation up to a fixed number of iterations, controlled by the variable niter, storing the value of the EM fields $E_y$ (X-wave) and $E_z$ (O-wave) at every timestep so we can analyze them later: End of explanation """ import matplotlib.pyplot as plt iter = sim.n//2 plt.plot(np.linspace(0, sim.box, num = sim.nx),Ez_t[iter,:], label = "$E_z$") plt.plot(np.linspace(0, sim.box, num = sim.nx),Ey_t[iter,:], label = "$E_y$") plt.grid(True) plt.xlabel("$x_1$ [$c/\omega_n$]") plt.ylabel("$E$ field []") plt.title("$E_z$, $E_y$, t = {:g}".format( iter * sim.dt)) plt.legend() plt.show() """ Explanation: EM Waves As discussed above, the simulation was initialized with a broad spectrum of waves through the thermal noise of the plasma. 
We can see the noisy fields in the plot below: End of explanation """ import matplotlib.pyplot as plt import matplotlib.colors as colors # (omega,k) power spectrum win = np.hanning(niter) for i in range(sim.nx): Ez_t[:,i] *= win sp = np.abs(np.fft.fft2(Ez_t))**2 sp = np.fft.fftshift( sp ) k_max = np.pi / sim.dx omega_max = np.pi / sim.dt plt.imshow( sp, origin = 'lower', norm=colors.LogNorm(vmin = 1e-4, vmax = 0.1), extent = ( -k_max, k_max, -omega_max, omega_max ), aspect = 'auto', cmap = 'gray') plt.colorbar().set_label('$|FFT(E_z)|^2$') # Theoretical prediction k = np.linspace(-k_max, k_max, num = 512) plt.plot( k, np.sqrt( 1 + k**2), label = "theoretical", ls = "--" ) plt.ylim(0,12) plt.xlim(0,12) plt.xlabel("$k$ [$\omega_n/c$]") plt.ylabel("$\omega$ [$\omega_n$]") plt.title("O-Wave dispersion relation") plt.legend() plt.show() """ Explanation: O-Wave To analyze the dispersion relation of the O-waves we use a 2D (Fast) Fourier transform of $E_z(x,t)$ field values that we stored during the simulation. The plot below shows the obtained power spectrum, alongside with the theoretical prediction for the dispersion relation (in simulation units): $\omega = \sqrt{(1 + k^2)}$ Since the dataset is not periodic along $t$ we apply a windowing technique (Hanning) to the dataset to lower the background spectrum, and make the dispersion relation more visible. 
End of explanation """ import matplotlib.pyplot as plt import matplotlib.colors as colors win = np.hanning(niter) for i in range(sim.nx): Ey_t[:,i] *= win k_max = np.pi / sim.dx omega_max = np.pi / sim.dt sp = np.abs( np.fft.fft2(Ey_t))**2 sp = np.fft.fftshift( sp ) plt.imshow( sp, origin = 'lower', norm=colors.LogNorm(vmin = 1e-4, vmax = 0.1), extent = ( -k_max, k_max, -omega_max, omega_max ), aspect = 'auto', cmap = 'gray') plt.colorbar().set_label('$|FFT(E_y)|^2$') k = np.linspace(-k_max, k_max, num = 512) wa=np.sqrt((k**2+Bz0**2+2-np.sqrt(k**4-2*k**2*Bz0**2+Bz0**4+4*Bz0**2))/2) wb=np.sqrt((k**2+Bz0**2+2+np.sqrt(k**4-2*k**2*Bz0**2+Bz0**4+4*Bz0**2))/2) plt.plot( k,wb, label = 'theoretical $\omega_+$', color = 'r', ls = "--" ) plt.plot( k,wa, label = 'theoretical $\omega_-$', color = 'b', ls = "--" ) plt.xlabel("$k$ [$\omega_n/c$]") plt.ylabel("$\omega$ [$\omega_n$]") plt.title("X-wave dispersion relation") plt.legend() plt.ylim(0,12) plt.xlim(0,12) plt.show() """ Explanation: X-wave To analyze the dispersion relation of the O-waves we use a 2D (Fast) Fourier transform of $E_y(x,t)$ field values that we stored during the simulation. The theoretical prediction has 2 branches: $\omega = 0$ Since the dataset is not periodic along $t$ we apply a windowing technique (Hanning) to the dataset to lower the background spectrum, and make the dispersion relation more visible. End of explanation """
regata/dbda2e_py
chapters/4.ipynb
mit
%matplotlib inline import matplotlib.pyplot as plt plt.style.use('ggplot') import numpy as np np.random.seed(47405) N = 500 # Specify the total number of flips, denoted N. p_heads = 0.5 # Specify underlying probability of heads. # Flip a coin N times and compute the running proportion of heads at each flip. # Generate a random sample of N flips (heads=1, tails=0): flip_sequence = np.random.choice([0,1], p=[1-p_heads,p_heads], size=N) # Compute the running proportion of heads: r = np.cumsum(flip_sequence) # Cumulative sum: Number of heads at each step. n = np.linspace(1, N, num=N) # Number of flips at each step. run_prop = r / n # Component by component division. # Display the beginning of the flip sequence: np.array(['T', 'H'])[flip_sequence[:10]].tolist() # Display the relative frequency at the end of the sequence. run_prop[-1] # Graph the running proportion: plt.figure(figsize=(10,6)) plt.plot(n, run_prop, marker='.') plt.hlines(p_heads, n[0], n[-1], linestyle='dashed') plt.ylim([0.0, 1.0]) plt.xscale('log') plt.xlim(n[0], n[-1]) plt.title('Running Proportion of Heads') plt.xlabel('Flip Number') plt.ylabel('Proportion Heads') plt.show() """ Explanation: What Is This Stu Called Probability? 4.5. Appendix: Code for Figure 4.1 Exercise 4.1 Exercise 4.2 Exercise 4.3 Exercise 4.4 Exercise 4.5 Exercise 4.6 4.5. 
Appendix: Code for Figure 4.1 Python version of RunningProportion.R End of explanation """ import pandas as pd pd.options.display.float_format = '{:.2f}'.format # # load and preview data df = pd.read_csv('../datasets/HairEyeColor.csv') df.head() eye_hair_freq = df.groupby(['Eye', 'Hair']).sum() # Sum across sex # p(eye, hair) eye_hair_prop = eye_hair_freq / eye_hair_freq.sum() # joint proportions, Table 4.1 eye_hair_prop.rename(columns={'Freq': 'Prop'}, inplace=True) eye_hair_prop hair_freq = df.groupby('Hair').sum() # Sum across sex and eye # p(hair) hair_prop = hair_freq / hair_freq.sum() # marginal proportions, Table 4.1 hair_prop.rename(columns={'Freq': 'Prop'}, inplace=True) hair_prop eye_freq = df.groupby('Eye').sum() # Sum across sex and hair # p(eye) eye_prop = eye_freq / eye_freq.sum() # marginal proportions, Table 4.1 eye_prop.rename(columns={'Freq': 'Prop'}, inplace=True) eye_prop # probabilities of the hair colors given Blue eyes, p(hair|Blue eyes) eye_hair_prop.loc['Blue'] / eye_prop.loc['Blue'] # conditional prob, Table 4.2 # probabilities of the hair colors given Brown eyes, p(hair|brown eyes) eye_hair_prop.loc['Brown'] / eye_prop.loc['Brown'] # probabilities of the eye colors given Brown hair, p(eyes|brown hair) brown_hair_eye_prop = eye_hair_prop.xs('Brown', level=1) brown_hair_eye_prop / hair_prop.loc['Brown'] """ Explanation: Exercise 4.1 Purpose: To gain experience with the apply function in R, while dealing with a concrete example of computing conditional probabilities. End of explanation """ values = ['9', '10', 'J', 'Q', 'K', 'A'] suits = ['♠︎', '♣︎', '♥︎', '♦︎'] n_suits = len(suits) n_values = len(values) n_cards = 48 """ Explanation: Exercise 4.2 Purpose: To give you some experience with random number generation in R. Set p_heads = 0.8 in 4.5. Appendix: Code for Figure 4.1 and re-run the section. Exercise 4.3 Purpose: To have you work through an example of the logic presented in Section 4.2.1.2. 
End of explanation """ n_10 = 2 * n_suits p_10 = n_10 / n_cards p_10 """ Explanation: Part A End of explanation """ n_J = 2 * n_suits p_10_or_J = (n_10 + n_J) / n_cards p_10_or_J """ Explanation: Part B End of explanation """ x, dx = np.linspace(0, 1, 50, retstep=True) p = lambda x: 6 * x * (1 - x) # p(x) y = p(x) """ Explanation: Exercise 4.4 Purpose: To give you hands-on experience with a simple probability density function, in R and in calculus, and to reemphasize that density functions can have values larger than 1. End of explanation """ plt.plot(x, y) plt.vlines(x, 0, y) plt.xlabel('x') plt.ylabel('p(x)') plt.title('Probability Density, p(x)') plt.show() area = sum(y * dx) area """ Explanation: Part A End of explanation """ meanval = 0.0 # Specify mean of distribution. sdval = 0.2 # Specify standard deviation of distribution. xlow = meanval - sdval # Specify low end of x-axis. xhigh = meanval + sdval # Specify high end of x-axis. x, dx = np.linspace(xlow, xhigh, 50, retstep=True) p = lambda x: ( 1/(sdval*np.sqrt(2*np.pi)) ) * np.exp( -((x-meanval)/sdval)**2/2 ) y = p(x) plt.plot(x, y) plt.vlines(x, 0, y) plt.xlim([x[0], x[-1]]) plt.xlabel('x') plt.ylabel('p(x)') plt.title('Probability Density, p(x)') plt.show() area = sum(y * dx) area """ Explanation: Part B p(x) = 6*x*(1 - x) = 6*x - 6*x^2 p(x) = f'(x) f(x) = 6*x^2/2 - 6*x^3/3 = 3x^2 - 2x^3 f(1) - f(0) = 3 - 2 = 1 Part C f(x) = 0; x - ? 3 * x^2 - 2 * x^3 = 0 3 - 2 * x = 0 x = 3/2 = 1.5 Exercise 4.5 Purpose: To have you use a normal curve to describe beliefs. It’s also handy to know the area under the normal curve between μ and σ. 
Part A End of explanation """ meanval = 162 sdval = (177 - 147) / 2 sdval """ Explanation: Part B End of explanation """ # p(food|grade) p_food_grade = [ [0.3, 0.6, 0.1], [0.6, 0.3, 0.1], [0.3, 0.1, 0.6] ] p_food_grade = pd.DataFrame(p_food_grade, index=['grade 1st','grade 6th', 'grade 11th'], columns=['Ice cream', 'Fruit', 'French fries']) p_food_grade # p(grade) p_grade = pd.Series([0.2, 0.2, 0.6], index=['grade 1st','grade 6th', 'grade 11th']) p_grade # p(grade, food) p_join = p_food_grade.copy() p_join['Ice cream'] = p_food_grade['Ice cream'] * p_grade p_join['Fruit'] = p_food_grade['Fruit'] * p_grade p_join['French fries'] = p_food_grade['French fries'] * p_grade p_join p_join.values.sum() """ Explanation: Exercise 4.6 Purpose: Recognize and work with the fact that Equation 4.9 can be solved for the joint probability, which will be crucial for developing Bayes’ theorem. End of explanation """
HNoorazar/PyOpinionGame
Famous_Models.ipynb
gpl-3.0
import numpy as np import pandas as pd from pandas import Series, DataFrame import matplotlib.pyplot as plt import matplotlib.animation as animation import matplotlib.image as mpimg from matplotlib import rcParams import seaborn as sb """ Explanation: Famous Opinion Dynamic Models End of explanation """ def converged_test_stochastic(matrix, threshold=0.0000001): localMatrix = np.copy(matrix).astype(float); e1 = sum(abs(np.sum(localMatrix , axis = 0) - 1)); e2 = sum(abs(np.sum(localMatrix , axis = 1) - 1)); return (e2) > threshold def one_step_stochastic(matrix): """ Here we will do one step towards Making a given matrix a bio-stochastic one It does what OneStep does """ # copy the input so that the original input is not changed. localMatrix = np.copy(matrix).astype(float); # Divide each row by sum of the entries in the given row. localMatrix = np.dot(np.diag(1/np.sum(localMatrix, axis=1)), localMatrix); return localMatrix def make_stochastic(matrix): localMatrix = np.copy(matrix).astype(float); while (converged_test_stochastic(localMatrix)): localMatrix = one_step_stochastic(localMatrix); return localMatrix """ Explanation: DeGroot Model Convert to row stochastic Matrix This code uses the method of the Sinkhorn paper: Sinkhorn, R. (1964). A relationship between arbitrary positive matrices and doubly stochastic matrices. The Annals of Mathematical Statistics, 35(2):876–879.) 
End of explanation """ np.random.seed(100) pop_size = 10 no_time_steps = 100 # initial opinion of agents at time t=0 initial_opinions = abs(np.random.randn(pop_size,)) # Build the trust matrix (weighted adjacency matrix) trust_matrix = abs(np.random.randn(pop_size,pop_size)) trust_matrix = make_stochastic(trust_matrix) # Initialize history of evolution r_evolution_of_opinions = np.zeros((pop_size, no_time_steps)) r_evolution_of_opinions[:,0] = initial_opinions # Do the game for time_step in xrange(1,no_time_steps): r_evolution_of_opinions[:,time_step] = np.dot(trust_matrix, r_evolution_of_opinions[:,time_step-1]) fig = plt.figure() plt.plot(r_evolution_of_opinions[:,:20].T) plt.xlabel('Time') plt.ylabel('Opinionds') plt.title('Evolution of Opinions') plt.show() """ Explanation: An Example End of explanation """ pop_size = 3 no_time_steps = 100 # initial opinion of agents at time t=0 initial_opinions = abs(np.random.randn(pop_size,)) # Build the trust matrix (weighted adjacency matrix) trust = np.array([[0, .5, .5],[1. , 0., 0.], [1., 0., 0.]]) # Initialize history of evolution non_conv_evolution = np.zeros((pop_size, no_time_steps)) non_conv_evolution[:,0] = initial_opinions # Do the game for time_step in xrange(1,no_time_steps): non_conv_evolution[:,time_step] = np.dot(trust, non_conv_evolution[:,time_step-1]) fig1 = plt.figure() plt.plot(non_conv_evolution[:,:20].T) plt.xlabel('Time') plt.ylabel('Opinionds') plt.title('Evolution of Opinions') plt.show() fig2 = plt.figure() plt.plot(non_conv_evolution.T) plt.xlabel('Time') plt.ylabel('Opinionds') plt.title('Evolution of Opinions (DeGroot)') plt.show() """ Explanation: Non Convergent Example End of explanation """
DB2-Samples/db2jupyter
v1/Db2 11 Time and Date Functions.ipynb
apache-2.0
%run db2.ipynb """ Explanation: <a id="top"></a> Db2 11 Time and Date Functions There are plenty of new date and time functions found in Db2 11. These functions allow you to extract portions from a date and format the date in a variety of different ways. While Db2 already has a number of date and time functions, these new functions allow for greater compatibility with other database implementations, making it easier to port to DB2.ion. End of explanation """ %sql VALUES NOW """ Explanation: Table of Contents Extract Function DATE_PART Function DATE_TRUNC Function Extracting Specific Days from a Month Date Addition Extracting Weeks, Months, Quarters, and Years Next Day Function Between Date/Time Functions Months Between Date Duration Overlaps Predicate UTC Time Conversions Back to Top <a id='extract'></a> Extract Function The EXTRACT function extracts and element from a date/time value. The syntax of the EXTRACT command is: <pre> EXTRACT( element FROM expression ) </pre> This is a slightly different format from most functions that you see in the DB2. Element must be one of the following values: |Element Name | Description |:---------------- | :----------------------------------------------------------------------------------------- |EPOCH | Number of seconds since 1970-01-01 00:00:00.00. The value can be positive or negative. |MILLENNIUM(S) | The millennium is to be returned. |CENTURY(CENTURIES)| The number of full 100-year periods represented by the year. |DECADE(S) | The number of full 10-year periods represented by the year. |YEAR(S) | The year portion is to be returned. |QUARTER | The quarter of the year (1 - 4) is to be returned. |MONTH | The month portion is to be returned. |WEEK | The number of the week of the year (1 - 53) that the specified day is to be returned. |DAY(S) | The day portion is to be returned. |DOW | The day of the week that is to be returned. Note that "1" represents Sunday. |DOY | The day (1 - 366) of the year that is to be returned. 
|HOUR(S) | The hour portion is to be returned. |MINUTE(S) | The minute portion is to be returned. |SECOND(S) | The second portion is to be returned. |MILLISECOND(S) | The second of the minute, including fractional parts to one thousandth of a second |MICROSECOND(S) | The second of the minute, including fractional parts to one millionth of a second The synonym NOW is going to be used in the next example. NOW is a synonym for CURRENT TIMESTAMP. End of explanation """ %%sql -a WITH DATES(FUNCTION, RESULT) AS ( VALUES ('EPOCH', EXTRACT( EPOCH FROM NOW )), ('MILLENNIUM(S)', EXTRACT( MILLENNIUM FROM NOW )), ('CENTURY(CENTURIES)', EXTRACT( CENTURY FROM NOW )), ('DECADE(S)', EXTRACT( DECADE FROM NOW )), ('YEAR(S)', EXTRACT( YEAR FROM NOW )), ('QUARTER', EXTRACT( QUARTER FROM NOW )), ('MONTH', EXTRACT( MONTH FROM NOW )), ('WEEK', EXTRACT( WEEK FROM NOW )), ('DAY(S)', EXTRACT( DAY FROM NOW )), ('DOW', EXTRACT( DOW FROM NOW )), ('DOY', EXTRACT( DOY FROM NOW )), ('HOUR(S)', EXTRACT( HOURS FROM NOW )), ('MINUTE(S)', EXTRACT( MINUTES FROM NOW )), ('SECOND(S)', EXTRACT( SECONDS FROM NOW )), ('MILLISECOND(S)', EXTRACT( MILLISECONDS FROM NOW )), ('MICROSECOND(S)', EXTRACT( MICROSECONDS FROM NOW )) ) SELECT FUNCTION, CAST(RESULT AS BIGINT) FROM DATES """ Explanation: This SQL will return every possible extract value from the current date.the SQL standard. 
End of explanation """ %%sql -a WITH DATES(FUNCTION, RESULT) AS ( VALUES ('EPOCH', DATE_PART('EPOCH' ,NOW )), ('MILLENNIUM(S)', DATE_PART('MILLENNIUM' ,NOW )), ('CENTURY(CENTURIES)', DATE_PART('CENTURY' ,NOW )), ('DECADE(S)', DATE_PART('DECADE' ,NOW )), ('YEAR(S)', DATE_PART('YEAR' ,NOW )), ('QUARTER', DATE_PART('QUARTER' ,NOW )), ('MONTH', DATE_PART('MONTH' ,NOW )), ('WEEK', DATE_PART('WEEK' ,NOW )), ('DAY(S)', DATE_PART('DAY' ,NOW )), ('DOW', DATE_PART('DOW' ,NOW )), ('DOY', DATE_PART('DOY' ,NOW )), ('HOUR(S)', DATE_PART('HOURS' ,NOW )), ('MINUTE(S)', DATE_PART('MINUTES' ,NOW )), ('SECOND(S)', DATE_PART('SECONDS' ,NOW )), ('MILLISECOND(S)', DATE_PART('MILLISECONDS' ,NOW )), ('MICROSECOND(S)', DATE_PART('MICROSECONDS' ,NOW )) ) SELECT FUNCTION, CAST(RESULT AS BIGINT) FROM DATES; """ Explanation: Back to Top <a id='part'></a> DATE_PART Function DATE_PART is similar to the EXTRACT function but it uses the more familiar syntax: <pre> DATE_PART(element, expression) </pre> In the case of the function, the element must be placed in quotes, rather than as a keyword in the EXTRACT function. in addition, the DATE_PART always returns a BIGINT, while the EXTRACT function will return a different data type depending on the element being returned. For instance, compare the SECONDs option for both functions. In the case of EXTRACT you get a DECIMAL result while for the DATE_PART you get a truncated BIGINT. 
End of explanation """ %%sql -a WITH DATES(FUNCTION, RESULT) AS ( VALUES ('MILLENNIUM(S)', DATE_TRUNC('MILLENNIUM' ,NOW )), ('CENTURY(CENTURIES)', DATE_TRUNC('CENTURY' ,NOW )), ('DECADE(S)', DATE_TRUNC('DECADE' ,NOW )), ('YEAR(S)', DATE_TRUNC('YEAR' ,NOW )), ('QUARTER', DATE_TRUNC('QUARTER' ,NOW )), ('MONTH', DATE_TRUNC('MONTH' ,NOW )), ('WEEK', DATE_TRUNC('WEEK' ,NOW )), ('DAY(S)', DATE_TRUNC('DAY' ,NOW )), ('HOUR(S)', DATE_TRUNC('HOURS' ,NOW )), ('MINUTE(S)', DATE_TRUNC('MINUTES' ,NOW )), ('SECOND(S)', DATE_TRUNC('SECONDS' ,NOW )), ('MILLISECOND(S)', DATE_TRUNC('MILLISECONDS' ,NOW )), ('MICROSECOND(S)', DATE_TRUNC('MICROSECONDS' ,NOW )) ) SELECT FUNCTION, RESULT FROM DATES """ Explanation: Back to Top <a id='trunc'></a> DATE_TRUNC Function DATE_TRUNC computes the same results as the DATE_PART function but then truncates the value down. Note that not all values can be truncated. The function syntax is: <pre> DATE_TRUNC(element, expression) </pre> The element must be placed in quotes, rather than as a keyword in the EXTRACT function. Note that DATE_TRUNC always returns a BIGINT. The elements that can be truncated are: |Element Name |Description |:---------------- |:------------------------------------------------------------------------------ |MILLENNIUM(S) |The millennium is to be returned. |CENTURY(CENTURIES) |The number of full 100-year periods represented by the year. |DECADE(S) |The number of full 10-year periods represented by the year. |YEAR(S) |The year portion is to be returned. |QUARTER |The quarter of the year (1 - 4) is to be returned. |MONTH |The month portion is to be returned. |WEEK |The number of the week of the year (1 - 53) that the specified day is to be returned. |DAY(S) |The day portion is to be returned. |HOUR(S) |The hour portion is to be returned. |MINUTE(S) |The minute portion is to be returned. |SECOND(S) |The second portion is to be returned. 
|MILLISECOND(S) |The second of the minute, including fractional parts to one thousandth of a second
|MICROSECOND(S) |The second of the minute, including fractional parts to one millionth of a second.
End of explanation
"""
%sql VALUES NOW
"""
Explanation: Back to Top
<a id='month'></a>
Extracting Specific Days from a Month
There are three functions that retrieve day information from a date. These functions include:
DAYOFMONTH - returns an integer between 1 and 31 that represents the day of the argument
FIRST_DAY - returns a date or timestamp that represents the first day of the month of the argument
DAYS_TO_END_OF_MONTH - returns the number of days to the end of the month
This is the current date so that you know what all of the calculations are based on.
End of explanation
"""
%sql VALUES DAYOFMONTH(NOW)
"""
Explanation: This expression (DAYOFMONTH) returns the day of the month.
End of explanation
"""
%sql VALUES FIRST_DAY(NOW)
"""
Explanation: FIRST_DAY will return the first day of the month. You could probably compute this with standard SQL date functions, but it is a lot easier just to use this builtin function.
End of explanation
"""
%sql VALUES DAYS_TO_END_OF_MONTH(NOW)
"""
Explanation: Finally, DAYS_TO_END_OF_MONTH will return the number of days to the end of the month. A zero would be returned if you are on the last day of the month.
End of explanation
"""
%%sql
WITH DATES(FUNCTION, RESULT) AS
  (
  VALUES
    ('CURRENT DATE ',NOW),
    ('ADD_YEARS ',ADD_YEARS(NOW,1)),
    ('ADD_MONTHS ',ADD_MONTHS(NOW,1)),
    ('ADD_DAYS ',ADD_DAYS(NOW,1)),
    ('ADD_HOURS ',ADD_HOURS(NOW,1)),
    ('ADD_MINUTES ',ADD_MINUTES(NOW,1)),
    ('ADD_SECONDS ',ADD_SECONDS(NOW,1))
  )
SELECT * FROM DATES
"""
Explanation: Back to Top
<a id='add'></a>
Date Addition Functions
The date addition functions will add or subtract days from a current timestamp.
The functions that are available are: ADD_YEARS - Add years to a date ADD_MONTHS - Add months to a date ADD_DAYS - Add days to a date ADD_HOURS - Add hours to a date ADD_MINUTES - Add minutes to a date ADD_SECONDS - Add seconds to a date The format of the function is: <pre> ADD_DAYS ( expression, numeric expression ) </pre> The following SQL will add one "unit" to the current date. End of explanation """ %%sql WITH DATES(FUNCTION, RESULT) AS ( VALUES ('CURRENT DATE ',NOW), ('ADD_YEARS ',ADD_YEARS(NOW,-1)), ('ADD_MONTHS ',ADD_MONTHS(NOW,-1)), ('ADD_DAYS ',ADD_DAYS(NOW,-1)), ('ADD_HOURS ',ADD_HOURS(NOW,-1)), ('ADD_MINUTES ',ADD_MINUTES(NOW,-1)), ('ADD_SECONDS ',ADD_SECONDS(NOW,-1)) ) SELECT * FROM DATES """ Explanation: A negative number can be used to subtract values from the current date. End of explanation """ %%sql WITH DATES(FUNCTION, RESULT) AS ( VALUES ('CURRENT DATE ',NOW), ('THIS_WEEK ',THIS_WEEK(NOW)), ('THIS_MONTH ',THIS_MONTH(NOW)), ('THIS_QUARTER ',THIS_QUARTER(NOW)), ('THIS_YEAR ',THIS_YEAR(NOW)) ) SELECT * FROM DATES """ Explanation: Back to Top <a id='extract'></a> Extracting Weeks, Months, Quarters, and Years from a Date There are four functions that extract different values from a date. These functions include: THIS_QUARTER - returns the first day of the quarter THIS_WEEK - returns the first day of the week (Sunday is considered the first day of that week) THIS_MONTH - returns the first day of the month THIS_YEAR - returns the first day of the year End of explanation """ %%sql WITH DATES(FUNCTION, RESULT) AS ( VALUES ('CURRENT DATE ',NOW), ('NEXT_WEEK ',NEXT_WEEK(NOW)), ('NEXT_MONTH ',NEXT_MONTH(NOW)), ('NEXT_QUARTER ',NEXT_QUARTER(NOW)), ('NEXT_YEAR ',NEXT_YEAR(NOW)) ) SELECT * FROM DATES """ Explanation: There is also a NEXT function for each of these. The NEXT function will return the next week, month, quarter, or year given a current date. 
End of explanation """ %%sql WITH DATES(FUNCTION, RESULT) AS ( VALUES ('CURRENT DATE ',NOW), ('Monday ',NEXT_DAY(NOW,'Monday')), ('Tuesday ',NEXT_DAY(NOW,'TUE')), ('Wednesday ',NEXT_DAY(NOW,'Wednesday')), ('Thursday ',NEXT_DAY(NOW,'Thursday')), ('Friday ',NEXT_DAY(NOW,'FRI')), ('Saturday ',NEXT_DAY(NOW,'Saturday')), ('Sunday ',NEXT_DAY(NOW,'Sunday')) ) SELECT * FROM DATES """ Explanation: Back to Top <a id='nextday'></a> Next Day Function The previous set of functions returned a date value for the current week, month, quarter, or year (or the next one if you used the NEXT function). The NEXT_DAY function returns the next day (after the date you supply) based on the string representation of the day. The date string will be dependent on the codepage that you are using for the database. The date (from an English perspective) can be: |Day |Short form |:-------- |:--------- |Monday |MON |Tuesday |TUE |Wednesday |WED |Thursday |THU |Friday |FRI |Saturday |SAT |Sunday |SUN The following SQL will show you the "day" after the current date that is Monday through Sunday. End of explanation """ %%sql -q DROP VARIABLE FUTURE_DATE; CREATE VARIABLE FUTURE_DATE TIMESTAMP DEFAULT(NOW + 1 SECOND + 1 MINUTE + 1 HOUR + 8 DAYS + 1 YEAR); WITH DATES(FUNCTION, RESULT) AS ( VALUES ('SECONDS_BETWEEN',SECONDS_BETWEEN(FUTURE_DATE,NOW)), ('MINUTES_BETWEEN',MINUTES_BETWEEN(FUTURE_DATE,NOW)), ('HOURS_BETWEEN ',HOURS_BETWEEN(FUTURE_DATE,NOW)), ('DAYS BETWEEN ',DAYS_BETWEEN(FUTURE_DATE,NOW)), ('WEEKS_BETWEEN ',WEEKS_BETWEEN(FUTURE_DATE,NOW)), ('YEARS_BETWEEN ',YEARS_BETWEEN(FUTURE_DATE,NOW)) ) SELECT * FROM DATES; """ Explanation: Back to Top <a id='between'></a> Between Date/Time Functions These date functions compute the number of full seconds, minutes, hours, days, weeks, and years between two dates. If there isn't a full value between the two objects (like less than a day), a zero will be returned. 
These new functions are: HOURS_BETWEEN - returns the number of full hours between two arguments MINUTES_BETWEEN - returns the number of full minutes between two arguments SECONDS_BETWEEN - returns the number of full seconds between two arguments DAYS_BETWEEN - returns the number of full days between two arguments WEEKS_BETWEEN - returns the number of full weeks between two arguments YEARS_BETWEEN - returns the number of full years between two arguments The format of the function is: <pre> DAYS_BETWEEN( expression1, expression2 ) </pre> The following SQL will use a date that is in the future with exactly one extra second, minute, hour, day, week and year added to it. End of explanation """ %%sql WITH DATES(FUNCTION, RESULT) AS ( VALUES ('0 MONTH ',MONTHS_BETWEEN(NOW, NOW)), ('1 MONTH ',MONTHS_BETWEEN(NOW + 1 MONTH, NOW)), ('1 MONTH + 1 DAY',MONTHS_BETWEEN(NOW + 1 MONTH + 1 DAY, NOW)), ('LEAP YEAR ',MONTHS_BETWEEN('2016-02-01','2016-03-01')), ('NON-LEAP YEAR ',MONTHS_BETWEEN('2015-02-01','2015-03-01')) ) SELECT * FROM DATES """ Explanation: Back to Top <a id='mbetween'></a> MONTHS_BETWEEN Function You may have noticed that the MONTHS_BETWEEN function was not in the previous list of functions. The reason for this is that the value returned for MONTHS_BETWEEN is different from the other functions. The MONTHS_BETWEEN function returns a DECIMAL value rather than an integer value. The reason for this is that the duration of a month is not as precise as a day, week or year. The following example will show how the duration is a decimal value rather than an integer. You could always truncate the value if you want an integer. 
End of explanation """ %%sql WITH DATES(FUNCTION, RESULT) AS ( VALUES ('AGE + 1 DAY ',AGE(NOW - 1 DAY)), ('AGE + 1 MONTH ',AGE(NOW - 1 MONTH)), ('AGE + 1 YEAR ',AGE(NOW - 1 YEAR)), ('AGE + 1 DAY + 1 MONTH ',AGE(NOW - 1 DAY - 1 MONTH)), ('AGE + 1 DAY + 1 YEAR ',AGE(NOW - 1 DAY - 1 YEAR)), ('AGE + 1 DAY + 1 MONTH + 1 YEAR',AGE(NOW - 1 DAY - 1 MONTH - 1 YEAR)) ) SELECT * FROM DATES """ Explanation: Back to Top <a id='duration'></a> Date Duration Functions An alternate way of representing date durations is through the use of an integer with the format YYYYMMDD where the YYYY represents the year, MM for the month and DD for the day. Date durations are easier to manipulate than timestamp values and take up substantially less storage. There are two new functions. YMD_BETWEEN returns a numeric value that specifies the number of full years, full months, and full days between two datetime values AGE returns a numeric value that represents the number of full years, full months, and full days between the current timestamp and the argument This SQL statement will return various AGE calculations based on the current timestamp. End of explanation """ %%sql WITH DATES(FUNCTION, RESULT) AS ( VALUES ('1 DAY ',YMD_BETWEEN(NOW,NOW - 1 DAY)), ('1 MONTH ',YMD_BETWEEN(NOW,NOW - 1 MONTH)), ('1 YEAR ',YMD_BETWEEN(NOW,NOW - 1 YEAR)), ('1 DAY + 1 MONTH ',YMD_BETWEEN(NOW,NOW - 1 DAY - 1 MONTH)), ('1 DAY + 1 YEAR ',YMD_BETWEEN(NOW,NOW - 1 DAY - 1 YEAR)), ('1 DAY + 1 MONTH + 1 YEAR',YMD_BETWEEN(NOW,NOW - 1 DAY - 1 MONTH - 1 YEAR)) ) SELECT * FROM DATES """ Explanation: The YMD_BETWEEN function is similar to the AGE function except that it takes two date arguments. We can simulate the AGE function by supplying the NOW function to the YMD_BETWEEN function. 
End of explanation """ %%sql VALUES CASE WHEN (NOW, NOW + 1 DAY) OVERLAPS (NOW + 1 DAY, NOW + 2 DAYS) THEN 'Overlaps' ELSE 'No Overlap' END """ Explanation: Back to Top <a id='overlaps'></a> OVERLAPS Predicate The OVERLAPS predicate is used to determine whether two chronological periods overlap. This is not a function within DB2, but rather a special SQL syntax extension. A chronological period is specified by a pair of date-time expressions. The first expression specifies the start of a period; the second specifies its end. <pre> (start1,end1) OVERLAPS (start2, end2) </pre> The beginning and end values are not included in the periods. The following summarizes the overlap logic. For example, the periods 2016-10-19 to 2016-10-20 and 2016-10-20 to 2016-10-21 do not overlap. For instance, the following interval does not overlap. End of explanation """ %%sql VALUES CASE WHEN (NOW, NOW + 2 DAYS) OVERLAPS (NOW + 1 DAY, NOW + 2 DAYS) THEN 'Overlaps' ELSE 'No Overlap' END """ Explanation: If the first date range is extended by one day then the range will overlap. End of explanation """ %%sql VALUES CASE WHEN (NOW, NOW + 1 DAY) OVERLAPS (NOW, NOW + 1 DAY) THEN 'Overlaps' ELSE 'No Overlap' END """ Explanation: Identical date ranges will overlap. End of explanation """ %%sql VALUES FROM_UTC_TIMESTAMP(TIMESTAMP '2011-12-25 09:00:00.123456', 'Asia/Tokyo'); """ Explanation: Back to Top <a id='utc'></a> UTC Time Conversions Db2 has two functions that allow you to translate timestamps to and from UTC (Coordinated Universal Time). The FROM_UTC_TIMESTAMP scalar function returns a TIMESTAMP that is converted from Coordinated Universal Time to the time zone specified by the time zone string. The TO_UTC_TIMESTAMP scalar function returns a TIMESTAMP that is converted to Coordinated Universal Time from the timezone that is specified by the timezone string. 
The format of the two functions is: <pre> FROM_UTC_TIMESTAMP( expression, timezone ) TO_UTC_TIMESTAMP( expression, timezone) </pre> The return value from each of these functions is a timestamp. The "expression" is a timestamp that you want to convert to the local timezone (or convert to UTC). The timezone is an expression that specifies the time zone that the expression is to be adjusted to. The value of the timezone-expression must be a time zone name from the Internet Assigned Numbers Authority (IANA) time zone database. The standard format for a time zone name in the IANA database is Area/Location, where: Area is the English name of a continent, ocean, or the special area 'Etc' Location is the English name of a location within the area; usually a city, or small island Examples: "America/Toronto" "Asia/Sakhalin" "Etc/UTC" (which represents Coordinated Universal Time) For complete details on the valid set of time zone names and the rules that are associated with those time zones, refer to the IANA time zone database. The database server uses version 2010c of the IANA time zone database. The result is a timestamp, adjusted from/to the Coordinated Universal Time time zone to the time zone specified by the timezone-expression. If the timezone-expression returns a value that is not a time zone in the IANA time zone database, then the value of expression is returned without being adjusted. The timestamp adjustment is done by first applying the raw offset from Coordinated Universal Time of the timezone-expression. If Daylight Saving Time is in effect at the adjusted timestamp for the time zone that is specified by the timezone-expression, then the Daylight Saving Time offset is also applied to the timestamp. Time zones that use Daylight Saving Time have ambiguities at the transition dates. When a time zone changes from standard time to Daylight Saving Time, a range of time does not occur as it is skipped during the transition. 
When a time zone changes from Daylight Saving Time to standard time, a range of time occurs twice. Ambiguous timestamps are treated as if they occurred when standard time was in effect for the time zone. Convert the Coordinated Universal Time timestamp '2011-12-25 09:00:00.123456' to the 'Asia/Tokyo' time zone. The following returns a TIMESTAMP with the value '2011-12-25 18:00:00.123456'. End of explanation """ %%sql VALUES FROM_UTC_TIMESTAMP(TIMESTAMP'2014-11-02 06:55:00', 'America/Toronto'); """ Explanation: Convert the Coordinated Universal Time timestamp '2014-11-02 06:55:00' to the 'America/Toronto' time zone. The following returns a TIMESTAMP with the value '2014-11-02 01:55:00'. End of explanation """ %%sql VALUES FROM_UTC_TIMESTAMP(TIMESTAMP'2015-03-02 06:05:00', 'America/Toronto'); """ Explanation: Convert the Coordinated Universal Time timestamp '2015-03-02 06:05:00' to the 'America/Toronto' time zone. The following returns a TIMESTAMP with the value '2015-03-02 01:05:00'. End of explanation """ %%sql VALUES TO_UTC_TIMESTAMP(TIMESTAMP'1970-01-01 00:00:00', 'America/Denver'); """ Explanation: Convert the timestamp '1970-01-01 00:00:00' to the Coordinated Universal Time timezone from the 'America/Denver' timezone. The following returns a TIMESTAMP with the value '1970-01-01 07:00:00'. End of explanation """ %%sql -q DROP TABLE TXS_BASE; CREATE TABLE TXS_BASE ( ID INTEGER NOT NULL, CUSTID INTEGER NOT NULL, TXTIME_UTC TIMESTAMP NOT NULL ); """ Explanation: Using UTC Functions One of the applications for using the UTC is to take the transaction timestamp and normalize it across all systems that access the data. You can convert the timestamp to UTC on insert and then when it is retrieved, it can be converted to the local timezone. This example will use a number of techniques to hide the complexity of changing timestamps to local timezones. The following SQL will create our base transaction table (TXS_BASE) that will be used throughout the example. 
End of explanation """ %%sql CREATE OR REPLACE VARIABLE TIME_ZONE VARCHAR(255) DEFAULT('America/Toronto'); """ Explanation: The UTC functions will be written to take advantage of a local timezone variable called TIME_ZONE. This variable will contain the timezone of the server (or user) that is running the transaction. In this case we are using the timezone in Toronto, Canada. End of explanation """ %sql SET TIME_ZONE = 'America/Toronto' """ Explanation: The SET Command can be used to update the TIME_ZONE to the current location we are in. End of explanation """ %%sql CREATE OR REPLACE FUNCTION GET_TIMEZONE() RETURNS VARCHAR(255) LANGUAGE SQL CONTAINS SQL RETURN (TIME_ZONE) """ Explanation: In order to retrieve the value of the current timezone, we take advantage of a simple user-defined function called GET_TIMEZONE. It just retrieves the contents of the current TIME_ZONE variable that we set up. End of explanation """ %%sql CREATE OR REPLACE VIEW TXS AS ( SELECT ID, CUSTID, FROM_UTC_TIMESTAMP(TXTIME_UTC,GET_TIMEZONE()) AS TXTIME FROM TXS_BASE ) """ Explanation: The TXS view is used by all SQL statements rather than the TXS_BASE table. The reason for this is to take advantage of INSTEAD OF triggers that can manipulate the UTC without modifying the original SQL. Note that when the data is returned from the view that the TXTIME field is converted from UTC to the current TIMEZONE that we are in. 
End of explanation """ %%sql -d CREATE OR REPLACE TRIGGER I_TXS INSTEAD OF INSERT ON TXS REFERENCING NEW AS NEW_TXS FOR EACH ROW MODE DB2SQL BEGIN ATOMIC INSERT INTO TXS_BASE VALUES ( NEW_TXS.ID, NEW_TXS.CUSTID, TO_UTC_TIMESTAMP(NEW_TXS.TXTIME,GET_TIMEZONE()) ); END @ CREATE OR REPLACE TRIGGER U_TXS INSTEAD OF UPDATE ON TXS REFERENCING NEW AS NEW_TXS OLD AS OLD_TXS FOR EACH ROW MODE DB2SQL BEGIN ATOMIC UPDATE TXS_BASE SET (ID, CUSTID, TXTIME_UTC) = (NEW_TXS.ID, NEW_TXS.CUSTID, TO_UTC_TIMESTAMP(NEW_TXS.TXTIME,TIME_ZONE) ) WHERE TXS_BASE.ID = OLD_TXS.ID ; END @ CREATE OR REPLACE TRIGGER D_TXS INSTEAD OF DELETE ON TXS REFERENCING OLD AS OLD_TXS FOR EACH ROW MODE DB2SQL BEGIN ATOMIC DELETE FROM TXS_BASE WHERE TXS_BASE.ID = OLD_TXS.ID ; END @ """ Explanation: An INSTEAD OF trigger (INSERT, UPDATE, and DELETE) is created against the TXS view so that any insert or update on a TXTIME column will be converted back to the UTC value. From an application perspective, we are using the local time, not the UTC time. End of explanation """ %sql VALUES NOW """ Explanation: At this point in time(!) we can start inserting records into our table. We have already set the timezone to be Toronto, so the next insert statement will take the current time (NOW) and insert it into the table. For reference, here is the current time. End of explanation """ %%sql INSERT INTO TXS VALUES(1,1,NOW); SELECT * FROM TXS; """ Explanation: We will insert one record into the table and immediately retrieve the result. End of explanation """ %sql SELECT * FROM TXS_BASE """ Explanation: Note that the timsstamp appears to be the same as what we insert (plus or minus a few seconds). What actually sits in the base table is the UTC time. End of explanation """ %sql SET TIME_ZONE = 'America/Vancouver' """ Explanation: We can modify the time that is returned to us by changing our local timezone. The statement will make the system think we are in Vancouver. 
End of explanation
"""
%sql SELECT * FROM TXS
"""
Explanation: Retrieving the results will show that the timestamp has shifted by 3 hours (Vancouver is 3 hours behind Toronto).
End of explanation
"""
%%sql
INSERT INTO TXS VALUES(2,2,NOW);
SELECT * FROM TXS;
"""
Explanation: So what happens if we insert a record into the table now that we are in Vancouver?
End of explanation
"""
%sql SELECT * FROM TXS_BASE
"""
Explanation: The data retrieved reflects the fact that we are now in Vancouver from an application perspective. Looking at the base table, you will see that everything has been converted to UTC time.
End of explanation
"""
%%sql
SET TIME_ZONE = 'America/Toronto';
SELECT * FROM TXS;
"""
Explanation: Finally, we can switch back to Toronto time and see when the transactions were done. You will see that from a Toronto perspective that the transactions were done three hours later because of the timezone differences.
End of explanation
"""
IACS-CS-207/cs207-F17
lectures/L14/L14.ipynb
mit
class SentenceIterator: def __init__(self, words): self.words = words self.index = 0 def __next__(self): try: word = self.words[self.index] except IndexError: raise StopIteration() self.index += 1 return word def __iter__(self): return self class Sentence: # An iterable def __init__(self, text): self.text = text self.words = text.split() def __iter__(self): return SentenceIterator(self.words) def __repr__(self): return 'Sentence(%s)' % reprlib.repr(self.text) """ Explanation: Lecture 14 Wednesday, October 25th 2017 Last time: Iterators and Iterables Trees, Binary trees, and BSTs This time: BST Traversal Generators Memory layouts Heaps? BST Traversal We've stored our data in a BST This seemed like a good idea at the time because BSTs have some nice properties To be able to access/use our data, we need to be able to traverse the tree Traversal Choices There are three traversal choices based on an implicit ordering of the tree from left to right: In-order: Traverse left-subtree, then current root, then right sub tree Post-order: Traverse left subtree, then traverse left subtree, and then current root Pre-order: Current root, then traverse left subtree, then traverse right subtree Traversing a tree means performing some operation In our examples, the operation will be "displaying the data" However, an operation could be "deleting files" Example Traverse the BST below using in-order, post-order, and pre-order traversals. Write the resulting sorted data structure (as a list is fine). Heaps We listed several types of data structures at the beginning of our data structures unit. So far, we have discussed lists and trees (in particular binary trees and binary search trees). Heaps are a type of tree, a little different from binary trees. Some Motivation Priority Queues People may come to your customer service counter in a certain order, but you might want to serve your executive class first! 
In other words, there is an "ordering" on your customers and you want to serve people in the order of the most VIP. This problem requires us to then sort things by importance and then evaluate things in this sorted order. A priority queue is a data structure for this, which allows us to do things more efficiently than simple sorting every time a new thing comes in. Items are inserted at one end and deleted from the other end of a queue (first in, first out [FIFO] buffer). The basic priority queue is defined to be supporting three primary operations: 1. Insert: insert an item with "key" (e.g. an importance) $k$ into priority queue $Q$. 2. Find Minimum: get the item, or a pointer to the item, whose key value is smaller than any other key in $Q$. 3. Delete Minimum: Remove the item with minimum $k$ from $Q$. Comments on Implementation of Priorty Queues One could use an unsorted array and store a pointer to the minimum index; accessing the minimum is an $O(1)$ operation. * It's cheap to update the pointer when new items are inserted into the array because we update it in $O(1)$ only when the new value is less than the current one. * Finding a new minimum after deleting the old one requires a scan of the array ($O(n)$ operation) and then resetting the pointer. One could alternatively implement the priority queue with a balanced binary tree structure. Then we'll get performance of $O(\log(n))$! This leads us to heaps. Heaps are a type of balanced binary tree. A heap providing access to minimum values is called a min-heap A heap providing access to maximum values is called a max-heap Note that you can't have a min-heap and max-heap together Heapsort Implementing a priority queue with selection sort takes $O(n^{2})$ operations Using a heap takes $O(n\log(n))$ operations Implementing a sorting algorithm using a heap is called heapsort. Heapsort is an in-place sort and requires no extra memory. Note that there are many sorting algorithms nowadays. Python uses Timsort. 
Back to Heaps A heap has two properties: Shape property A leaf node at depth $k>0$ can exist only if all the nodes at the previous depth exist. Nodes at any partially filled level are added "from left to right". Heap property For a min-heap, each node in the tree contains a key less than or equal to either of its two children (if they exist). This is also known as the labeling of a "parent node" dominating that of its children. For max heaps we use greater-than-or-equal. Heap Mechanics The first element in the array is the root key The next two elements make up the first level of children. This is done from left to right Then the next four and so on. More Details on Heap Mechanics To construct a heap, insert each new element that comes in at the left-most open spot. This maintains the shape property but not the heap property. Restore the Heap Property by "Bubbling Up" Look at the parent and if the child "dominates" we swap parent and child. Repeat this process until we bubble up to the root. Identifying the dominant is now easy because it will be at the top of the tree. This process is called heapify and must also be done at the first construction of the heap. Deletion Removing the dominant key creates a hole at the top (the first position in the array). Fill this hole with the rightmost position in the array, or the rightmost leaf node. This destroys the heap property! So we now bubble this key down until it dominates all its children. Example Construct a min-heap for the array $$\left[1, 8, 5, 9, 23, 2, 45, 6, 7, 99, -5\right].$$ Delete $-5$ and update the min-heap. Iterables/Iterators Again We have been discussing data structures and simultaneously exploring iterators and iterables. 
End of explanation
"""
# A Sentence is iterable: iter() on it returns a fresh SentenceIterator,
# so it can drive a for loop directly.
a = Sentence("Dogs will save the world and cats will eat it.")
for item in a:
    print(item)
print("\n")
# Drive the same iteration by hand: next() pulls one word at a time
# until the iterator signals exhaustion by raising StopIteration.
it = iter(a) # it is an iterator
while True:
    try:
        nextval = next(it)
        print(nextval)
    except StopIteration:
        del it
        break
"""
Explanation: Every collection in Python is iterable.
We have already seen iterators are used to make for loops. They are also used to make other collections:
To loop over a file line by line from disk
In the making of list, dict, and set comprehensions
In unpacking tuples
In parameter unpacking in function calls (*args syntax)
An iterator defines both __iter__ and a __next__ (the first one is only required to make sure an iterator is an iterable).
Recap: An iterator retrieves items from a collection. The collection must implement __iter__.
Generators
A generator function looks like a normal function, but yields values instead of returning them. The syntax is (unfortunately) the same otherwise (PEP 255 -- Simple Generators).
A generator is a different beast.
When the function runs, it creates a generator.
The generator is an iterator and gets an internal implementation of __iter__ and __next__.
End of explanation
"""
def gen123():
    # Each `yield` suspends the body here and hands the value to the
    # caller of next(); execution resumes after the yield on the next call.
    print("A")
    yield 1
    print("B")
    yield 2
    print("C")
    yield 3

# Calling a generator function does NOT run its body; it builds and
# returns a generator object (which is an iterator).
g = gen123()
print(gen123, " ", type(gen123), " ", type(g))
print("A generator is an iterator.")
print("It has {} and {}".format(g.__iter__, g.__next__))
"""
Explanation: Some notes on generators
When next is called on the generator, the function proceeds until the first yield. The function body is now suspended and the value in the yield is then passed to the calling scope as the outcome of the next.
When next is called again, it gets __next__ called again (implicitly) in the generator, and the next value is yielded. This continues until we reach the end of the function, the return of which creates a StopIteration in next.
Any Python function that has the yield keyword in its body is a generator function. End of explanation """ for i in gen123(): print(i, "\n") """ Explanation: More notes on generators Generators yield one item at a time In this way, they feed the for loop one item at a time End of explanation """
davidbrough1/pymks
notebooks/stats_checker_board.ipynb
mit
import pymks %matplotlib inline %load_ext autoreload %autoreload 2 import numpy as np import matplotlib.pyplot as plt """ Explanation: Checkerboard Microstructure Introduction - What are 2-Point Spatial Correlations (also called 2-Point Statistics)? The purpose of this example is to introduce 2-point spatial correlations and how they are computed, using PyMKS. The example starts with some introductory information about spatial correlations. PyMKS is used to compute both the periodic and non-periodic 2-point spatial correlations (also referred to as 2-point statistics or autocorrelations and crosscorrelations) for a checkerboard microstructure. This is a relatively simple example that allows an easy discussion of how the spatial correlations capture the main features seen in the original microstructure. If you would like more technical details about 2-point statistics please see the theory section. End of explanation """ from pymks.datasets import make_checkerboard_microstructure X = make_checkerboard_microstructure(square_size=21, n_squares=8) """ Explanation: 2-Point Statistics for Checkerboard Microstructure Let's first start with making a microstructure that looks like a 8 x 8 checkerboard. Although this type of microstructure may not resemble a physical system, it provides solutions that give some intuitive understanding of 2-point statistics. We can create a checkerboard microstructure using make_checkerboard_microstructure function from pymks.datasets. End of explanation """ from pymks.tools import draw_microstructures draw_microstructures(X) print X.shape """ Explanation: Now let's take a look at how the microstructure looks. 
End of explanation """ from pymks.stats import autocorrelate from pymks import PrimitiveBasis p_basis = PrimitiveBasis(n_states=2) X_auto = autocorrelate(X, p_basis, periodic_axes=(0, 1)) """ Explanation: Compute Periodic 2-Point Statistics Now that we have created a microstructure to work with, we can start computing the 2-point statistics. Let's start by looking at the periodic autocorrelations of the microstructure and then compute the periodic crosscorrelation. This can be done using the autocorrelate and crosscorrelate functions from pymks.states, and using the keyword argument periodic_axes to specify the axes that are periodic. In order to compute 2-pont statistics, we need to select a basis to generate the microstructure function X_ from the microstructure X. Because we only have values of 0 or 1 in our microstructure we will using the PrimitiveBasis with n_states equal to 2. End of explanation """ from pymks.tools import draw_autocorrelations correlations = [('black', 'black'), ('white', 'white')] draw_autocorrelations(X_auto[0], autocorrelations=correlations) """ Explanation: We have now computed the autocorrelations. Let's take a look at them using draw_autocorrelations from pymks.tools. End of explanation """ center = (X_auto.shape[1] + 1) / 2 print 'Volume fraction of black phase', X_auto[0, center, center, 0] print 'Volume fraction of white phase', X_auto[0, center, center, 1] """ Explanation: Notice that for this checkerboard microstructure, the autocorrelation for these 2 local states in the exact same. We have just computed the periodic autocorrelations for a perfectly periodic microstructure with equal volume fractions. In general this is not the case and the autocorrelations will be different, as we will see later in this example. As mentioned in the introduction, because we using an indicator basis and the we have eigen microstructure functions (values are either 0 or 1), the (0, 0) vector equals the volume fraction. 
Let's double check that both the phases have a volume fraction of 0.5. End of explanation """ from pymks.stats import crosscorrelate X_cross = crosscorrelate(X, p_basis, periodic_axes=(0, 1)) """ Explanation: We can compute the cross-correlation of the microstructure function, using the crosscorrelate function from pymks.stats End of explanation """ from pymks.tools import draw_crosscorrelations correlations = [('black', 'white')] draw_crosscorrelations(X_cross[0], crosscorrelations=correlations) """ Explanation: Let's take a look at the cross correlation using draw_crosscorrelations from pymks.tools. End of explanation """ print 'Center value', X_cross[0, center, center, 0] """ Explanation: Notice that the crosscorrelation is the exact opposite of the 2 autocorrelations. The (0, 0) vector has a value of 0. This statistic reflects the probablity of 2 phases having the same location. In our microstructure, this probability is zero, as we have not allowed the two phases (colored black and white) to co-exist in the same spatial voxel. Let's check that it is zero. End of explanation """ from pymks.stats import correlate X_corr = correlate(X, p_basis) """ Explanation: Compute Non-Periodic 2-Point Statistics We will now compute the non-periodic 2-point statistics for our microstructure. This time, rather than using the autocorrelate and crosscorrelate functions, we will use the correlate function from pymks.stats. The correlate function computes all of the autocorrelations and crosscorrelations at the same time. We will compute the non-periodic statistics by omitting the keyword argument periodic_axes. End of explanation """ from pymks.tools import draw_correlations correlations = [('black', 'black'), ('white', 'white'), ('black', 'white')] draw_correlations(X_corr[0].real, correlations=correlations) """ Explanation: All or some of the correlations can be viewed, using the draw_correlations function from pymks.tools. In this example we will look at all of them. 
End of explanation """ print 'Volume fraction of black phase', X_corr[0, center, center, 0] print 'Volume fraction of white phase', X_corr[0, center, center, 1] """ Explanation: Notice that the maximum values for the autocorrelations are higher than 0.5. We can still show that the centers or the (0, 0) vectors are still equal to the volume fractions. End of explanation """
neurodata/ndparse
examples/isbi2012_deploy.ipynb
apache-2.0
%load_ext autoreload %autoreload 2 %matplotlib inline import sys, os, copy, logging, socket, time import numpy as np import pylab as plt #from ndparse.algorithms import nddl as nddl #import ndparse as ndp sys.path.append('..'); import ndparse as ndp try: logger except: # do this precisely once logger = logging.getLogger("deploy_model") logger.setLevel(logging.DEBUG) ch = logging.StreamHandler() ch.setFormatter(logging.Formatter('[%(asctime)s:%(name)s:%(levelname)s] %(message)s')) logger.addHandler(ch) """ Explanation: Example: Deploying a Classifier This notebook shows how one might use a previously trained deep learning model to classify a subset of the ISBI 2012 data set. This assumes you have access to the ISBI 2012 data, which is available as a download from the ISBI challenge website or via an ndparse database call (see example below). It also assumes you have a local copy of trained weights for a Keras deep learning model; one example weights file is checked into this repository which will provide reasonable (but not state-of-the-art) results. You will also need to have Keras (along with a suitable backend - we use Theano) installed. 
Step 1: setup python environment End of explanation """ print("Running on system: %s" % socket.gethostname()) # Load previously trained CNN weights weightsFile = './isbi2012_weights_e025.h5' if True: # Using a local copy of data volume #inDir = '/Users/graywr1/code/bio-segmentation/data/ISBI2012/' inDir = '/home/pekalmj1/Data/EM_2012' Xtrain = ndp.nddl.load_cube(os.path.join(inDir, 'train-volume.tif')) Ytrain = ndp.nddl.load_cube(os.path.join(inDir, 'train-labels.tif')) Xtest = ndp.nddl.load_cube(os.path.join(inDir, 'test-volume.tif')) else: # example of using ndio database call import ndio.remote.neurodata as ND tic = time.time() nd = ND() token = 'kasthuri11cc' channel = 'image' xstart, xstop = 5472, 6496 ystart, ystop = 8712, 9736 zstart, zstop = 1000, 1100 res = 1 Xtest = nd.get_cutout(token, channel, xstart, xstop, ystart, ystop, zstart, zstop, resolution=res) Xtest = np.transpose(Xtest, [2, 0, 1]) Xtest = Xtest[:, np.newaxis, :, :] # add a channel dimension print 'time elapsed is: {} seconds'.format(time.time()-tic) # show some details. Note that data tensors are assumed to have dimensions: # (#slices, #channels, #rows, #columns) # print('Test data shape is: %s' % str(Xtest.shape)) plt.imshow(Xtest[0,0,...], interpolation='none', cmap='bone') plt.title('test volume, slice 0') plt.gca().axes.get_xaxis().set_ticks([]) plt.gca().axes.get_yaxis().set_ticks([]) plt.show() """ Explanation: Step 2: Load data and model weights End of explanation """ # In the interest of time, only deploy on one slice (z-dimension) of the test volume # *and* only evaluate a subset of the pixels in that slice. # # Note: depending upon your system (e.g. CPU vs GPU) this may take a few minutes... 
# tic = time.time() P0 = ndp.nddl.fit(Xtest, weightsFile, slices=[0,], evalPct=.1, log=logger) print("Time to deploy: %0.2f sec" % (time.time() - tic)) # The shape of the probability estimate tensor is: # (#slices, #classes, #rows, #cols) print('Class probabilities shape: %s' % str(P0.shape)) """ Explanation: Step 3: Deploy the model End of explanation """ # Use a simple interpolation scheme to fill in "missing" values # (i.e. those pixels we did not evaluate using the CNN). # Pint = ndp.nddl.interpolate_nn(P0) # visualize plt.imshow(P0[0,0,...]); plt.colorbar() plt.gca().axes.get_xaxis().set_ticks([]) plt.gca().axes.get_yaxis().set_ticks([]) plt.title('Class Estimates (slice 0, subsampled)') plt.show() plt.imshow(Pint[0,0,...]); plt.colorbar() plt.title('Class Estimates: (slice 0, interpolated)') plt.gca().axes.get_xaxis().set_ticks([]) plt.gca().axes.get_yaxis().set_ticks([]) plt.show() """ Explanation: Step 4: Postprocessing Note: in order to do actual science, one would use more sophisticated postprocessing (and also put more effort into the CNN design). End of explanation """
McIntyre-Lab/ipython-demo
r_inside_ipython_pt1.ipynb
gpl-2.0
# Imports import numpy as np import scipy as sp import pandas as pd import matplotlib.pyplot as plt %matplotlib inline # Load R magic %load_ext rmagic # Make data to plot in python x = np.random.uniform(0, 1000, size=1000) y = np.random.normal(1000, size=1000) # Plot using matplotlib plt.scatter(x=x, y=y, color='k') plt.xlabel('X', fontsize=14) plt.ylabel('Y', fontsize=14) plt.grid() """ Explanation: R inside of IPython Part 1 Now IPython notebook is nice, but what if I prefer ggplot2. First try matplotlib, it is really nice. But if you still want to use ggplot2 than you can access R from python using the rpy2 module demonstrated here. End of explanation """ %Rpush x y %R plot(x, y) """ Explanation: Here is a simple plot using R. First we push our x and y arrays over to R and then plot them. End of explanation """ %%R -i x,y library(ggplot2) dat = data.frame(x=x, y=y) ggplot(dat, aes(x=x, y=y)) + geom_point() """ Explanation: A little more complicated plot can be done using the R cell magic command %%R. Here everything in the cell is called with R. End of explanation """
chbehrens/pr_bc_connectivity-1
RBC_subtypes.ipynb
gpl-3.0
import numpy as np from scipy.stats import itemfreq from scipy.io import loadmat from scipy.spatial import ConvexHull import pandas as pd import matplotlib import matplotlib.pyplot as plt import seaborn as sns from PIL import Image from PIL import ImageDraw from sklearn.mixture import GMM from shapely.geometry import Polygon %matplotlib inline matplotlib.rcParams.update({'font.size': 14}) matplotlib.rcParams['font.family'] = 'sans-serif' matplotlib.rcParams['font.sans-serif'] = ['Arial'] sns.set_style("whitegrid") """ Explanation: Analysis of possible RBC subtypes The code in this notebook analyses differences between the RBCs contacting cones and rods and those contacting only rods in order to find evidence for or against two types of RBCs End of explanation """ cone_contacts=pd.read_pickle('data/cone_contact_predictions') rod_contacts=pd.read_pickle('data/rod_contact_predictions') BC_IDs=np.loadtxt('data/BC_IDs_new') ID_table=np.loadtxt('data/cell_IDs.csv',dtype=int,delimiter=',') BC_in_rod_area=np.loadtxt('data/BC_in_rod_area') BC_excluded=np.array([691,709,827,836]) rbc=BC_IDs[(BC_IDs[:,4]>70)&np.in1d(BC_IDs[:,0],BC_in_rod_area)&np.in1d(BC_IDs[:,0],BC_excluded,invert=True),0] rbc_rod_contacts=rod_contacts.ix[(rod_contacts['prediction']==1)] rbc_rod_contacts=rbc_rod_contacts[np.in1d(rbc_rod_contacts['cell'],rbc)].reset_index().drop('index',axis=1) rbc_cone_contacts=cone_contacts.ix[(cone_contacts['prediction']==1)] rbc_cone_input=np.unique(rbc_cone_contacts[np.in1d(rbc_cone_contacts['cell'],BC_excluded,invert=True)&\ np.in1d(rbc_cone_contacts['cell'],BC_IDs[BC_IDs[:,4]==71,0])]['cell']).astype(int) rbc_rod_input=BC_IDs[(BC_IDs[:,4]==71)&np.in1d(BC_IDs[:,0],rbc_cone_input,invert=True)&np.in1d(BC_IDs[:,0],BC_excluded,invert=True),0].astype(int) rbc_cone_contacts=rbc_cone_contacts[np.in1d(rbc_cone_contacts['cell'],rbc)].reset_index().drop('index',axis=1) #mean(on_sac)=6327, mean(off_sac)=5403 x_start=-5403*(0.62-0.28)/(6327-5403)+0.28 
x_end=10000*(0.62-0.28)/(6327-5403)+x_start x_scale=np.linspace(x_start*100,x_end*100,10000) skeleton_data=loadmat('data/skeletons_OPL_final.mat') skeleton_ids=skeleton_data['kn_allSkeletons_e2006_IDs'].flatten() skeletons=skeleton_data['kn_allSkeletons_e2006'].flatten() M=np.loadtxt('data/coordinate_rotation') soma_data=loadmat('data/soma_positions.mat') soma_pos=soma_data['kn_e2006_ALLSKELETONS_FINAL2012_allSomata'] soma_internal_ids=soma_data['kn_e2006_ALLSKELETONS_FINAL2012_cellIDs'] soma_line_ids=soma_data['kn_e2006_ALLSKELETONS_FINAL2012_cellIDs_pure_forSomata'] soma_pos=np.dot(M,soma_pos[:,:3].T).T soma_positions=[] for i in range(BC_IDs.shape[0]): soma_positions.append(soma_pos[soma_line_ids[0,np.where(soma_internal_ids==BC_IDs[i,1])[1][0]]-1,:]) soma_positions=np.array(soma_positions) """ Explanation: Import data End of explanation """ stat_bc_contacts=pd.DataFrame(rbc,columns=['cell']) contact_freq_rbc=itemfreq(rbc_rod_contacts['cell'].as_matrix()) for i in range(stat_bc_contacts.shape[0]): stat_bc_contacts.loc[i,'count']=0 try: stat_bc_contacts.ix[i,'count']=contact_freq_rbc[contact_freq_rbc[:,0]==stat_bc_contacts.ix[i,'cell'],1] except ValueError: continue for i in range(stat_bc_contacts.shape[0]): if stat_bc_contacts.ix[i,'cell'] in np.unique(rbc_cone_contacts['cell']): stat_bc_contacts.loc[i,'cone_contacts']=1 else: stat_bc_contacts.loc[i,'cone_contacts']=0 sns.set(font='Arial',style='white',context='paper',rc={"xtick.major.size": 0, "ytick.major.size": 4}) with matplotlib.rc_context({"lines.linewidth": 0.7}): plt.figure(figsize=(3/2.54,4/2.54)) ax=sns.pointplot(x='cone_contacts',y='count',data=stat_bc_contacts,ci=95,order=[0,1], \ linestyles='',markers='s',scale=1.5,color='black') ax.set(ylabel='# rods',ylim=(0,42),yticks=[0,10,20,30,40],xlabel='',xticklabels=['Rods\nonly','Rods +\ncones'],xticks=[-0.1,1.1]) sns.despine() ax.spines['left'].set_position(('outward',3)) # plt.savefig('figures/rbc_rod_contact_comparison.svg',bbox_inches='tight',dpi=300) 
plt.show() """ Explanation: Number of rod contacts (Supp. figure 3B) End of explanation """ connectivity=loadmat('data/Helmstaedter_et_al_SUPPLinformation5.mat')['kn_allContactData_Interfaces_duplCorr_output_IDconv'] rbc_ids=ID_table[ID_table[:,3]==71,0] ac_ids=ID_table[ID_table[:,3]==24,0] rbc_ac_contacts=connectivity[np.in1d(connectivity[:,0],ac_ids)&np.in1d(connectivity[:,1],rbc_ids)] ac_connectivity=pd.DataFrame(BC_IDs[(BC_IDs[:,4]==71)&np.in1d(BC_IDs[:,0],BC_excluded,invert=True),0],columns=['rbc']) for i in range(ac_connectivity.shape[0]): if ac_connectivity.ix[i,'rbc'] in rbc_cone_input: ac_connectivity.loc[i,'cone_input']=1 else: ac_connectivity.loc[i,'cone_input']=0 ac_connectivity.loc[i,'area']=np.sum(rbc_ac_contacts[rbc_ac_contacts[:,1]==ac_connectivity.ix[i,'rbc'],2]) ac_connectivity=ac_connectivity[ac_connectivity['area']>0].reset_index().drop('index',axis=1) sns.set(font='Arial',style='white',context='paper',rc={"xtick.major.size": 0, "ytick.major.size": 4,"mathtext.default":'regular'}) with matplotlib.rc_context({"lines.linewidth": 0.7}): plt.figure(figsize=(3/2.54,4/2.54)) # plt.figure(figsize=(3,4)) ax=sns.pointplot(x='cone_input',y='area',data=ac_connectivity,ci=95,order=[0,1],\ linestyles='',markers='s',scale=1.5,color='black') ax.set(ylim=(0,27),yticks=[0,5,10,15,20,25],xlabel='',xticklabels=['Rods\nonly','Rods +\ncones'],xticks=[-0.1,1.1]) ax.set_ylabel('AII contact area [$\mu m^2$]') sns.despine() ax.spines['left'].set_position(('outward',3)) # plt.savefig('figures/rbc_AII_contacts.svg',bbox_inches='tight',dpi=300) plt.show() """ Explanation: AII connectivity (Supp. 
figure 3C) End of explanation """ volume_density=np.loadtxt('data/density_data_BC_flattened.gz') volume_density=volume_density[:,BC_IDs[:,3]==71] plt.figure(figsize=(8/2.54,3/2.54)) sns.set(font='Arial',style='white',context='paper',rc={"xtick.major.size": 4, "ytick.major.size": 0}) plt.fill_between(x_scale,(np.mean(volume_density[:,rbc_cone_input-697],axis=1)-np.std(volume_density[:,rbc_cone_input-697],axis=1)/np.sqrt(len(rbc_cone_input)-1))\ /np.sum(np.mean(volume_density[:,rbc_cone_input-697],axis=1)),\ (np.mean(volume_density[:,rbc_cone_input-697],axis=1)+np.std(volume_density[:,rbc_cone_input-697],axis=1)/np.sqrt(len(rbc_cone_input)-1))\ /np.sum(np.mean(volume_density[:,rbc_cone_input-697],axis=1)),facecolor='0.5',alpha=0.3) plt.fill_between(x_scale,(np.mean(volume_density[:,rbc_rod_input-697],axis=1)-np.std(volume_density[:,rbc_rod_input-697],axis=1)/np.sqrt(len(rbc_rod_input)-1))\ /np.sum(np.mean(volume_density[:,rbc_rod_input-697],axis=1)),\ (np.mean(volume_density[:,rbc_rod_input-697],axis=1)+np.std(volume_density[:,rbc_rod_input-697],axis=1)/np.sqrt(len(rbc_rod_input)-1))\ /np.sum(np.mean(volume_density[:,rbc_rod_input-697],axis=1)),facecolor='0.7',alpha=0.3) plt.plot(x_scale,np.sum(volume_density[:,rbc_cone_input-697],axis=1)/np.sum(volume_density[:,rbc_cone_input-697]),label='rod and cone contacts',c='0.3') plt.plot(x_scale,np.sum(volume_density[:,rbc_rod_input-697],axis=1)/np.sum(volume_density[:,rbc_rod_input-697]),label='rod contacts only',c='0.6') plt.xlabel('IPL depths [%]') plt.ylabel('Density') plt.yticks([]) plt.xlim(50,110) sns.despine() plt.legend(bbox_to_anchor=(0.65, 1.13)) # plt.savefig('figures/rbc_density_comparision.svg',bbox_inches='tight',dpi=300,transparent=True) plt.show() """ Explanation: Density from flattened OFF- and ON-SAC (Supp. 
figure 3A, top) End of explanation """ rbc_densities=np.loadtxt('data/density_data_RBC_ON_SAC_alignment.gz') plt.figure(figsize=(8/2.54,3/2.54)) # plt.figure(figsize=(8,3)) sns.set(font='Arial',style='white',context='paper',rc={"xtick.major.size": 4, "ytick.major.size": 0}) plt.fill_between(x_scale,(np.mean(rbc_densities[:,rbc_cone_input-697],axis=1)-np.std(rbc_densities[:,rbc_cone_input-697],axis=1)/np.sqrt(len(rbc_cone_input)-1))\ /np.sum(np.mean(rbc_densities[:,rbc_cone_input-697],axis=1)),\ (np.mean(rbc_densities[:,rbc_cone_input-697],axis=1)+np.std(rbc_densities[:,rbc_cone_input-697],axis=1)/np.sqrt(len(rbc_cone_input)-1))\ /np.sum(np.mean(rbc_densities[:,rbc_cone_input-697],axis=1)),facecolor='0.5',alpha=0.3) plt.fill_between(x_scale,(np.mean(rbc_densities[:,rbc_rod_input-697],axis=1)-np.std(rbc_densities[:,rbc_rod_input-697],axis=1)/np.sqrt(len(rbc_rod_input)-1))\ /np.sum(np.mean(rbc_densities[:,rbc_rod_input-697],axis=1)),\ (np.mean(rbc_densities[:,rbc_rod_input-697],axis=1)+np.std(rbc_densities[:,rbc_rod_input-697],axis=1)/np.sqrt(len(rbc_rod_input)-1))\ /np.sum(np.mean(rbc_densities[:,rbc_rod_input-697],axis=1)),facecolor='0.7',alpha=0.3) plt.plot(x_scale,np.sum(rbc_densities[:,rbc_cone_input-697],axis=1)/np.sum(rbc_densities[:,rbc_cone_input-697]),label='rod and cone contacts',c='0.3') plt.plot(x_scale,np.sum(rbc_densities[:,rbc_rod_input-697],axis=1)/np.sum(rbc_densities[:,rbc_rod_input-697]),label='rod contacts only',c='0.6') plt.xlabel('IPL depths [%]') plt.ylabel('Density') plt.yticks([]) plt.xlim(50,110) sns.despine() plt.legend(bbox_to_anchor=(0.65, 1.13)) # plt.savefig('figures/rbc_density_comparision_on_sac_only.svg',bbox_inches='tight',dpi=300,transparent=True) plt.show() """ Explanation: Density aligned on ON-SAC (Supp. 
figure 3A, bottom) End of explanation """ #Function definition for mosaic plotting def plot_mosaic(selection,layer='OPL'): im=Image.new('RGBA',(4800,3600),(255,255,255,0)) draw = ImageDraw.Draw(im) draw.line([(0,0),(0,3599)],fill=(0,0,255,255),width=1) draw.line([(0,3599),(4799,3599)],fill=(0,0,255,255),width=1) draw.line([(4799,3599),(4799,0)],fill=(0,0,255,255),width=1) draw.line([(4799,0),(0,0)],fill=(0,0,255,255),width=1) del draw for cell in BC_IDs[selection,0]: nodes_cell=np.empty((0,3)) for skel in np.where(skeleton_ids==cell)[0]: nodes=skeletons[skel].item()[list(skeletons[skel].dtype.names).index('nodes')][:,:3]/[16.5,16.5,25] if nodes.shape[0]<2: continue nodes=np.dot(M,nodes.T).T edges=skeletons[skel].item()[list(skeletons[skel].dtype.names).index('edges')] soma_pos_cell=soma_positions[BC_IDs[:,0]==cell,0]*16.5 draw = ImageDraw.Draw(im) if layer=='OPL': nodes=(nodes*[16.5,16.5,25]+[0,3250,15000]).astype(int) for i in range(edges.shape[0]): if (nodes[edges[i,0]-1,0]<soma_pos_cell) and (nodes[edges[i,1]-1,0]<soma_pos_cell): draw.line([tuple(nodes[edges[i,0]-1,1:3]/[25,25]),tuple(nodes[edges[i,1]-1,1:3]/[25,25])],fill=(0,0,0,150),width=5) del draw nodes=nodes[np.unique(edges).astype(int)-1,:] nodes_cell=np.concatenate((nodes_cell,nodes[nodes[:,0]<soma_pos_cell,0:3]),axis=0) elif layer=='IPL': nodes=(nodes*[16.5,16.5,25]+[0,5000,22750]).astype(int) for i in range(edges.shape[0]): if (nodes[edges[i,0]-1,0]>soma_pos_cell) and (nodes[edges[i,1]-1,0]>soma_pos_cell): draw.line([tuple(nodes[edges[i,0]-1,1:3]/[25,25]),tuple(nodes[edges[i,1]-1,1:3]/[25,25])],fill=(0,0,0,150),width=5) del draw nodes=nodes[np.unique(edges).astype(int)-1,:] nodes_cell=np.concatenate((nodes_cell,nodes[nodes[:,0]>soma_pos_cell,0:3]),axis=0) else: print("Layer has to be 'IPL' or 'OPL'") return im if nodes_cell.shape[0]>2: nodes_cell=nodes_cell[:,1:]/[25,25] hull=ConvexHull(nodes_cell) draw = ImageDraw.Draw(im) for simplex in hull.simplices: 
draw.line([tuple(nodes_cell[simplex[0],:]),tuple(nodes_cell[simplex[1],:])],fill=(0,0,255,255),width=5) del draw return im plot_mosaic(rbc_rod_input-390,'OPL') plot_mosaic(rbc_rod_input-390,'IPL') plot_mosaic(rbc_cone_input-390,'OPL') plot_mosaic(rbc_cone_input-390,'IPL') """ Explanation: Plot dendritic fields/axon terminals (Supp. figure 3D) End of explanation """
hschh86/usersong-extractor
documents/Investigationing.ipynb
mit
from __future__ import print_function, division import itertools import re # numpy imports import numpy as np import matplotlib.pyplot as plt %matplotlib inline def hexbyte(x): return "{:02X}".format(x) def binbyte(x): return "{:08b}".format(x) def tohex(by, sep=" "): return sep.join(hexbyte(x) for x in by) def tobin(by, sep=" "): return sep.join(binbyte(x) for x in by) def hexline(by): if len(by) <= 24: return tohex(by) else: return tohex(by[:20]) + "..." + tohex(by[-4:]) def takebyn(by, n=8): for i in range(0, len(by), n): yield by[i:i+n] def itertaken(seq, n): itr = iter(seq) try: while True: group = [] for _ in range(n): group.append(next(itr)) yield group except StopIteration: if group: yield group def printhex(by, n=8, group=1): grouped = ("".join(hexbyte(x) for x in g) for g in takebyn(by, group)) for x in itertaken(grouped, n): print(" ".join(x)) with open('data/syxout.txt') as hexdata: lines = [bytearray.fromhex(line.strip()) for line in hexdata] """ Explanation: Initial Investigations looking thru: https://www.midi.org/forum/279-yamaha-bull-dump-data-format, http://rnhart.net/articles/bulk-dump.htm End of explanation """ for line in lines: print(hexline(line)) lb = lines[0] ls = lines[-4] """ Explanation: Picking at the message structure End of explanation """ tohex(lb[:7]) """ Explanation: Header sysex-flag/manufacturer/device/data type header: F0 43 73 7F 44 06. If this is using the same conventions as the PSR-225, then 0A is for song data and 09 is for the one-touch panel bank End of explanation """ tohex(lb[7:11]) """ Explanation: 'SS' Size bytes. There appear to be four of them. At the EOS this is '00 01 00 01' the longest messages have this as '10 00 10 00', the last message before EOS has this as '08 68 08 68'. It appears to be two repeated (at least, in the case of the 0A. The 09 data, with only one message, appears to be different again.) End of explanation """ tohex(lb[11:14]) """ Explanation: three 'ZZ' running total bytes. 
At the EOS this is '7F 7F 7F', then immediately followed by the end F7. End of explanation """ tohex(lb[-2:]) """ Explanation: the 'KK' checksum and end-of-message flag F7. End of explanation """ print([sum(line[7:-1]) % 128 for line in lines]) """ Explanation: This should, and I say should, be of a similar format, so... If this works, sum of SS SS SS to KK inclusive should be 0 (mod 128). End of explanation """ def unpackseven(by): # Ignore the first bit, which is always zero. # The most signifcant bit comes first value = 0 for i, b in enumerate(reversed(by)): value |= b << (i*7) return value def unpacksevenhex(hexstring): return "{:#x}".format(unpackseven(bytearray.fromhex(hexstring))) unpacksevenhex('10 00') len(lb[14:-2]) """ Explanation: It worked, except for the two end-of-section messages, which don't have the checksum byte. Checking the Numbers End of explanation """ unpacksevenhex('08 68') unpackseven(ls[7:9]) == len(ls[14:-2]) len(ls[14:-2]) """ Explanation: 0x800, or 2048 bytes of encoded data seems to be the maximum. the ZZ bytes appear to increment by this amount. that penultimate message should be 0x468, or 1128, bytes... End of explanation """ payloads = [line[14:-2] for line in lines[:-3]] payload = bytearray().join(payloads) """ Explanation: Yep. What's all that data? Instead encoding one byte to two by nybble, with 0F 07 == 7F like in the PSR-225, it seems that the 'payload' data uses all seven available bits, which could mean several things. End of explanation """ len(payload)/8 """ Explanation: From staring at the 'payload' in a hex editor, I have recognised some patterns. I think that every 8th byte is some sort of 'parity' byte or something. Ignoring these bytes you can see the ascii for 'YAMAHA' and 'MTrk' and 'PresetStyle\x00'. So that's a start. 
End of explanation """ pays = np.array(payload).reshape((9613,8)) plt.plot(pays[:,7], 'r.') np.asarray(np.unique(pays[:,7], return_counts=True)).T tobin(pays[pays[:,7]==65,:][2]) tobin(np.unique(pays[:,7])) """ Explanation: What Is That Eighth Byte? End of explanation """ def reconstitute(by): blob = by[:7] for i in range(7): bitmask = 0b01000000 >> i blob[i] = blob[i] | ((by[7] & bitmask) << i+1) return blob def reconstitute_all(by): return bytearray().join(reconstitute(g) for g in takebyn(by, 8)) payload8 = reconstitute_all(payload) """ Explanation: The Solution If I had to squeeze 8 bits down into seven, how would I do it? Maybe, we take each of the seven bits of the eighth byte and stick them in front of the previous seven? End of explanation """ map(tohex, re.findall(r'\xF0[\x00-\x7F]*[\x80-\xFF]', payload8)) """ Explanation: Note that whenever an F0 occurs it is always followed by F7: End of explanation """ map(tohex, re.findall(r'\x43\x76\x1A\x04.{2}', payload8)) """ Explanation: It sure looks like there are some system exclusive messages in here! (Stored in MIDI file format, of course.) Leafing through the manual (pg 110-111), it seems these are the GM System / volume / tuning / reverb / chorus events And here are some style meta-events: End of explanation """ len(payload8) """ Explanation: Figuring Out The File Structure Assuming the file structure is similar to the PSR-225's block system... End of explanation """ printhex(payload8[:0x15a], 16) """ Explanation: It looks like there are 67291 bytes of decoded data instead of the 66766 bytes in the PSR-225. End of explanation """ printhex(payload8[0x15a:0x162]) """ Explanation: I have no idea what the first 0x15A bytes are. Maybe it's a bunch of 16-bit numbers? End of explanation """ track_bitfields = slice(0x15d, 0x162) print(tobin(payload8[track_bitfields], '\n')) """ Explanation: The DGX-505 has 5 UserSongs compared to the PSR-225's three. 
Each user song has five normal tracks and one chord/accompaniment track (track A). At the time this dump was gathered, all five songs had data, and used the following tracks as displayed on the panel: 1. 1 _ _ _ _ A 2. 1 2 3 _ _ _ 3. 1 2 3 4 5 _ 4. 1 _ 3 _ _ _ 5. 1 2 3 4 _ _ End of explanation """ printhex(payload8[0x162:0x1f3], 16) """ Explanation: The five bytes from 0x15d to 0x161 appear to correspond to the five usersongs, with the low 5 bits of each representing the active tracks on each (from least to most significant bit, track 1 - 2 - 3 - 4 - 5 - A). This corresponds to offsets 0x00001-0x00003 on the PSR-225. Still don't know what 64 02 BC is, though. End of explanation """ printhex(payload8[0x1f3:0x22f], 12) payload8[0x1f3:0x22f].decode('ascii') """ Explanation: Following are some bytes of more unknown stuff which could be 32 bit integers? End of explanation """ beginning_blocks = slice(0x22f,0x24d) printhex(payload8[beginning_blocks], 6) """ Explanation: ... followed by five repeats of ascii 'PresetStyle' and 00. The PSR-225 only has three of these (starting from 0x00007), so this has probably got something to do with each song. End of explanation """ next_blocks = slice(0x24d,0x2cf) printhex(payload8[next_blocks], 16) """ Explanation: Immediately following that, we have what appears to be the 30 bytes from 0x22f through 0x24c indicating the beginning blocks, corresponding to 0x0002B-0x0003F on the PSR-225. Each group of six bytes corresponds to a usersong in order, and each byte corresponds to a track, from 1-5, followed by the time track with the tempo, SysEx, etc. (Compare with the PSR-225, which has 6+1 tracks for each of its 3 songs and therefore needs 21 (=0x15) bytes total.) Blocks are numbered 0x01 to 0x82 and FF indicates that the track is not in use. 
End of explanation """ printhex(payload8[0x2cf:0x2d5]) payload8[0x2cf:0x2d5].decode('ascii') """ Explanation: Running from 0x24d to 0x2ce is the table of 0x82 (=130) next blocks, corresponding to 0x00040-0x000C1 on the PSR-225. 00 indicates unused blocks, FF indicates final blocks. End of explanation """ printhex(payload8[0x106d5:]) payload8[0x106d5:].decode('ascii') """ Explanation: At 0x2cf, we find 'PK0001', which corresponds to the PSR-225's 'PK9801' at 0x000C2, and marks the beginning of the block data at 0x2d5. End of explanation """ (0x106d5-0x2d5)/0x82 """ Explanation: The final six bytes, spelling 'PK0001' again from 0x106d5 to 0x106da, mark the end of the block data and correspond to 0x104C8-0x104CD on the PSR-225. End of explanation """ printhex(payload8[0x2d5:0x2d5+8]) payload8[0x2d5:0x2d5+8] """ Explanation: Like the PSR-225, the blocks are 0x200 = 512 bytes in size. Block 0x01 begins at offset 0x2d5: End of explanation """ def chunk_offset(n): if 0x01 <= n <= 0x82: return 0xd5 + (0x200*n) else: raise ValueError("Invalid chunk", n) def block_peek(n, length=4): offset = chunk_offset(n) return payload8[offset:offset+length] def print_block_peek(n): bits = block_peek(n) print(u"Block 0x{:02x} at 0x{:05x}: {} - {}".format(n, chunk_offset(n), tohex(bits), bits.decode('ascii', 'replace'))) track_blocks = [x for x in payload8[beginning_blocks] if x != 0xFF] for n in track_blocks: print_block_peek(n) """ Explanation: And we can see the start of an MTrk chunk. To calculate the start each block we can use a simple formula offset(n) = 0x2d5 + (0x200 * (n-1)) or, to simplify, offset(n) = 0xd5 + (0x200 * n) End of explanation """ print_block_peek(0x5e) """ Explanation: We can see that the blocks marked as the beginning blocks for each track do, indeed, contain the start of the tracks. 
End of explanation """ printhex(payload8[:0x15d], 16) printhex(payload8[0x162:0x1f3], 16) """ Explanation: We can also see that some blocks have data left over from previous recordings. The Remainder There are two regions left unexplained: End of explanation """ def unpackeight(by): # big-endian. total = 0 for i, b in enumerate(reversed(by)): total |= b << (i*8) return total def unpackwords(by, n=2): # probably not the way to do it, but eh. return [unpackeight(x) for x in takebyn(by, n)] mystery2 = unpackwords(payload8[0x163:0x1f3], 4) plt.bar(np.arange(len(mystery2)), mystery2) """ Explanation: We've accounted for pretty much everything found in the PSR-225's song dump format, except for the very first byte, offset 0x00000, which is supposed to be a bitfield for the songs in use. If the same pattern is used for the DGX-505, then we should expect to see a 00011111 or 1F byte somewhere, as all five songs were in use. The very first byte, offset 0x0, is, in fact, 1F, so that might just be it (I'll have to delete one of the songs to check). There's also one feature 'Step Recording' (offset 0x0004-0x0006 on PSR-225) the DGX-505 doesn't have. If the (abscence of) step-recorded tracks was noted directly after the normal tracks, this would account for five 00 bytes directly after the normal tracks at 0x162, and the rest lines up rather nicely as a bunch of 32 bit numbers... 
End of explanation """ print(tobin(payload8[0x15d:0x162], '\n')) #track_bitfields print('---') printhex(payload8[0x162:0x167]) print('---') printhex(payload8[0x167:0x17b],1,4) print('---') printhex(payload8[0x17b:0x1f3],6,4) """ Explanation: There's definitely a pattern: End of explanation """ printhex(payload8[beginning_blocks], 6) """ Explanation: It looks like after the track-active bitfields comes five zero bytes, then five 32-bit numbers that seem to correspond to the five songs, then 30 32-bit numbers that correspond to the tracks on the songs, with values of 00000000 for inactive tracks and other numbers (lengths?) for the active tracks. The five numbers seem to be the largest (longest?) value of the tracks for the corresponding song. Looking through the values, they seem to be roughly the duration of the tracks, in measures. End of explanation """ printhex(payload8[:0x15d], 16) """ Explanation: The Final Mystery Region End of explanation """ printhex(payload8[:0x19], 16) """ Explanation: Let's describe some patterns. From offset 0x0 to 0x18, there's a bunch of bytes. 0x0 may be the song-usage-bitfield thing. End of explanation """ printhex(payload8[0x19:0x159], 16) """ Explanation: From 0x19 to 0x11c, we get 320 bytes of some increasing and decreasing patterns End of explanation """ printhex(payload8[0x159:0x15d], 16) mystery_region = payload8[0x019:0x159] mystery_numbers = unpackwords(mystery_region) plt.bar(np.arange(160), mystery_numbers, width=1, linewidth=0) """ Explanation: And then four more bytes of whatever. I'm baffled, really. End of explanation """ printhex(mystery_numbers[:-30], 10) print() printhex(payload8[next_blocks], 10) """ Explanation: Staring at this, we can see four regions that run from indexes 0-49, 50-99, 100-129 and 130-159. Note that there are 160 numbers here, and 130 blocks in the file system, and 30 tracks. I'm guessing that this has something to do with the blocks just by the numbers here, but I don't know what. 
Probably some internal file system thing End of explanation """ printhex(payload8[0x0:0x19], 32) printhex(payload8[0x159:0x15d]) """ Explanation: And that still doesn't explain 0x0-0x18, 0x159-0x15c. More experimentation required. End of explanation """ !diff -y --width=100 --suppress-common-lines data/syxout.txt data/syxout1.txt with open('data/syxout1.txt') as newdump: newdumpline = bytearray.fromhex(newdump.readline().strip()) newpayload8 = reconstitute_all(newdumpline[14:-2]) printhex(payload8[:0x19], 32) printhex(newpayload8[:0x19], 32) print(binbyte(payload8[0])) print(binbyte(newpayload8[0])) """ Explanation: More Experimentation I deleted User Song 4 and saved a new dump. The only difference is the first message. End of explanation """ payload8[0x19:0x15d] == newpayload8[0x19:0x15d] """ Explanation: The beginning mystery bytes are the same, except for the first byte, which does indeed seem to be the song usage bitfield, with the least significant bit = user song 1 etc. End of explanation """ print(tobin(newpayload8[0x15d:0x162], '\n')) #track_bitfields print('---') printhex(newpayload8[0x162:0x167]) print('---') printhex(newpayload8[0x167:0x17b],1,4) print('---') printhex(newpayload8[0x17b:0x1f3],6,4) """ Explanation: The mystery region is the same for both files. End of explanation """ printhex(newpayload8[beginning_blocks], 6) print('---') printhex(newpayload8[next_blocks], 16) """ Explanation: Song 4 is not in use anymore, neither should its tracks be. Their durations are all zero as well. End of explanation """ payload8[0x2cf:0x700] == newpayload8[0x2cf:] """ Explanation: Blocks 07, 08, 09 and 0B have been marked empty for reuse. End of explanation """ for line in lines[-2:]: print(hexline(line)) """ Explanation: The rest of the data is the same. Registration Memory, a.k.a. One Touch Settings Let's look at the final two messages. In the PSR-225, the one-touch settings are sent first, but the DGX-505 sends 'em last. 
End of explanation """ otsline = lines[-2] otspayload = otsline[14:-2] unpackseven(otsline[7:9]) unpackseven(otsline[9:11]) len(otspayload) otspayload8 = reconstitute_all(otspayload) len(otspayload8) hexline(otspayload8) """ Explanation: Assuming the messages are of roughly the same format, we have: | field | bytes | content | |-------|-------|---------| |Header | F0 43 73 7F 44 06 | Manufacturer/Device/etc| |Section type | 09 | (one-touch settings)| |Size?? | 06 30 06 2E | Different format? | |Running total | 00 00 00 | First (and only) message, so zero | |Data | 50 53 52 03 01 00...00 00 | payload data| |Checksum | 30 | We checked this earlier| |End tag | F7 | | So what's with 06 30 06 2E? End of explanation """ print(tohex(otspayload8[:4])) print(tohex(otspayload8[-6:])) """ Explanation: The PSR-225 had 1576 encoded bytes (788 bytes decoded) of OTS data. The DGX-505 has 816 encoded bytes (714 bytes decoded), but the two length values differ by two, 06 30 (=816) and 06 2E (=814). The PSR-225 has 4 buttons × 4 banks = 16 settings, each using 0x31 = 49 bytes. The remaining 4 bytes are bitfields for the active banks on each button. The DGX-505 has 2 buttons × 8 banks = 16 settings, and has 714 bytes to store it in, which is not a nice number to work with. Maybe the two different length values mean something. Perhaps there are empty bytes at the end that were included so the 7-bit encoding would work. When the dump was taken, all 16 settings were in use, so maybe we should see two FF bytes? End of explanation """ otsgroups = list(takebyn(otspayload8[4:-6], 44)) for group in otsgroups: print(hexline(group)) """ Explanation: The data begins with 50 53 52 03 and ends with 50 53 52 03 00 00. Perhaps those two extra bytes are the reason for the differing sizes. (As an aside, 50 53 52 is ASCII for 'PSR', but the PSR-225 doesn't have them.) Disregarding these ten bytes for now leaves us with 704 bytes which divide cleanly into 16 groups of 44. 
End of explanation """ def grab_4_2(filename): with open(filename) as hexdata: message = bytearray.fromhex(hexdata.readlines()[-2].strip()) return reconstitute_all(message[14:-2])[4+11*44:4+12*44] # if it's stupid and it works, then... it's still stupid, but hey, at least it works. old_42 = otsgroups[11] new_42 = grab_4_2('data/syxout2.txt') newer_42 = grab_4_2('data/syxout3.txt') def columnise(some_lines, height): for i in range(height): print("".join(some_lines[j] for j in range(i, len(some_lines), height))) print("|ofst| old | new | newer | "*3) columnise(["| {0:02X} | {1:02X} {1:3d} | {2:02X} {2:3d} | {3:02X} {3:3d} | ".format(i, oldv, newv, newerv) for i, (oldv, newv, newerv) in enumerate(zip(old_42, new_42, newer_42))], 16) """ Explanation: Like the PSR-225, the settings are stored by button and then by bank. Decoding the Format According to the manual (page 68), the following settings can be saved: - Style (when using style features): - Style number - Auto accompaniment (ON/OFF) - Split Point - Style Volume - Tempo - Voice: - Main Voice: - Voice Number - Volume - Octave - Pan - Reverb send level - Chorus send level - Dual Voice: - ON/OFF - ... and the same settings as for Main Voice - Split Voice: - ON/OFF - ... and the same settings as for Main Voice - Effect: - Reverb type - Chorus type - Panel Sustain (ON/OFF) - Harmony: - ON/OFF - Harmony type - Harmony volume - Other: - Transpose - Pitch Bend Range After changing every setting from its previous value in Bank 4, button 2, I saved a new dump... and then I did it again, several times. (Bank 4, button 2 wasn't a particularly good setting anyway.) End of explanation """
kubeflow/pipelines
components/gcp/bigquery/query/sample.ipynb
apache-2.0
%%capture --no-stderr !pip3 install kfp --upgrade """ Explanation: Name Gather training data by querying BigQuery Labels GCP, BigQuery, Kubeflow, Pipeline Summary A Kubeflow Pipeline component to submit a query to BigQuery and store the result in a Cloud Storage bucket. Details Intended use Use this Kubeflow component to: * Select training data by submitting a query to BigQuery. * Output the training data into a Cloud Storage bucket as CSV files. Runtime arguments: | Argument | Description | Optional | Data type | Accepted values | Default | |----------|-------------|----------|-----------|-----------------|---------| | query | The query used by BigQuery to fetch the results. | No | String | | | | project_id | The project ID of the Google Cloud Platform (GCP) project to use to execute the query. | No | GCPProjectID | | | | dataset_id | The ID of the persistent BigQuery dataset to store the results of the query. If the dataset does not exist, the operation will create a new one. | Yes | String | | None | | table_id | The ID of the BigQuery table to store the results of the query. If the table ID is absent, the operation will generate a random ID for the table. | Yes | String | | None | | output_gcs_path | The path to the Cloud Storage bucket to store the query output. | Yes | GCSPath | | None | | dataset_location | The location where the dataset is created. Defaults to US. | Yes | String | | US | | job_config | The full configuration specification for the query job. See QueryJobConfig for details. | Yes | Dict | A JSONobject which has the same structure as QueryJobConfig | None | Input data schema The input data is a BigQuery job containing a query that pulls data f rom various sources. Output: Name | Description | Type :--- | :---------- | :--- output_gcs_path | The path to the Cloud Storage bucket containing the query output in CSV format. | GCSPath Cautions & requirements To use the component, the following requirements must be met: The BigQuery API is enabled. 
The component can authenticate to use GCP APIs. Refer to Authenticating Pipelines to GCP for details. The Kubeflow user service account is a member of the roles/bigquery.admin role of the project. The Kubeflow user service account is a member of the roles/storage.objectCreatorrole of the Cloud Storage output bucket. Detailed description This Kubeflow Pipeline component is used to: * Submit a query to BigQuery. * The query results are persisted in a dataset table in BigQuery. * An extract job is created in BigQuery to extract the data from the dataset table and output it to a Cloud Storage bucket as CSV files. Use the code below as an example of how to run your BigQuery job. Sample Note: The following sample code works in an IPython notebook or directly in Python code. Set sample parameters End of explanation """ import kfp.components as comp bigquery_query_op = comp.load_component_from_url( 'https://raw.githubusercontent.com/kubeflow/pipelines/01a23ae8672d3b18e88adf3036071496aca3552d/components/gcp/bigquery/query/component.yaml') help(bigquery_query_op) """ Explanation: Load the component using KFP SDK End of explanation """ QUERY = 'SELECT * FROM `bigquery-public-data.stackoverflow.posts_questions` LIMIT 10' """ Explanation: Sample Note: The following sample code works in IPython notebook or directly in Python code. In this sample, we send a query to get the top questions from stackdriver public data and output the data to a Cloud Storage bucket. 
Here is the query: End of explanation """ # Required Parameters PROJECT_ID = '<Please put your project ID here>' GCS_WORKING_DIR = 'gs://<Please put your GCS path here>' # No ending slash # Optional Parameters EXPERIMENT_NAME = 'Bigquery -Query' OUTPUT_PATH = '{}/bigquery/query/questions.csv'.format(GCS_WORKING_DIR) """ Explanation: Set sample parameters End of explanation """ import kfp.dsl as dsl import json @dsl.pipeline( name='Bigquery query pipeline', description='Bigquery query pipeline' ) def pipeline( query=QUERY, project_id = PROJECT_ID, dataset_id='', table_id='', output_gcs_path=OUTPUT_PATH, dataset_location='US', job_config='' ): bigquery_query_op( query=query, project_id=project_id, dataset_id=dataset_id, table_id=table_id, output_gcs_path=output_gcs_path, dataset_location=dataset_location, job_config=job_config) """ Explanation: Run the component as a single pipeline End of explanation """ pipeline_func = pipeline pipeline_filename = pipeline_func.__name__ + '.zip' import kfp.compiler as compiler compiler.Compiler().compile(pipeline_func, pipeline_filename) """ Explanation: Compile the pipeline End of explanation """ #Specify pipeline argument values arguments = {} #Get or create an experiment and submit a pipeline run import kfp client = kfp.Client() experiment = client.create_experiment(EXPERIMENT_NAME) #Submit a pipeline run run_name = pipeline_func.__name__ + ' run' run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments) """ Explanation: Submit the pipeline for execution End of explanation """ !gsutil cat $OUTPUT_PATH """ Explanation: Inspect the output End of explanation """
sidazhang/udacity-dlnd
intro-to-tflearn/TFLearn_Sentiment_Analysis_Solution.ipynb
mit
Read the data Use the pandas library to read the reviews and positive/negative labels from comma-separated files.
The reviews are stored in the reviews Pandas DataFrame.
def text_to_vector(text):
    """Convert a string of space-separated words into a bag-of-words vector.

    Each element of the returned vector counts how many times the
    corresponding vocabulary word (per the module-level ``word2idx``
    mapping) appears in ``text``. Words outside the vocabulary are
    ignored, so out-of-vocab input simply contributes nothing.

    Parameters
    ----------
    text : str
        The review text; split on single spaces to match the counting
        done when the vocabulary was built.

    Returns
    -------
    np.ndarray of int, shape (len(vocab),)
    """
    word_vector = np.zeros(len(vocab), dtype=np.int_)
    for word in text.split(' '):
        idx = word2idx.get(word)
        # Unknown words have no index -> skip them.
        if idx is not None:
            word_vector[idx] += 1
    # word_vector is already an ndarray; no need to re-wrap/copy it.
    return word_vector
End of explanation """ text_to_vector('The tea is for a party to celebrate ' 'the movie so she has no time for a cake')[:65] """ Explanation: If you do this right, the following code should return ``` text_to_vector('The tea is for a party to celebrate ' 'the movie so she has no time for a cake')[:65] array([0, 1, 0, 0, 2, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0]) ``` End of explanation """ word_vectors = np.zeros((len(reviews), len(vocab)), dtype=np.int_) for ii, (_, text) in enumerate(reviews.iterrows()): word_vectors[ii] = text_to_vector(text[0]) # Printing out the first 5 word vectors word_vectors[:5, :23] """ Explanation: Now, run through our entire review data set and convert each review to a word vector. End of explanation """ Y = (labels=='positive').astype(np.int_) records = len(labels) shuffle = np.arange(records) np.random.shuffle(shuffle) test_fraction = 0.9 train_split, test_split = shuffle[:int(records*test_fraction)], shuffle[int(records*test_fraction):] trainX, trainY = word_vectors[train_split,:], to_categorical(Y.values[train_split], 2) testX, testY = word_vectors[test_split,:], to_categorical(Y.values[test_split], 2) trainY """ Explanation: Train, Validation, Test sets Now that we have the word_vectors, we're ready to split our data into train, validation, and test sets. Remember that we train on the train data, use the validation data to set the hyperparameters, and at the very end measure the network performance on the test data. Here we're using the function to_categorical from TFLearn to reshape the target data so that we'll have two output units and can classify with a softmax activation function. We actually won't be creating the validation set here, TFLearn will do that for us later. 
Therefore, you need to set the number of units to match the target data.
# Network building
def build_model():
    """Construct and return the TFLearn sentiment classifier.

    Architecture: 10000-unit input -> 200 ReLU -> 25 ReLU -> 2-unit
    softmax output, trained with SGD (learning rate 0.1) on a
    categorical cross-entropy loss.
    """
    # Wipe any previously-built graph so repeated calls start fresh
    # (leave this here).
    tf.reset_default_graph()

    # Input layer sized to the 10000-word bag-of-words vectors.
    network = tflearn.input_data([None, 10000])

    # Two fully-connected ReLU hidden layers.
    for n_units in (200, 25):
        network = tflearn.fully_connected(network, n_units, activation='ReLU')

    # Two-class softmax output plus the training configuration.
    network = tflearn.fully_connected(network, 2, activation='softmax')
    network = tflearn.regression(network, optimizer='sgd',
                                 learning_rate=0.1,
                                 loss='categorical_crossentropy')

    return tflearn.DNN(network)
# Helper function that uses your model to predict sentiment
def test_sentence(sentence):
    """Print the trained model's positive-sentiment probability for a sentence."""
    word_vector = text_to_vector(sentence.lower())
    # predict() returns one [P(negative), P(positive)] row per input.
    positive_prob = model.predict([word_vector])[0][1]
    verdict = 'Positive' if positive_prob > 0.5 else 'Negative'
    print('Sentence: {}'.format(sentence))
    print('P(positive) = {:.3f} :'.format(positive_prob), verdict)

sentence = "Moonlight is by far the best movie of 2016."
test_sentence(sentence)

sentence = "It's amazing anyone could be talented enough to make something this spectacularly awful"
test_sentence(sentence)
statsmodels/statsmodels.github.io
v0.13.0/examples/notebooks/generated/statespace_dfm_coincident.ipynb
bsd-3-clause
%matplotlib inline import numpy as np import pandas as pd import statsmodels.api as sm import matplotlib.pyplot as plt np.set_printoptions(precision=4, suppress=True, linewidth=120) from pandas_datareader.data import DataReader # Get the datasets from FRED start = '1979-01-01' end = '2014-12-01' indprod = DataReader('IPMAN', 'fred', start=start, end=end) income = DataReader('W875RX1', 'fred', start=start, end=end) sales = DataReader('CMRMTSPL', 'fred', start=start, end=end) emp = DataReader('PAYEMS', 'fred', start=start, end=end) # dta = pd.concat((indprod, income, sales, emp), axis=1) # dta.columns = ['indprod', 'income', 'sales', 'emp'] """ Explanation: Dynamic factors and coincident indices Factor models generally try to find a small number of unobserved "factors" that influence a substantial portion of the variation in a larger number of observed variables, and they are related to dimension-reduction techniques such as principal components analysis. Dynamic factor models explicitly model the transition dynamics of the unobserved factors, and so are often applied to time-series data. Macroeconomic coincident indices are designed to capture the common component of the "business cycle"; such a component is assumed to simultaneously affect many macroeconomic variables. Although the estimation and use of coincident indices (for example the Index of Coincident Economic Indicators) pre-dates dynamic factor models, in several influential papers Stock and Watson (1989, 1991) used a dynamic factor model to provide a theoretical foundation for them. Below, we follow the treatment found in Kim and Nelson (1999), of the Stock and Watson (1991) model, to formulate a dynamic factor model, estimate its parameters via maximum likelihood, and create a coincident index. 
Macroeconomic data The coincident index is created by considering the comovements in four macroeconomic variables (versions of these variables are available on FRED; the ID of the series used below is given in parentheses): Industrial production (IPMAN) Real aggregate income (excluding transfer payments) (W875RX1) Manufacturing and trade sales (CMRMTSPL) Employees on non-farm payrolls (PAYEMS) In all cases, the data is at the monthly frequency and has been seasonally adjusted; the time-frame considered is 1972 - 2005. End of explanation """ # HMRMT = DataReader('HMRMT', 'fred', start='1967-01-01', end=end) # CMRMT = DataReader('CMRMT', 'fred', start='1997-01-01', end=end) # HMRMT_growth = HMRMT.diff() / HMRMT.shift() # sales = pd.Series(np.zeros(emp.shape[0]), index=emp.index) # # Fill in the recent entries (1997 onwards) # sales[CMRMT.index] = CMRMT # # Backfill the previous entries (pre 1997) # idx = sales.loc[:'1997-01-01'].index # for t in range(len(idx)-1, 0, -1): # month = idx[t] # prev_month = idx[t-1] # sales.loc[prev_month] = sales.loc[month] / (1 + HMRMT_growth.loc[prev_month].values) dta = pd.concat((indprod, income, sales, emp), axis=1) dta.columns = ['indprod', 'income', 'sales', 'emp'] dta.index.freq = dta.index.inferred_freq dta.loc[:, 'indprod':'emp'].plot(subplots=True, layout=(2, 2), figsize=(15, 6)); """ Explanation: Note: in a recent update on FRED (8/12/15) the time series CMRMTSPL was truncated to begin in 1997; this is probably a mistake due to the fact that CMRMTSPL is a spliced series, so the earlier period is from the series HMRMT and the latter period is defined by CMRMT. This has since (02/11/16) been corrected, however the series could also be constructed by hand from HMRMT and CMRMT, as shown below (process taken from the notes in the Alfred xls file). 
# Create log-differenced series, then de-mean and standardize each one.
# NOTE: all dln_* columns are appended before any std_* column so that
# later label slices such as dta.loc[:, 'std_indprod':'std_emp'] still
# select exactly the four standardized columns.
series_names = ['indprod', 'income', 'sales', 'emp']

# Month-over-month log growth, in percent
for name in series_names:
    dta['dln_' + name] = np.log(dta[name]).diff() * 100

# De-mean and standardize each growth series
for name in series_names:
    growth = dta['dln_' + name]
    dta['std_' + name] = (growth - growth.mean()) / growth.std()
The variance of the factor error term is set to the identity matrix to ensure identification of the unobserved factors. This model can be cast into state space form, and the unobserved factor estimated via the Kalman filter. The likelihood can be evaluated as a byproduct of the filtering recursions, and maximum likelihood estimation used to estimate the parameters. Model specification The specific dynamic factor model in this application has 1 unobserved factor which is assumed to follow an AR(2) process. The innovations $\varepsilon_t$ are assumed to be independent (so that $\Sigma$ is a diagonal matrix) and the error term associated with each equation, $u_{i,t}$ is assumed to follow an independent AR(2) process. Thus the specification considered here is: $$ \begin{align} y_{i,t} & = \lambda_i f_t + u_{i,t} \ u_{i,t} & = c_{i,1} u_{1,t-1} + c_{i,2} u_{i,t-2} + \varepsilon_{i,t} \qquad & \varepsilon_{i,t} \sim N(0, \sigma_i^2) \ f_t & = a_1 f_{t-1} + a_2 f_{t-2} + \eta_t \qquad & \eta_t \sim N(0, I)\ \end{align} $$ where $i$ is one of: [indprod, income, sales, emp ]. This model can be formulated using the DynamicFactor model built-in to statsmodels. In particular, we have the following specification: k_factors = 1 - (there is 1 unobserved factor) factor_order = 2 - (it follows an AR(2) process) error_var = False - (the errors evolve as independent AR processes rather than jointly as a VAR - note that this is the default option, so it is not specified below) error_order = 2 - (the errors are autocorrelated of order 2: i.e. AR(2) processes) error_cov_type = 'diagonal' - (the innovations are uncorrelated; this is again the default) Once the model is created, the parameters can be estimated via maximum likelihood; this is done using the fit() method. Note: recall that we have demeaned and standardized the data; this will be important in interpreting the results that follow. 
one of the easy-to-interpret implications in this model is the persistence of the unobserved factor: we find that it exhibits substantial persistence.
End of explanation """ fig, ax = plt.subplots(figsize=(13,3)) # Plot the factor dates = endog.index._mpl_repr() ax.plot(dates, res.factors.filtered[0], label='Factor') ax.legend() # Retrieve and also plot the NBER recession indicators rec = DataReader('USREC', 'fred', start=start, end=end) ylim = ax.get_ylim() ax.fill_between(dates[:-3], ylim[0], ylim[1], rec.values[:-4,0], facecolor='k', alpha=0.1); """ Explanation: Estimated factors While it can be useful to plot the unobserved factors, it is less useful here than one might think for two reasons: The sign-related identification issue described above. Since the data was differenced, the estimated factor explains the variation in the differenced data, not the original data. It is for these reasons that the coincident index is created (see below). With these reservations, the unobserved factor is plotted below, along with the NBER indicators for US recessions. It appears that the factor is successful at picking up some degree of business cycle activity. End of explanation """ res.plot_coefficients_of_determination(figsize=(8,2)); """ Explanation: Post-estimation Although here we will be able to interpret the results of the model by constructing the coincident index, there is a useful and generic approach for getting a sense for what is being captured by the estimated factor. By taking the estimated factors as given, regressing them (and a constant) each (one at a time) on each of the observed variables, and recording the coefficients of determination ($R^2$ values), we can get a sense of the variables for which each factor explains a substantial portion of the variance and the variables for which it does not. In models with more variables and more factors, this can sometimes lend interpretation to the factors (for example sometimes one factor will load primarily on real variables and another on nominal variables). 
def compute_coincident_index(mod, res):
    """Build a coincident index (level series) from the fitted dynamic factor.

    Following Stock and Watson (1991) / Kim and Nelson (1999): the filtered
    factor captures the common component of the demeaned, standardized
    growth rates, so it is rescaled, an estimated mean growth rate is added
    back, and the result is cumulated into a level series normalized to the
    same base year as the published USPHCI index.

    Parameters
    ----------
    mod : sm.tsa.DynamicFactor
        Model instance; provides the state space matrices and ``nobs``.
    res : results object returned by ``mod.fit()``
        Provides the filtered factor and the Kalman gain.

    Returns
    -------
    pd.Series
        The coincident index, indexed by the dates of ``dta``.

    Notes
    -----
    Relies on the module-level ``dta``, ``usphci`` and ``dusphci`` objects.
    NOTE(review): ``factor *= ...`` below rescales ``res.factors.filtered[0]``
    in place, so calling this twice on the same results object rescales
    twice — matches the original notebook behavior.
    """
    # Estimate W(1): steady-state weights mapping the observed (differenced)
    # series into the factor; used to recover the mean growth rate that was
    # removed when the data were standardized.
    design = mod.ssm['design']
    transition = mod.ssm['transition']
    ss_kalman_gain = res.filter_results.kalman_gain[:, :, -1]  # steady-state gain
    k_states = ss_kalman_gain.shape[0]

    W1 = np.linalg.inv(np.eye(k_states) - np.dot(
        np.eye(k_states) - np.dot(ss_kalman_gain, design),
        transition
    )).dot(ss_kalman_gain)[0]

    # Mean growth rate implied by the (unstandardized) log-differenced data
    factor_mean = np.dot(W1, dta.loc['1972-02-01':, 'dln_indprod':'dln_emp'].mean())

    # Rescale the filtered factor to the scale of USPHCI growth
    factor = res.factors.filtered[0]
    factor *= np.std(usphci.diff()[1:]) / np.std(factor)

    # Cumulate growth into a level series. The initial value is arbitrary;
    # here it is set to facilitate comparison with USPHCI.
    coincident_index = np.zeros(mod.nobs + 1)
    coincident_index[0] = usphci.iloc[0] * factor_mean / dusphci.mean()
    for t in range(mod.nobs):
        coincident_index[t + 1] = coincident_index[t] + factor[t] + factor_mean

    # Attach dates
    coincident_index = pd.Series(coincident_index, index=dta.index).iloc[1:]

    # Normalize to use the same base year as USPHCI
    coincident_index *= (usphci.loc['1992-07-01'] / coincident_index.loc['1992-07-01'])

    return coincident_index
class ExtendedDFM(sm.tsa.DynamicFactor):
    """Dynamic factor model where the fourth observed series (employment)
    loads on the factor and on its first three lags.

    Implementation trick: the model is declared with ``factor_order=4`` so
    that f_{t-1}, f_{t-2}, f_{t-3} appear in the state vector, but the
    factor transition is restricted to an AR(2) by pinning the extra AR
    coefficients a_3, a_4 to zero in ``update``. Three new loading
    parameters are appended to the end of the parameter vector and written
    directly into row 3 of the design matrix.
    """

    def __init__(self, endog, **kwargs):
        # Setup the model as if we had a factor order of 4 (this puts the
        # lagged factors into the state vector; the AR(2) restriction is
        # imposed later).
        super(ExtendedDFM, self).__init__(
            endog, k_factors=1, factor_order=4, error_order=2,
            **kwargs)

        # Note: `self.parameters` is an ordered dict with the
        # keys corresponding to parameter types, and the values
        # the number of parameters of that type.
        # Add the new parameters (the three extra loadings)
        self.parameters['new_loadings'] = 3

        # Cache slices locating the 4 factor AR parameters (a_1, ..., a_4)
        # in the full parameter vector: the first two (a_1, a_2) are the
        # real AR(2) coefficients, the next two (a_3, a_4) are fixed to 0.
        offset = (self.parameters['factor_loadings'] +
                  self.parameters['exog'] +
                  self.parameters['error_cov'])
        self._params_factor_ar = np.s_[offset:offset+2]
        self._params_factor_zero = np.s_[offset+2:offset+4]

    @property
    def start_params(self):
        """Default starting parameters for numerical optimization."""
        # Add three new loading parameters to the end of the parameter
        # vector, initialized to zeros (for simplicity; they could
        # be initialized any way you like)
        return np.r_[super(ExtendedDFM, self).start_params, 0, 0, 0]

    @property
    def param_names(self):
        """Human-readable names, including the three new lag loadings."""
        # Add the corresponding names for the new loading parameters
        # (the name can be anything you like)
        return super(ExtendedDFM, self).param_names + [
            'loading.L%d.f1.%s' % (i, self.endog_names[3]) for i in range(1,4)]

    def transform_params(self, unconstrained):
        """Map unconstrained optimizer parameters to model parameters."""
        # Perform the typical DFM transformation (w/o the new parameters)
        constrained = super(ExtendedDFM, self).transform_params(
            unconstrained[:-3])

        # Redo the factor AR constraint, since we only want an AR(2),
        # and the previous constraint was for an AR(4)
        ar_params = unconstrained[self._params_factor_ar]
        constrained[self._params_factor_ar] = (
            tools.constrain_stationary_univariate(ar_params))

        # Return all the parameters (new loadings are passed through as-is)
        return np.r_[constrained, unconstrained[-3:]]

    def untransform_params(self, constrained):
        """Inverse of ``transform_params``."""
        # Perform the typical DFM untransformation (w/o the new parameters)
        unconstrained = super(ExtendedDFM, self).untransform_params(
            constrained[:-3])

        # Redo the factor AR unconstrained, since we only want an AR(2),
        # and the previous unconstrained was for an AR(4)
        ar_params = constrained[self._params_factor_ar]
        unconstrained[self._params_factor_ar] = (
            tools.unconstrain_stationary_univariate(ar_params))

        # Return all the parameters
        return np.r_[unconstrained, constrained[-3:]]

    def update(self, params, transformed=True, **kwargs):
        """Write the parameter vector into the state space matrices."""
        # Perform the transformation, if required
        if not transformed:
            params = self.transform_params(params)
        # Impose the AR(2) restriction: a_3 = a_4 = 0
        params[self._params_factor_zero] = 0

        # Now perform the usual DFM update, but exclude our new parameters
        super(ExtendedDFM, self).update(params[:-3], transformed=True, **kwargs)

        # Finally, set our new parameters in the design matrix
        # (row 3 = employment; columns 1:4 = the lagged factors)
        self.ssm['design', 3, 1:4] = params[-3:]
c_{\text{sales}, 1} & 0 & 0 & 0 & c_{\text{sales}, 2} & 0 \ 0 & 0 & 0 & 0 & 0 & c_{\text{emp}, 1} & 0 & 0 & 0 & c_{\text{emp}, 2} \ 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \ 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \ 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \ 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \ \end{bmatrix} \begin{bmatrix} f_{t-1} \ f_{t-2} \ u_{\text{indprod}, t-1} \ u_{\text{income}, t-1} \ u_{\text{sales}, t-1} \ u_{\text{emp}, t-1} \ u_{\text{indprod}, t-2} \ u_{\text{income}, t-2} \ u_{\text{sales}, t-2} \ u_{\text{emp}, t-2} \ \end{bmatrix} + R \begin{bmatrix} \eta_t \ \varepsilon_{t} \end{bmatrix} $$ the DynamicFactor model handles setting up the state space representation and, in the DynamicFactor.update method, it fills in the fitted parameter values into the appropriate locations. The extended specification is the same as in the previous example, except that we also want to allow employment to depend on lagged values of the factor. This creates a change to the $y_{\text{emp},t}$ equation. 
Now we have: $$ \begin{align} y_{i,t} & = \lambda_i f_t + u_{i,t} \qquad & i \in {\text{indprod}, \text{income}, \text{sales} }\ y_{i,t} & = \lambda_{i,0} f_t + \lambda_{i,1} f_{t-1} + \lambda_{i,2} f_{t-2} + \lambda_{i,3} f_{t-3} + u_{i,t} \qquad & i = \text{emp} \ u_{i,t} & = c_{i,1} u_{i,t-1} + c_{i,2} u_{i,t-2} + \varepsilon_{i,t} \qquad & \varepsilon_{i,t} \sim N(0, \sigma_i^2) \ f_t & = a_1 f_{t-1} + a_2 f_{t-2} + \eta_t \qquad & \eta_t \sim N(0, I)\ \end{align} $$ Now, the corresponding observation equation should look like the following: $$ \begin{bmatrix} y_{\text{indprod}, t} \ y_{\text{income}, t} \ y_{\text{sales}, t} \ y_{\text{emp}, t} \ \end{bmatrix} = \begin{bmatrix} \lambda_\text{indprod} & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \ \lambda_\text{income} & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \ \lambda_\text{sales} & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \ \lambda_\text{emp,1} & \lambda_\text{emp,2} & \lambda_\text{emp,3} & \lambda_\text{emp,4} & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \ \end{bmatrix} \begin{bmatrix} f_t \ f_{t-1} \ f_{t-2} \ f_{t-3} \ u_{\text{indprod}, t} \ u_{\text{income}, t} \ u_{\text{sales}, t} \ u_{\text{emp}, t} \ u_{\text{indprod}, t-1} \ u_{\text{income}, t-1} \ u_{\text{sales}, t-1} \ u_{\text{emp}, t-1} \ \end{bmatrix} $$ Notice that we have introduced two new state variables, $f_{t-2}$ and $f_{t-3}$, which means we need to update the transition equation: $$ \begin{bmatrix} f_t \ f_{t-1} \ f_{t-2} \ f_{t-3} \ u_{\text{indprod}, t} \ u_{\text{income}, t} \ u_{\text{sales}, t} \ u_{\text{emp}, t} \ u_{\text{indprod}, t-1} \ u_{\text{income}, t-1} \ u_{\text{sales}, t-1} \ u_{\text{emp}, t-1} \ \end{bmatrix} = \begin{bmatrix} a_1 & a_2 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \ 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \ 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \ 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \ 0 & 0 & 0 & 0 & c_{\text{indprod}, 1} & 0 & 0 & 0 & c_{\text{indprod}, 2} & 0 & 0 & 0 \ 0 & 0 & 0 & 0 & 0
& c_{\text{income}, 1} & 0 & 0 & 0 & c_{\text{income}, 2} & 0 & 0 \ 0 & 0 & 0 & 0 & 0 & 0 & c_{\text{sales}, 1} & 0 & 0 & 0 & c_{\text{sales}, 2} & 0 \ 0 & 0 & 0 & 0 & 0 & 0 & 0 & c_{\text{emp}, 1} & 0 & 0 & 0 & c_{\text{emp}, 2} \ 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \ 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \ 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \ 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \ \end{bmatrix} \begin{bmatrix} f_{t-1} \ f_{t-2} \ f_{t-3} \ f_{t-4} \ u_{\text{indprod}, t-1} \ u_{\text{income}, t-1} \ u_{\text{sales}, t-1} \ u_{\text{emp}, t-1} \ u_{\text{indprod}, t-2} \ u_{\text{income}, t-2} \ u_{\text{sales}, t-2} \ u_{\text{emp}, t-2} \ \end{bmatrix} + R \begin{bmatrix} \eta_t \ \varepsilon_{t} \end{bmatrix} $$ This model cannot be handled out-of-the-box by the DynamicFactor class, but it can be handled by creating a subclass when alters the state space representation in the appropriate way. First, notice that if we had set factor_order = 4, we would almost have what we wanted. In that case, the last line of the observation equation would be: $$ \begin{bmatrix} \vdots \ y_{\text{emp}, t} \ \end{bmatrix} = \begin{bmatrix} \vdots & & & & & & & & & & & \vdots \ \lambda_\text{emp,1} & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \ \end{bmatrix} \begin{bmatrix} f_t \ f_{t-1} \ f_{t-2} \ f_{t-3} \ \vdots \end{bmatrix} $$ and the first line of the transition equation would be: $$ \begin{bmatrix} f_t \ \vdots \end{bmatrix} = \begin{bmatrix} a_1 & a_2 & a_3 & a_4 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \ \vdots & & & & & & & & & & & \vdots \ \end{bmatrix} \begin{bmatrix} f_{t-1} \ f_{t-2} \ f_{t-3} \ f_{t-4} \ \vdots \end{bmatrix} + R \begin{bmatrix} \eta_t \ \varepsilon_{t} \end{bmatrix} $$ Relative to what we want, we have the following differences: In the above situation, the $\lambda_{\text{emp}, j}$ are forced to be zero for $j > 0$, and we want them to be estimated as parameters. 
We only want the factor to transition according to an AR(2), but under the above situation it is an AR(4). Our strategy will be to subclass DynamicFactor, and let it do most of the work (setting up the state space representation, etc.) where it assumes that factor_order = 4. The only things we will actually do in the subclass will be to fix those two issues. First, here is the full code of the subclass; it is discussed below. It is important to note at the outset that none of the methods defined below could have been omitted. In fact, the methods __init__, start_params, param_names, transform_params, untransform_params, and update form the core of all state space models in statsmodels, not just the DynamicFactor class. End of explanation """ # Create the model extended_mod = ExtendedDFM(endog) initial_extended_res = extended_mod.fit(maxiter=1000, disp=False) extended_res = extended_mod.fit(initial_extended_res.params, method='nm', maxiter=1000) print(extended_res.summary(separate_params=False)) """ Explanation: So what did we just do? __init__ The important step here was specifying the base dynamic factor model on which we were operating. In particular, as described above, we initialize with factor_order=4, even though we will only end up with an AR(2) model for the factor. We also performed some general setup-related tasks. start_params start_params are used as initial values in the optimizer. Since we are adding three new parameters, we need to pass those in. If we had not done this, the optimizer would use the default starting values, which would be three elements short. param_names param_names are used in a variety of places, but especially in the results class. Below we get a full result summary, which is only possible when all the parameters have associated names. transform_params and untransform_params The optimizer may select parameter values in an unconstrained way.
That's not usually desired (since variances cannot be negative, for example), and transform_params is used to transform the unconstrained values used by the optimizer to constrained values appropriate to the model. Variance terms are typically squared (to force them to be positive), and AR lag coefficients are often constrained to lead to a stationary model. untransform_params is used for the reverse operation (and is important because starting parameters are usually specified in terms of values appropriate to the model, and we need to convert them to parameters appropriate to the optimizer before we can begin the optimization routine). Even though we do not need to transform or untransform our new parameters (the loadings can in theory take on any values), we still need to modify this function for two reasons: The version in the DynamicFactor class expects 3 fewer parameters than we have now. At a minimum, we need to handle the three new parameters. The version in the DynamicFactor class constrains the factor lag coefficients to be stationary as though it were an AR(4) model. Since we actually have an AR(2) model, we need to re-do the constraint. We also set the last two autoregressive coefficients to be zero here. update The most important reason we need to specify a new update method is because we have three new parameters that we need to place into the state space formulation. In particular we let the parent DynamicFactor.update method handle placing all the parameters except the three new ones into the state space representation, and then we put the last three in manually.
End of explanation """

# Plot the R^2 of each observed series with respect to the factor; the
# trailing semicolon suppresses the duplicate figure echo in the notebook.
extended_res.plot_coefficients_of_determination(figsize=(8,2));

fig, ax = plt.subplots(figsize=(13,3))

# Compute the index
# (compute_coincident_index is defined earlier in the notebook; it maps the
# smoothed factor into the same units as the published coincident index)
extended_coincident_index = compute_coincident_index(extended_mod, extended_res)

# Plot the factor
# `coincident_index` (from the basic model earlier in the notebook) is drawn
# alongside the extended-model index and the published USPHCI for comparison.
dates = endog.index._mpl_repr()
ax.plot(dates, coincident_index, '-', linewidth=1, label='Basic model')
ax.plot(dates, extended_coincident_index, '--', linewidth=3, label='Extended model')
ax.plot(usphci.index._mpl_repr(), usphci, label='USPHCI')
ax.legend(loc='lower right')
ax.set(title='Coincident indices, comparison')

# Retrieve and also plot the NBER recession indicators
# (`rec` appears to hold 0/1 recession dummies used as the `where` mask for
# shading; NOTE(review): the [:-3]/[:-4] trims presumably align the two
# series' date ranges -- confirm against where `rec` is loaded)
ylim = ax.get_ylim()
ax.fill_between(dates[:-3], ylim[0], ylim[1], rec.values[:-4,0], facecolor='k', alpha=0.1);
""" Explanation: Although this model increases the likelihood, it is not preferred by the AIC and BIC measures which penalize the additional three parameters. Furthermore, the qualitative results are unchanged, as we can see from the updated $R^2$ chart and the new coincident index, both of which are practically identical to the previous results. End of explanation """