Columns: repo_name (string), path (string), license (string, 15 classes), cells (list), types (list)
ES-DOC/esdoc-jupyterhub
notebooks/cccma/cmip6/models/sandbox-3/atmoschem.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Atmoschem\nMIP Era: CMIP6\nInstitute: CCCMA\nSource ID: SANDBOX-3\nTopic: Atmoschem\nSub-Topics: Transport, Emissions Concentrations, Gas Phase Chemistry, Stratospheric Heterogeneous Chemistry, Tropospheric Heterogeneous Chemistry, Photo Chemistry. \nProperties: 84 (39 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:53:47\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'cccma', 'sandbox-3', 'atmoschem')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties\n2. Key Properties --> Software Properties\n3. Key Properties --> Timestep Framework\n4. Key Properties --> Timestep Framework --> Split Operator Order\n5. Key Properties --> Tuning Applied\n6. Grid\n7. Grid --> Resolution\n8. Transport\n9. Emissions Concentrations\n10. Emissions Concentrations --> Surface Emissions\n11. Emissions Concentrations --> Atmospheric Emissions\n12. Emissions Concentrations --> Concentrations\n13. Gas Phase Chemistry\n14. Stratospheric Heterogeneous Chemistry\n15. Tropospheric Heterogeneous Chemistry\n16. Photo Chemistry\n17. Photo Chemistry --> Photolysis \n1. Key Properties\nKey properties of the atmospheric chemistry\n1.1. Model Overview\nIs Required: TRUE    Type: STRING    Cardinality: 1.1\nOverview of atmospheric chemistry model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. Model Name\nIs Required: TRUE    Type: STRING    Cardinality: 1.1\nName of atmospheric chemistry model code.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.3. Chemistry Scheme Scope\nIs Required: TRUE    Type: ENUM    Cardinality: 1.N\nAtmospheric domains covered by the atmospheric chemistry model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"troposhere\" \n# \"stratosphere\" \n# \"mesosphere\" \n# \"mesosphere\" \n# \"whole atmosphere\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.4. Basic Approximations\nIs Required: TRUE    Type: STRING    Cardinality: 1.1\nBasic approximations made in the atmospheric chemistry model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.basic_approximations') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.5. Prognostic Variables Form\nIs Required: TRUE    Type: ENUM    Cardinality: 1.N\nForm of prognostic variables in the atmospheric chemistry component.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"3D mass/mixing ratio for gas\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.6. Number Of Tracers\nIs Required: TRUE    Type: INTEGER    Cardinality: 1.1\nNumber of advected tracers in the atmospheric chemistry model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "1.7. Family Approach\nIs Required: TRUE    Type: BOOLEAN    Cardinality: 1.1\nAtmospheric chemistry calculations (not advection) generalized into families of species?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.family_approach') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "1.8. Coupling With Chemical Reactivity\nIs Required: TRUE    Type: BOOLEAN    Cardinality: 1.1\nIs atmospheric chemistry transport scheme turbulence coupled with chemical reactivity?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "2. Key Properties --> Software Properties\nSoftware properties of atmospheric chemistry code\n2.1. Repository\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nLocation of code for this component.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.2. Code Version\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nCode version identifier.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.3. Code Languages\nIs Required: FALSE    Type: STRING    Cardinality: 0.N\nCode language(s).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3. Key Properties --> Timestep Framework\nTimestepping in the atmospheric chemistry model\n3.1. Method\nIs Required: TRUE    Type: ENUM    Cardinality: 1.1\nMathematical method deployed to solve the evolution of a given variable", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Operator splitting\" \n# \"Integrated\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "3.2. Split Operator Advection Timestep\nIs Required: FALSE    Type: INTEGER    Cardinality: 0.1\nTimestep for chemical species advection (in seconds)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.3. 
Split Operator Physical Timestep\nIs Required: FALSE    Type: INTEGER    Cardinality: 0.1\nTimestep for physics (in seconds).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.4. Split Operator Chemistry Timestep\nIs Required: FALSE    Type: INTEGER    Cardinality: 0.1\nTimestep for chemistry (in seconds).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.5. Split Operator Alternate Order\nIs Required: FALSE    Type: BOOLEAN    Cardinality: 0.1\n?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "3.6. Integrated Timestep\nIs Required: TRUE    Type: INTEGER    Cardinality: 1.1\nTimestep for the atmospheric chemistry model (in seconds)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.7. Integrated Scheme Type\nIs Required: TRUE    Type: ENUM    Cardinality: 1.1\nSpecify the type of timestep scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Explicit\" \n# \"Implicit\" \n# \"Semi-implicit\" \n# \"Semi-analytic\" \n# \"Impact solver\" \n# \"Back Euler\" \n# \"Newton Raphson\" \n# \"Rosenbrock\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "4. Key Properties --> Timestep Framework --> Split Operator Order\n4.1. Turbulence\nIs Required: FALSE    Type: INTEGER    Cardinality: 0.1\nCall order for turbulence scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "4.2. Convection\nIs Required: FALSE    Type: INTEGER    Cardinality: 0.1\nCall order for convection scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "4.3. Precipitation\nIs Required: FALSE    Type: INTEGER    Cardinality: 0.1\nCall order for precipitation scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "4.4. 
Emissions\nIs Required: FALSE    Type: INTEGER    Cardinality: 0.1\nCall order for emissions scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "4.5. Deposition\nIs Required: FALSE    Type: INTEGER    Cardinality: 0.1\nCall order for deposition scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "4.6. Gas Phase Chemistry\nIs Required: FALSE    Type: INTEGER    Cardinality: 0.1\nCall order for gas phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "4.7. Tropospheric Heterogeneous Phase Chemistry\nIs Required: FALSE    Type: INTEGER    Cardinality: 0.1\nCall order for tropospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "4.8. Stratospheric Heterogeneous Phase Chemistry\nIs Required: FALSE    Type: INTEGER    Cardinality: 0.1\nCall order for stratospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "4.9. Photo Chemistry\nIs Required: FALSE    Type: INTEGER    Cardinality: 0.1\nCall order for photo chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "4.10. Aerosols\nIs Required: FALSE    Type: INTEGER    Cardinality: 0.1\nCall order for aerosols scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "5. 
Key Properties --> Tuning Applied\nTuning methodology for atmospheric chemistry component\n5.1. Description\nIs Required: TRUE    Type: STRING    Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process-oriented metrics, and the possible conflicts with parameterization-level tuning. In particular, describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.2. Global Mean Metrics Used\nIs Required: FALSE    Type: STRING    Cardinality: 0.N\nList the set of metrics of the global mean state used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.3. Regional Metrics Used\nIs Required: FALSE    Type: STRING    Cardinality: 0.N\nList of regional metrics of mean state used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.4. Trend Metrics Used\nIs Required: FALSE    Type: STRING    Cardinality: 0.N\nList observed trend metrics used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6. Grid\nAtmospheric chemistry grid\n6.1. Overview\nIs Required: TRUE    Type: STRING    Cardinality: 1.1\nDescribe the general structure of the atmospheric chemistry grid", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.grid.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.2. Matches Atmosphere Grid\nIs Required: TRUE    Type: BOOLEAN    Cardinality: 1.1\nDoes the atmospheric chemistry grid match the atmosphere grid?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "7. Grid --> Resolution\nResolution in the atmospheric chemistry grid\n7.1. Name\nIs Required: TRUE    Type: STRING    Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.grid.resolution.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. Canonical Horizontal Resolution\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nExpression quoted for gross comparisons of resolution, e.g. 50km or 0.1 degrees etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.3. 
Number Of Horizontal Gridpoints\nIs Required: FALSE    Type: INTEGER    Cardinality: 0.1\nTotal number of horizontal (XY) points (or degrees of freedom) on computational grid.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "7.4. Number Of Vertical Levels\nIs Required: FALSE    Type: INTEGER    Cardinality: 0.1\nNumber of vertical levels resolved on computational grid.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "7.5. Is Adaptive Grid\nIs Required: FALSE    Type: BOOLEAN    Cardinality: 0.1\nDefault is False. Set true if grid resolution changes during execution.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "8. Transport\nAtmospheric chemistry transport\n8.1. Overview\nIs Required: TRUE    Type: STRING    Cardinality: 1.1\nGeneral overview of transport implementation", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.transport.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Use Atmospheric Transport\nIs Required: TRUE    Type: BOOLEAN    Cardinality: 1.1\nIs transport handled by the atmosphere, rather than within atmospheric chemistry?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "8.3. Transport Details\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nIf transport is handled within the atmospheric chemistry scheme, describe it.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.transport.transport_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9. Emissions Concentrations\nAtmospheric chemistry emissions\n9.1. Overview\nIs Required: TRUE    Type: STRING    Cardinality: 1.1\nOverview atmospheric chemistry emissions", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.emissions_concentrations.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Emissions Concentrations --> Surface Emissions\n10.1. Sources\nIs Required: FALSE    Type: ENUM    Cardinality: 0.N\nSources of the chemical species emitted at the surface that are taken into account in the emissions scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Vegetation\" \n# \"Soil\" \n# \"Sea surface\" \n# \"Anthropogenic\" \n# \"Biomass burning\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "10.2. Method\nIs Required: FALSE    Type: ENUM    Cardinality: 0.N\nMethods used to define chemical species emitted directly into model layers above the surface (several methods allowed because the different species may not use the same method).", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Climatology\" \n# \"Spatially uniform mixing ratio\" \n# \"Spatially uniform concentration\" \n# \"Interactive\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "10.3. Prescribed Climatology Emitted Species\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nList of chemical species emitted at the surface and prescribed via a climatology, and the nature of the climatology (E.g. CO (monthly), C2H6 (constant))", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10.4. Prescribed Spatially Uniform Emitted Species\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nList of chemical species emitted at the surface and prescribed as spatially uniform", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10.5. Interactive Emitted Species\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nList of chemical species emitted at the surface and specified via an interactive method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10.6. Other Emitted Species\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nList of chemical species emitted at the surface and specified via any other method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11. Emissions Concentrations --> Atmospheric Emissions\nTO DO\n11.1. Sources\nIs Required: FALSE    Type: ENUM    Cardinality: 0.N\nSources of chemical species emitted in the atmosphere that are taken into account in the emissions scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Aircraft\" \n# \"Biomass burning\" \n# \"Lightning\" \n# \"Volcanos\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "11.2. Method\nIs Required: FALSE    Type: ENUM    Cardinality: 0.N\nMethods used to define the chemical species emitted in the atmosphere (several methods allowed because the different species may not use the same method).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Climatology\" \n# \"Spatially uniform mixing ratio\" \n# \"Spatially uniform concentration\" \n# \"Interactive\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "11.3. Prescribed Climatology Emitted Species\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nList of chemical species emitted in the atmosphere and prescribed via a climatology (E.g. 
CO (monthly), C2H6 (constant))", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.4. Prescribed Spatially Uniform Emitted Species\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nList of chemical species emitted in the atmosphere and prescribed as spatially uniform", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.5. Interactive Emitted Species\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nList of chemical species emitted in the atmosphere and specified via an interactive method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.6. Other Emitted Species\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nList of chemical species emitted in the atmosphere and specified via an \"other method\"", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12. Emissions Concentrations --> Concentrations\nTO DO\n12.1. Prescribed Lower Boundary\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nList of species prescribed at the lower boundary.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12.2. Prescribed Upper Boundary\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nList of species prescribed at the upper boundary.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13. Gas Phase Chemistry\nAtmospheric chemistry gas phase chemistry\n13.1. Overview\nIs Required: TRUE    Type: STRING    Cardinality: 1.1\nOverview gas phase atmospheric chemistry", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13.2. Species\nIs Required: FALSE    Type: ENUM    Cardinality: 0.N\nSpecies included in the gas phase chemistry scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"HOx\" \n# \"NOy\" \n# \"Ox\" \n# \"Cly\" \n# \"HSOx\" \n# \"Bry\" \n# \"VOCs\" \n# \"isoprene\" \n# \"H2O\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.3. Number Of Bimolecular Reactions\nIs Required: TRUE    Type: INTEGER    Cardinality: 1.1\nThe number of bi-molecular reactions in the gas phase chemistry scheme.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "13.4. Number Of Termolecular Reactions\nIs Required: TRUE    Type: INTEGER    Cardinality: 1.1\nThe number of ter-molecular reactions in the gas phase chemistry scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "13.5. Number Of Tropospheric Heterogenous Reactions\nIs Required: TRUE    Type: INTEGER    Cardinality: 1.1\nThe number of reactions in the tropospheric heterogeneous chemistry scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "13.6. Number Of Stratospheric Heterogenous Reactions\nIs Required: TRUE    Type: INTEGER    Cardinality: 1.1\nThe number of reactions in the stratospheric heterogeneous chemistry scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "13.7. Number Of Advected Species\nIs Required: TRUE    Type: INTEGER    Cardinality: 1.1\nThe number of advected species in the gas phase chemistry scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "13.8. Number Of Steady State Species\nIs Required: TRUE    Type: INTEGER    Cardinality: 1.1\nThe number of gas phase species for which the concentration is updated in the chemical solver assuming photochemical steady state", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "13.9. Interactive Dry Deposition\nIs Required: TRUE    Type: BOOLEAN    Cardinality: 1.1\nIs dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "13.10. Wet Deposition\nIs Required: TRUE    Type: BOOLEAN    Cardinality: 1.1\nIs wet deposition included? Wet deposition describes the moist processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "13.11. Wet Oxidation\nIs Required: TRUE    Type: BOOLEAN    Cardinality: 1.1\nIs wet oxidation included? Oxidation describes the loss of electrons or an increase in oxidation state by a molecule", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "14. Stratospheric Heterogeneous Chemistry\nAtmospheric chemistry stratospheric heterogeneous chemistry\n14.1. Overview\nIs Required: TRUE    Type: STRING    Cardinality: 1.1\nOverview stratospheric heterogeneous atmospheric chemistry", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14.2. Gas Phase Species\nIs Required: FALSE    Type: ENUM    Cardinality: 0.N\nGas phase species included in the stratospheric heterogeneous chemistry scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Cly\" \n# \"Bry\" \n# \"NOy\" \n# TODO - please enter value(s)\n", "14.3. Aerosol Species\nIs Required: FALSE    Type: ENUM    Cardinality: 0.N\nAerosol species included in the stratospheric heterogeneous chemistry scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Sulphate\" \n# \"Polar stratospheric ice\" \n# \"NAT (Nitric acid trihydrate)\" \n# \"NAD (Nitric acid dihydrate)\" \n# \"STS (supercooled ternary solution aerosol particle)\" \n# TODO - please enter value(s)\n", "14.4. Number Of Steady State Species\nIs Required: TRUE    Type: INTEGER    Cardinality: 1.1\nThe number of steady state species in the stratospheric heterogeneous chemistry scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "14.5. Sedimentation\nIs Required: TRUE    Type: BOOLEAN    Cardinality: 1.1\nIs sedimentation included in the stratospheric heterogeneous chemistry scheme or not?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "14.6. Coagulation\nIs Required: TRUE    Type: BOOLEAN    Cardinality: 1.1\nIs coagulation included in the stratospheric heterogeneous chemistry scheme or not?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "15. Tropospheric Heterogeneous Chemistry\nAtmospheric chemistry tropospheric heterogeneous chemistry\n15.1. Overview\nIs Required: TRUE    Type: STRING    Cardinality: 1.1\nOverview tropospheric heterogeneous atmospheric chemistry", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.2. 
Gas Phase Species\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nList of gas phase species included in the tropospheric heterogeneous chemistry scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.3. Aerosol Species\nIs Required: FALSE    Type: ENUM    Cardinality: 0.N\nAerosol species included in the tropospheric heterogeneous chemistry scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Sulphate\" \n# \"Nitrate\" \n# \"Sea salt\" \n# \"Dust\" \n# \"Ice\" \n# \"Organic\" \n# \"Black carbon/soot\" \n# \"Polar stratospheric ice\" \n# \"Secondary organic aerosols\" \n# \"Particulate organic matter\" \n# TODO - please enter value(s)\n", "15.4. Number Of Steady State Species\nIs Required: TRUE    Type: INTEGER    Cardinality: 1.1\nThe number of steady state species in the tropospheric heterogeneous chemistry scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "15.5. Interactive Dry Deposition\nIs Required: TRUE    Type: BOOLEAN    Cardinality: 1.1\nIs dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "15.6. Coagulation\nIs Required: TRUE    Type: BOOLEAN    Cardinality: 1.1\nIs coagulation included in the tropospheric heterogeneous chemistry scheme or not?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "16. Photo Chemistry\nAtmospheric chemistry photo chemistry\n16.1. Overview\nIs Required: TRUE    Type: STRING    Cardinality: 1.1\nOverview atmospheric photo chemistry", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.photo_chemistry.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "16.2. Number Of Reactions\nIs Required: TRUE    Type: INTEGER    Cardinality: 1.1\nThe number of reactions in the photo-chemistry scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "17. Photo Chemistry --> Photolysis\nPhotolysis scheme\n17.1. Method\nIs Required: TRUE    Type: ENUM    Cardinality: 1.1\nPhotolysis scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Offline (clear sky)\" \n# \"Offline (with clouds)\" \n# \"Online\" \n# TODO - please enter value(s)\n", "17.2. 
Environmental Conditions\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nDescribe any environmental conditions taken into account by the photolysis scheme (e.g. whether pressure- and temperature-sensitive cross-sections and quantum yields in the photolysis calculations are modified to reflect the modelled conditions.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
chengsoonong/didbits
CrossVal/expt_protocol.ipynb
apache-2.0
[ "A results oriented view: double loop validation\nModel selection is one of the basic skills needed by a machine learning practitioner. This notebook introduces a frequentist approach for model selection based on cross validation. Instead of considering the standard perspective of using a \"double loop\" for training and prediction of classifiers, we consider an alternative perspective of identifying the results that need to be produced.\nWe consider the question of \"Which classifier is better on my favourite datasets?\"\nAs an example, we use three well known datasets: ionosphere, bupa, pima; which are binary classification tasks. We will use two classifiers from scikit-learn, the support vector machine (SVM) and the random forest (RF).\nSetting up python environment (do not use pylab)", "import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n%matplotlib inline", "Cross validation and hold out sets\nValidation is a popular method for estimating generalisation error in machine learning. There are many resources available that describe it (for example this FAQ), and why it is important to optimise for generalisation error and not training error. Please look these up if you are not familiar with this concept. The key idea is that we need to set some data aside that was not used for estimating the model so that we can properly measure the performance of our classifier. In particular we will use two strategies for keeping data aside:\n\nsplit sample or hold out validation\ncross validation\n\nIn split sample validation, a random subset of the data is held out for testing and the remaining data is used for training. To obtain multiple validation splits, the process is just repeated. For cross validation (for example 5 fold cross validation), the data is split into 5 pieces, and 4 of the pieces are used for training and 1 piece is used for validation. By considering each of the 5 pieces as a validation set in turn, we obtain 5 training/test splits that give us 5 estimates of validation error.\nConventions used in this notebook\nThe problem of model selection\nMachine learning algorithms often have parameters that need to be tuned. In the example below, we consider the support vector machine, where we have to tune the regularisation parameter $C$ and the Gaussian kernel bandwidth $\\sigma$. How do we choose the best parameters? Well, by checking which parameters performs best on a validation set! If we only have one layer of validation, then we no longer have any data left to estimate the performance of this classifier with the best parameters.\nTraining, validation and test sets\nIt quickly becomes confusing when we think about how to do model selection. When reading literature about this issue, different words are used. For convenience, we will refer to the data that is used to train a classifier as training set, the data used for model selection as validation set, and the data used to report the final performance of the classifier as test set.\nA recommended double loop strategy\nWe will use random splits to separate data into (training, validation) and (test) sets, and use cross validation for the \"inner loop\" of model selection that splits data into the (training) and (validation) sets. This makes the variable naming easier as we can refer to the iterates of the outer loop as splits, and the inner loop as folds.\nComparing SVM and random forest\nWe would like to see whether the SVM is better or the random forest is better on the ionosphere data. 
We do so using the following experimental protocol:\n\n10 random splits, with 30% of the data reserved for testing\n5 fold cross validation to tune hyperparameters\naccuracy as a performance measure\n\nIn our imaginary paper, we would like to have a figure containing boxplots for a particular dataset.", "from IPython.display import Image\nImage('dummy_perf.png')", "The (imaginary) data used to generate this plot would be a matrix with two columns, one each for SVM and RF. Each column would contain the accuracy for each random split of the data. So for 10 splits, this means we have 10 rows. Note that this is only for one single dataset, and we would have 6 columns in total since we are considering three datasets. Pandas provides a very useful concept of DataFrames, which would be familiar to people used to R.", "datasets = ['bupa','ionosphere','pima']\nmethods = ['rf','svm']\nnum_splits = 10\nsplit_idx = range(num_splits)\ncol_idx = pd.MultiIndex.from_product([datasets, methods], names=['dataset','method'])\n# Create a dataframe with col_idx columns and split_idx rows\naccuracy = pd.DataFrame(columns=col_idx, index=split_idx)\naccuracy.index.name = 'split'\nprint(accuracy)", "The SVM has a regularisation parameter that needs to be set, and potentially its kernel also has parameters. As mentioned before, this is done using an inner loop. What information is needed? For each split and dataset, we need to store the best (hyper)parameters of the SVM. Note that different parameters may be best for different splits of the data.", "params = ['C','sigma']\ncol_idx = pd.MultiIndex.from_product([datasets, params], names=['dataset','param'])\nbest_param = pd.DataFrame(columns=col_idx, index=split_idx)\nbest_param.index.name = 'split'\nprint(best_param)", "What needs to be done to find these best parameters? We need to fill in a table that contains the accuracy of five fold cross validation for each combination of parameters. Note that we only need to do this for the SVM.", "Cs = [0.1, 1., 10.0]\nsigmas = [1e-2, 1e-1]\nnum_cv = 5\nfolds = range(5)\ncol_idx = pd.MultiIndex.from_product([datasets, Cs, sigmas], names=['dataset','C','sigma'])\nrow_idx = pd.MultiIndex.from_product([split_idx, folds], names=['split','fold'])\ncv_accuracy = pd.DataFrame(columns=col_idx, index=row_idx)\nprint(cv_accuracy.head(13))", "That is all there is for the conceptual part of the double loop procedure for doing model selection with the inner loop and then using the best parameters for each split to estimate the performance on the test set. Therefore all we need to do now is implement the code to fill in the values in the dataframes appropriately.\nExperimental protocol (implementation details)\nImplementation of data splitting for validation\nRecall that for split sample validation, a random subset of the data is held out for testing and the remaining data is used for training. To obtain multiple validation splits, the process is just repeated. For cross validation (for example 5 fold cross validation), the data is split into 5 pieces, and 4 of the pieces are used for training and 1 piece is used for validation. 
By considering each of the 5 pieces as a validation set in turn, we obtain 5 training/validation splits that give us 5 estimates of validation error.", "def random_split(orig_idx, split_idx, frac_train=0.7):\n \"\"\"Returns training and prediction indices which are subsets of orig_idx.\n split_idx is for reproducibility of the random permutation.\n \n np.random.RandomState is not portable between machines.\n \"\"\"\n prng = np.random.RandomState(split_idx)\n num_data = len(orig_idx)\n num_train = int(np.round(frac_train*num_data))\n perm_idx = prng.permutation(num_data)\n train_idx = orig_idx[perm_idx[:num_train]]\n pred_idx = orig_idx[perm_idx[num_train:]]\n return train_idx, pred_idx\n\ndef cross_val(orig_idx, fold, num_cv=5):\n \"\"\"Returns training and prediction indices of cross validation,\n for fold 'fold' in num_cv.\n \"\"\"\n num_data = len(orig_idx)\n train = []\n pred = []\n for ix in range(num_data):\n if ix % num_cv == fold:\n pred.append(ix)\n else:\n train.append(ix)\n train_idx = orig_idx[np.array(train)]\n pred_idx = orig_idx[np.array(pred)]\n return train_idx, pred_idx\n\norig_idx = np.arange(20, dtype=int)\nprint(random_split(orig_idx, 1))\nprint(random_split(orig_idx, 4))\nprint(random_split(orig_idx, 1))\nprint(cross_val(orig_idx, 0))\nprint(cross_val(orig_idx, 3))", "A recommended double loop strategy\nWe will use random splits to separate data into (training, validation) and (test) sets, and use cross validation for the \"inner loop\" of model selection that splits data into the (training) and (validation) sets.", "# Split data into (training,validation) and (test) sets\nsplit_train_val, split_test = random_split(orig_idx, 1)\n# First fold of the cross validation\ntrain_idx, pred_idx = cross_val(split_train_val,0)\nprint(train_idx, pred_idx)\n# Second fold of the cross validation\ntrain_idx, pred_idx = cross_val(split_train_val,1)\nprint(train_idx, pred_idx)\n", "Interface to classifier\nWe have hidden away the complexity of the classifier in a separate file. In particular, there is only one common interface, which is a train_and_predict function. 
All this function needs to know is the dataset under consideration, the indices of the training and prediction instances, the classifier to be used, and hyperparameter values (if any).\ndef train_and_predict(data_name, train_idx, pred_idx, method, C=None, sigma=None, verbose=False):\n \"\"\"Work out which classifier to call on data_name.\n Then train the classifier, and predict on the validation set.\n\n return the accuracy of the classifier on the prediction set.\n \"\"\"\n\nThe actual code that does the computations\nWe summarise the setup that we have:\n\nmanage the indices of examples to be used for training and prediction\nan interface to train a classifier and predict on the prediction set\n\nFor the random forest, there is no need for any hyperparameter selection, so we can directly fill in the final accuracy results.", "from classifier import train_and_predict\n\nidx = pd.IndexSlice\nnum_ex = {'bupa': 345, 'ionosphere': 351, 'pima': 768}\nfor data_name in datasets:\n orig_idx = np.arange(num_ex[data_name], dtype=int)\n for split in split_idx:\n train_idx, pred_idx = random_split(orig_idx, split)\n acc = train_and_predict(data_name, train_idx, pred_idx, 'rf')\n accuracy.loc[split,idx[data_name,'rf']] = acc\nprint(accuracy)", "The SVM requires an inner loop for finding the best parameters.", "for data_name in datasets:\n orig_idx = np.arange(num_ex[data_name], dtype=int)\n for split in split_idx:\n tv_idx, test_idx = random_split(orig_idx, split)\n for fold in folds:\n train_idx, pred_idx = cross_val(tv_idx, fold)\n for C in Cs:\n for sigma in sigmas:\n acc = train_and_predict(data_name, train_idx, pred_idx, 'svm', C=C, sigma=sigma)\n cv_accuracy.loc[idx[split,fold],idx[data_name,C,sigma]] = acc\nprint(cv_accuracy.head())", "Now that we have all the values for the inner loop, we can collect the best parameters for each split.", "def find_best_param(cv_accuracy, dataset, split):\n \"\"\"Given the (inner) cross validation accuracies for all parameters,\n find the parameters that give the best average performance.\n \"\"\"\n idx = pd.IndexSlice\n cv = cv_accuracy.loc[idx[split],idx[dataset]]\n avg_cv = cv.mean(axis=0)\n best_param = avg_cv.idxmax()\n return best_param\n\n\nfor data_name in datasets:\n for split in split_idx:\n C, sigma = find_best_param(cv_accuracy, data_name, split)\n best_param.loc[split, idx[data_name,'C']] = C\n best_param.loc[split, idx[data_name,'sigma']] = sigma\nprint(best_param)", "Note that different splits could have different optimal parameters. As an important aside, it is good to reflect on what conclusions you can draw by looking at the boxplots of accuracy. There are really multiple classifiers here, and not just one single SVM. Not only are the training data different, but the parameters are different too. Now we are ready to train the final SVMs.", "for data_name in datasets:\n orig_idx = np.arange(num_ex[data_name], dtype=int)\n for split in split_idx:\n train_idx, pred_idx = random_split(orig_idx, split)\n C = best_param.loc[split, idx[data_name,'C']]\n sigma = best_param.loc[split, idx[data_name,'sigma']]\n acc = train_and_predict(data_name, train_idx, pred_idx, 'svm', C=C, sigma=sigma)\n accuracy.loc[split,idx[data_name,'svm']] = acc\nprint(accuracy)", "That's it! The results of the experiment are in the above table. 
We can visualise this table as boxplots.", "fig, axes = plt.subplots(1, 3, sharey=True, figsize=(8,6))\naxes[0].set_ylabel('Accuracy')\nfor ix,data_name in enumerate(datasets):\n ax = axes[ix]\n acc = accuracy.loc[:,data_name].values\n ax.boxplot([list(acc[:,0]), list(acc[:,1])])\n ax.set_xticklabels(methods, rotation=60)\n ax.set_title(data_name)\n ax.set_ylim(bottom=0.55, top=1.)\n ax.yaxis.grid(True)\nfig.suptitle('Performance comparison of SVM and random forest')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
beralt85/current_cumulants
kinesin.ipynb
mit
[ "Script for the analysis of kinesin models (See Ref. [1])\nReferences\n[1] Fluctuating Currents in Stochastic Thermodynamics II. Energy Conversion and Nonequilibrium Response in Kinesin Models. Altaner, Wachtel and Vollmer (2015)\n[2] Non-equilibrium fluctuations and mechanochemical couplings of a molecular motor. Lau, Lacoste, and Mallick, Phys. Rev. Lett., 99, 158102 (2007)\n[3] Kinesin's Network of Chemomechanical Motor Cycles. Liepelt and Lipowsky. Phys. Rev. Lett. 98, 258102 (2007)\nSet-up jupyter (ipython-notebook) and sympy libraries", "# inline plotting/interaction\n%pylab inline \n\nfrom sympy import * # symbolic python\ninit_printing() # pretty printing\n\nimport numpy as np # numeric python\n\nimport time # timing, for performance monitoring\n\n# activate latex text rendering\nfrom matplotlib import rc\nrc('text', usetex=True)\n\n# use matplotlib for plotting\nimport matplotlib.pyplot as plt \nfrom pylab import meshgrid,cm,imshow,contour,clabel,colorbar,axis,title,show", "Initialize the kinesin model(s)\nOverview of libraries:\n\nmodels.py: contains the information about several kinesin models\ncumulants.py: implements the algorithm described in [1]\nlambdification.py: simplifies and transforms sympy expressions obtained for the models to lambda functions which are used to calculate numerical values", "# The lambda functions for the expressions discussed and analyzed in Ref. [1]\nfrom lambdification import lau, ll \n\n# velocity, diffusion and normalized mechanical response for the kinesin model of Ref. [2]\nvelocityLa, diffusionLa, tmechLa = lau() # \n\n# velocity, hydrolysis rate, etc. for the 6-state [3] and 4-state [2] kinesin models\n# NOTE: quick=False yields expressions that are numerically more stable, but take much more time and memory\nvel6, hyd6, dif6, coupling6, invfano6, tmech6, \\\nvel4, hyd4, dif4, coupling4, invfano4, tmech4, \\\nvel_relerr, hyd_relerr, dif_relerr = ll(quick=True) ", "Set-up plot range and resolution\nFull parameter space (Figs. 5 -- 9 in Ref. [1])", "[fmin, fmax, mumin, mumax] = [-30,30,-30,30] # boundaries of full phase diagram\nresolution = 400 # plot resolution\n\nplotarea = [fmin, fmax, mumin, mumax] # full plotarea\n\n# prepare the plotting grid for kinesin 6 figures\nxpts = linspace(fmin, fmax, resolution)\nypts = linspace(mumin, mumax, resolution)\nX, Y = meshgrid(xpts, ypts)\n", "Zoomed parameter space (Fig. 10 in Ref [1])", "[fmin, fmax, mumin, mumax] = [0,20,5,30] # boundaries of zoomed phase diagram\nresolution = 400 # plot resolution\n\nzoomarea = [fmin, fmax, mumin, mumax] # zoomed plotarea\n\n# prepare the plotting grid for kinesin 6 figures\nxpts = linspace(fmin, fmax, resolution)\nypts = linspace(mumin, mumax, resolution)\nXX, YY = meshgrid(xpts, ypts)\n", "Use the lambda functions to create data arrays\nLambda function evaluate the expressions at every point in the plotting grid", "# Full plot area\nV6 = vel6(X,Y)\nlog_V6 = (np.log( np.abs( V6 ))/np.log(10))\nH6 = hyd6(X,Y)\nlog_H6 = (np.log( np.abs( H6 ))/np.log(10))\nD6 = dif6(X,Y)\nlog_D6 = (np.log( np.abs( D6 ))/np.log(10))\nQTC6 = coupling6(X,Y)\nF6 = invfano6(X,Y)\nT6 = tmech6(X,Y)\n\nV_relerr = vel_relerr(X,Y)\nH_relerr = hyd_relerr(X,Y)\nD_relerr = dif_relerr(X,Y)\n\n# Zoomed plot area (comparison plot Fig. 
10)\nVLa_comp = velocityLa(XX,YY)\nTLa_comp = tmechLa(XX,YY)\n\nV6_comp = vel6(XX,YY)\nT6_comp = tmech6(XX,YY)", "General Figure Settings", "## Setup for Figures (for instance for talks, slides, posters)\n\nfig_size = (12,10) # in inch\nfs = 25 # font size\n\nfont = {'family' : 'serif',\n 'color' : 'black',\n 'weight' : 'normal',\n 'size' : fs,\n }\n\nfigdir = \"plotdata/\"\n\ndef pplot(g, t, logplot=True, highlight=0, crop=[], numerical=False):\n fig = figure(figsize=fig_size)\n \n if(numerical):\n G = g\n GG = G\n ccmm = cm.YlOrBr\n elif(logplot):\n G = np.log(np.abs(g(X,Y)))/np.log(10)\n GG = g(X,Y)\n ccmm = cm.gist_earth\n else:\n G = g(X,Y)\n GG = G\n ccmm = cm.coolwarm\n\n # the slicing parameter [::-1] reverses the y-axis before plotting\n \n im = imshow( G[::-1], cmap=ccmm, extent=plotarea ) # drawing the function\n if(len(crop)==2):\n im.set_clim(vmin=crop[0], vmax=crop[1])\n \n # adding the Contour lines with labels\n if( not numerical):\n cset1 = contour( X,Y, G, arange(-20,20,1),linewidths=1,linestyles=\"-\",colors='black')\n stalling = contour( X,Y, GG, [highlight], linewidths=3,linestyles=\"-\",colors='white')\n \n # adding the colobar on the right\n cb = colorbar(im)\n \n # latex fashion title\n title(t, fontdict=font)\n xlabel('$f$', fontdict=font)\n ylabel(r'$\\Delta \\mu$', fontdict=font) # r'\\newcommand' is a raw python string, meaning \\n is not replaced by a newline, etc.\n \n #savefig(figdir+t+\".png\")\n return(fig)\n", "Create some actual plots\nSix-state model of Ref [2]", "# Some exemplary figures\n\npplot(vel6,\"Velocity 6-state (Fig. 5b)\", crop=[-20,2])\npplot(dif6,\"Diffusion 6-state (Fig. 7a)\", crop=[-20,2])\npplot(coupling6,\"Current ratio 6-state (Fig. 5c)\",logplot=False, crop=[-2,2], highlight=1)\npplot(tmech6,\"Mechanical Response 6-state (Fig. 8)\",logplot=False, crop=[-2,2],highlight=1)\n\n# Efficiency of energy conversion, see Fig. 6\nP_mech = X*vel6(X,Y) # mechanical power\nP_chem = Y*hyd6(X,Y) # chemical power\n\ninversion = np.sign(P_mech)*np.sign(P_chem)\neff = P_mech/P_chem\nEFF = np.minimum((eff)**inversion,(eff)**(-inversion))\n\npplot(EFF,\"Efficiency\",crop=[0,1],numerical=True)\nprint(\"Done\")", "Corresponding plots in four-state model of Ref [1] (Figures are not in the paper, only relative errors are shown in Fig. 9)", "pplot(vel4,\"Velocity 4-state\", crop=[-20,2])\npplot(dif4,\"Diffusion 4-state\", crop=[-20,2])\npplot(coupling4,\"Current ratio 4-state\",logplot=False, crop=[-2,2], highlight=1)\npplot(tmech4,\"Normalized mechanical response 4-state\",logplot=False, crop=[-2,2],highlight=1)\nprint(\"Done\")", "Some plots for the model of Ref [3] (Note the deviation to the models above in physically unrealistic parameter regions)", "pplot(velocityLa,\"Velocity Lau et. al. (2007)\", crop=[-20,2])\npplot(diffusionLa,\"Diffusion Lau et. al. (2007)\", crop=[-20,2])\npplot(tmechLa,\"Mechanical Response Lau et. al. (2007)\",logplot=False, crop=[-2,2],highlight=1)\nprint(\"Done\")", "Code that generates Fig. 10 in Ref. 
[1]", "# Colors\ntango_chameleon3 = \"#4e9a06\"\ntango_sky3 = \"#204a87\"\n\n#Fonts\noverlay = {'family' : 'serif',\n 'color' : 'white',\n 'weight' : 'normal',\n 'size' : 55,\n }\n\na=1.5 # scaling factor: figure size/font size\n\nf = figure(figsize=(3.5*a,1.8*a))\ngs = GridSpec(100,100)\n\n\naxLL = f.add_subplot(gs[:,0:43])\nim = axLL.imshow( T6_comp[::-1], cmap=cm.coolwarm, extent=zoomarea, aspect=1 ) # drawing the function\n# adding the Contour lines with labels\nstalling = contour( XX,YY, V6_comp, [0], linewidths=1.4,linestyles=\"-\",colors='white')\n\nFDR = contour( XX,YY, T6_comp , [1], linewidths=1,linestyles=\"--\",colors=tango_chameleon3)\nfor c in FDR.collections:\n c.set_dashes([(0, (6, 3))])\nNDR = contour( XX,YY, T6_comp , [0], linewidths=1,linestyles=\"--\",colors=tango_sky3)\nfor c in NDR.collections:\n c.set_dashes([(0, (1.0, 1.0))])\n\n#axLL.set_title(r'Liepelt, Lipowsky,' '\\n' r'Phys. Rev. Lett. \\textbf{98} (2007)', fontsize=5)\ntext(.5, 27, r\"Liepelt \\& Lipowsky\" '\\n' r\"Phys.~Rev.~Lett.~\\textbf{98} (2007)\", fontdict=overlay,fontsize=6,color='black') # u'Unícôdè ∫triŋs'\n\n\nxlabel('$f$', labelpad = 2)\nylabel(r'$\\Delta\\mu$', labelpad=2) #labelpad: move label closer to axis\n\nim.set_clim(vmin=-2, vmax=2)\n\n\naxLa = f.add_subplot(gs[:,32:100])\nim = axLa.imshow( TLa_comp [::-1], cmap=cm.coolwarm, extent=zoomarea, aspect=1 ) # drawing the function\n# adding the Contour lines with labels\nstalling = contour( XX,YY, VLa_comp, [0], linewidths=1.4,linestyles=\"-\",colors='white')\nFDR = contour( XX,YY, TLa_comp , [1], linewidths=1,linestyles=\"--\",colors=tango_chameleon3)\nfor c in FDR.collections:\n c.set_dashes([(0, (6, 3))])\nNDR = contour( XX,YY, TLa_comp , [0], linewidths=1,linestyles=\"--\",colors=tango_sky3)\nfor c in NDR.collections:\n c.set_dashes([(0, (1.0, 1.0))])\ntext(.5, 27, r\"Lau, Lacoste \\& Mallick\" '\\n' r\"Phys.~Rev.~Lett.~\\textbf{99} (2007)\", fontdict=overlay,fontsize=6,color='black') # u'Unícôdè ∫triŋs'\n\n## Modify labels\nsetp( axLa.get_yticklabels(), visible=False) # hide y labels on La plot\n#axLa.set_xticks(arange(5,25,5)) # don't display zero\naxLL.set_xticks(arange(0,20,5)) # don't display \"20\"\n\nxlabel('$f$', labelpad = 2)\nim.set_clim(vmin=-2, vmax=2)\n\n\ncb=colorbar(im,ticks=arange(-2,2.1,.5))" ]
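The lambdification step mentioned in the kinesin cells above (lambdification.py turning the symbolic cumulant expressions into fast numerical lambda functions) follows the standard sympy.lambdify pattern. The sketch below is not the repository's code; it only illustrates that pattern on a toy expression, and the names `toy_expr` and `toy_func` are invented for illustration.

```python
import numpy as np
import sympy as sp

# toy stand-in for a symbolic current-like expression in the driving forces f and dmu
f, dmu = sp.symbols('f dmu')
toy_expr = sp.simplify(sp.sinh(dmu - f) / (sp.cosh(dmu - f) + 2))

# turn the sympy expression into a NumPy-aware lambda function
toy_func = sp.lambdify((f, dmu), toy_expr, modules='numpy')

# evaluate it on a meshgrid, the same way vel6, dif6, ... are used for the phase diagrams
F, MU = np.meshgrid(np.linspace(-30, 30, 400), np.linspace(-30, 30, 400))
Z = toy_func(F, MU)
print(Z.shape)  # (400, 400)
```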
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
junhwanjang/DataSchool
Lecture/08. 기초 확률론 2 - 확률분포/4) 누적 분포 함수와 확률 밀도 함수.ipynb
mit
[ "%install_ext https://raw.githubusercontent.com/meduz/ipython_magics/master/tikzmagic.py\n\n%load_ext tikzmagic",
"The Cumulative Distribution Function and the Probability Density Function\nThe cumulative distribution function and the probability density function are the mathematical expressions used to define the distribution of a random variable, i.e. its probability distribution.\nDescribing a probability distribution\nIn the definition of probability, a probability is a number assigned to an event, which is a set of samples. To analyse data we need to describe and communicate exactly how that probability has been assigned. A description of how much probability is assigned to which events is called a probability distribution.\nDescribing a probability distribution is by no means an easy task, because in principle every event has to be listed one by one together with the number assigned to it. Using a random variable makes this description much easier, however, because an event becomes an interval, and an interval is specified by only two numbers, a start point and an end point.\n[[school_notebook:4bcfe70a64de40ec945639236b0e911d]]\nStill, it is somewhat inconvenient that defining a single event, i.e. a single interval, takes two numbers rather than one. Is there a way to define an event, i.e. an interval, with a single number? One idea is to make every interval start at the same number, negative infinity ($-\infty$); in other words, among all possible intervals we only use those whose start point is negative infinity. \n$${ -\infty \leq X < -1 } $$\n$${ -\infty \leq X < 0 } $$\n$${ -\infty \leq X < 1 } $$\n$${ -\infty \leq X < 2 } $$\n$$ \vdots $$\n$$ { -\infty \leq X < x } $$\n$$ \vdots $$\nOf course these intervals are only a small part of all the events that make up the sigma field. However, using the definitions of the probability space and the sigma field, intervals whose start point is not negative infinity can be generated from them, and the probabilities of those newly generated intervals can be computed according to the definition of probability.\nCumulative probability distribution\nA probability distribution described in this way is called a cumulative distribution function, or cumulative probability distribution, abbreviated cdf. A cdf is usually written with a capital letter, as in $F(x)$, where the independent variable $x$ denotes the end of the range; the start of the range is negative infinity ($-\infty$).\nThe mathematical definition of the cumulative probability distribution $F(x)$ of a random variable $X$ is as follows.\n$$ F(x) = P({X < x}) = P(X < x)$$\nSome examples of cumulative probability distribution values:\n$$ \vdots $$\n* $F(-1)$ : the probability that the random variable lies in the interval from $-\infty$ up to, but not including, -1, i.e. $P( { -\infty \leq X < -1 })$\n* $F(0)$ : the probability that the random variable lies in the interval from $-\infty$ up to, but not including, 0, i.e. $P( { -\infty \leq X < 0 })$\n* $F(1)$ : the probability that the random variable lies in the interval from $-\infty$ up to, but not including, 1, i.e. $P( { -\infty \leq X < 1 })$\n$$ \vdots $$\n* $F(10)$ : the probability that the random variable lies in the interval from $-\infty$ up to, but not including, 10, i.e. $P( { -\infty \leq X < 10 })$\n$$ \vdots $$",
"%%tikz\n\\filldraw [fill=white] (0,0) circle [radius=1cm];\n\\foreach \\angle in {60,30,...,-270} {\n \\draw[line width=1pt] (\\angle:0.9cm) -- (\\angle:1cm);\n}\n\\draw (0,0) -- (90:0.8cm);",
"Let us take the clock-hand probability problem as an example. There the angle runs from 0 to 360 degrees, but nothing prevents us from using negative infinity as the start point.\n$$ F(0) = P({ -\infty {}^{\circ} \leq \theta < 0 {}^{\circ} }) = 0 $$\n$$ F(10) = P({ -\infty {}^{\circ} \leq \theta < 10 {}^{\circ} }) = \dfrac{1}{36} $$\n$$ F(20) = P({ -\infty {}^{\circ} \leq \theta < 20 {}^{\circ} }) = \dfrac{2}{36} $$\n$$ \vdots $$\n$$ F(350) = P({ -\infty {}^{\circ} \leq \theta < 350 {}^{\circ} }) = \dfrac{35}{36} $$\n$$ F(360) = P({ -\infty {}^{\circ} \leq \theta < 360 {}^{\circ} }) = 1 $$\n$$ F(370) = P({ -\infty {}^{\circ} \leq \theta < 370 {}^{\circ} }) = 1 $$\n$$ F(380) = P({ -\infty {}^{\circ} \leq \theta < 380 {}^{\circ} }) = 1 $$\n$$ \vdots $$\nDrawing this with NumPy and matplotlib gives the following plot.",
"t = np.linspace(-100, 500, 100)\nF = t / 360\nF[t < 0] = 0\nF[t > 360] = 1\nplt.plot(t, F)\nplt.ylim(-0.1, 1.1)\nplt.xticks([0, 180, 360]);\nplt.title(\"Cumulative Distribution Function\");\nplt.xlabel(\"$x$ (deg.)\");\nplt.ylabel(\"$F(x)$\");",
"The cumulative distribution function, i.e. the cdf, has the following properties.\n\n$F(-\infty) = 0$\n$F(+\infty) = 1$\n$F(x) \geq F(y) \;\; \text{ if } \;\; x > y $\n\nProbability density function\nThe cumulative distribution function turns a probability distribution into the convenient form of a function. It states mathematically and unambiguously how much probability is distributed over which events. However, because the events the cdf describes are intervals that start at negative infinity and end at the variable $x$, it is hard to grasp the shape of the distribution intuitively. In other words, it is hard to tell which values of the random variable occur more often.\nTo see that, it is convenient to divide the whole range the random variable can take ($-\infty$ ~ $\infty$) into intervals of very small width and to look at the probability of each interval. The drawback is that this requires an extra convention about how wide those intervals should be, which limits its practical use.\nThe probability density function was devised to make up for this drawback: it shows only the relative shape of the probability distribution rather than absolute probabilities.\nConsider how the cumulative probability distribution changes as we move to the right along the x axis of its graph. If no probability is assigned to the interval around a particular value of $x$, the cumulative distribution function does not increase while passing that interval, i.e. its slope is 0. The reason is that a larger $x$ (moving to the right along the x axis) describes the probability of a larger interval (event) containing the previous one, and if the newly included part carries no probability, the interval with and without that part have the same probability.\nWhen the slope of the cumulative distribution function is not 0, the newly included interval has been assigned a non-zero probability. If more probability is assigned there, the cumulative distribution function rises faster while passing that interval; in other words, its slope becomes larger. In this way, the magnitude of the slope of the cumulative distribution tells us the relative amount of probability assigned at each location.\nSince the mathematical operation that yields a slope is differentiation, the probability density function is defined as the derivative of the cumulative distribution function.\n$$ \dfrac{dF(x)}{dx} = f(x) $$\nWritten as an integral this becomes\n$$ F(x) = \int_{-\infty}^{x} f(u) du $$\nKeep in mind that the probability density function only indicates how probable a given interval of the random variable is relative to other intervals; its value itself is not a probability.\nThe probability density function has the following properties.\n\nIntegrating it from $-\infty$ to $\infty$ gives 1.\n\n$$ \int_{-\infty}^{\infty} f(u)du = 1$$\n\nThe probability density function is greater than or equal to 0.\n\n$$ f(x) \geq 0 $$\nComputing the probability density function for the clock-hand problem shown earlier gives the following.",
"t = np.linspace(-100, 500, 1000)\nF = t / 360\nF[t < 0] = 0\nF[t > 360] = 1\nf = np.gradient(F) # numerical differentiation\nplt.plot(t, f)\nplt.ylim(-0.0001, f.max()*1.1)\nplt.xticks([0, 180, 360]);\nplt.title(\"Probability Density Function\");\nplt.xlabel(\"$x$ (deg.)\");\nplt.ylabel(\"$f(x)$\");",
"Probability mass function\nA discrete probability distribution has no probability density function; instead it has a probability mass function. A probability mass function defines a probability for each individual value a discrete random variable can take. For example, rolling a six-sided die gives the discrete values 1 through 6, and such a discrete random variable could have, for instance, the following probability mass function. The case shown here is the probability distribution of an unfair die.",
"x = np.arange(1,7)\ny = np.array([0.0, 0.1, 0.1, 0.2, 0.2, 0.4])\nplt.stem(x, y);\nplt.xlim(0, 7);\nplt.ylim(-0.01, 0.5);",
"The probability mass function above describes an abnormal (unfair) die for which the face 1 never comes up and the face 6 comes up abnormally often.\nAccumulating (summing) the values for this random variable gives the cumulative distribution function of the discrete random variable.",
"x = np.arange(1,7)\ny = np.array([0.0, 0.1, 0.1, 0.2, 0.2, 0.4])\nz = np.cumsum(y)\nplt.step(x, z);\nplt.xlim(0, 7);\nplt.ylim(-0.01, 1.1);" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
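As a quick numerical sanity check of the relation $f(x) = dF(x)/dx$ used in the notebook above, the sketch below (an illustrative addition, not a cell of the original notebook) rebuilds the cdf of the clock-hand example by summing the density back up; the reconstruction agrees with the original cdf up to discretisation error.

```python
import numpy as np

t = np.linspace(-100, 500, 1000)
F = np.clip(t / 360, 0, 1)              # cdf of the clock-hand example, as above
dt = t[1] - t[0]
f = np.gradient(F, dt)                  # density: numerical derivative dF/dx

F_rebuilt = np.cumsum(f) * dt           # integrate the density back up
print(np.max(np.abs(F_rebuilt - F)))    # small (discretisation error near the kinks)
```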
sbu-python-summer/python-tutorial
day-5/scipy-exercises.ipynb
bsd-3-clause
[ "import numpy as np\n%matplotlib inline\nimport matplotlib.pyplot as plt", "Q1: integrating a sampled vs. analytic function\nNumerical integration methods work differently depending on whether you have the analytic function available (in which case you can evaluate it freely at any point you please) or if it is sampled for you.\nCreate a function to integrate, and use NumPy to sample it at $N$ points. Compare the answer you get from integrating the function directly (using integrate.quad) to the integral of the sampled function (using integrate.simps).\nTo get a better sense of the accuracy, vary $N$, and look at how the error changes (if you plot the error vs. $N$, you can measure the convergence).\nQ2: Condition number\nFor a linear system, ${\bf A x} = {\bf b}$, we can only solve for $x$ if the determinant of the matrix ${\bf A}$ is non-zero. If the determinant is zero, then we call the matrix singular. The condition number of a matrix is a measure of how close we are to being singular. The formal definition is:\n\begin{equation}\n\mathrm{cond}({\bf A}) = \| {\bf A}\| \| {\bf A}^{-1} \|\n\end{equation}\nBut we can think of it as a measure of how much ${\bf x}$ would change due to a small change in ${\bf b}$. A large condition number means that our solution for ${\bf x}$ could be inaccurate.\nA Hilbert matrix has $H_{ij} = (i + j - 1)^{-1}$, and is known to have a large condition number. Here's a routine to generate a Hilbert matrix", "def hilbert(n):\n \"\"\" return a Hilbert matrix, H_ij = (i + j - 1)^{-1} \"\"\"\n\n H = np.zeros((n,n), dtype=np.float64)\n\n for i in range(1, n+1):\n for j in range(1, n+1):\n H[i-1,j-1] = 1.0/(i + j - 1.0)\n return H", "Let's solve ${\bf Hx} = {\bf b}$. Create a linear system by picking an ${\bf x}$ and generating a ${\bf b}$ by multiplying by the matrix ${\bf H}$. Then use the scipy.linalg.solve() function to recover ${\bf x}$. Compute the error in ${\bf x}$ as a function of the size of the matrix.\nYou won't need a large matrix; $n \sim 13$ or so will start showing big errors.\nYou can compute the condition number with numpy.linalg.cond().\nThere are methods that can do a better job with nearly-singular matrices. Take a look at scipy.linalg.lstsq() for example.\nQ3: damped driven pendulum and chaos\nThere is a large class of ODE integration methods available through the scipy.integrate.ode() function. Not all of them provide dense output -- most will just give you the value at the end of the integration. \nThe explicit dopri5 integrator will store the solution at intermediate points and allow you to access them. We'll use that here. You'll need to use the set_solout() method to define a function that takes the current integration solution and stores it.\nThe damped driven pendulum obeys the following equations:\n$$\dot{\theta} = \omega$$\n$$\dot{\omega} = -q \omega - \sin \theta + b \cos \omega_d t$$\nhere, $\theta$ is the angle of the pendulum from vertical and $\omega$ is the angular velocity. $q$ is a damping coefficient, $b$ is a forcing amplitude, and $\omega_d$ is a driving frequency.\nChoose $q = 0.5$ and $\omega_d = 2/3$.\nIntegrate the system for different values of $b$ (start with $b = 0.9$ and increase by $0.05$), and plot the results ($\theta$ vs. $t$). Here's a RHS function to get you started:", "def rhs(t, Y, q, omega_d, b):\n \"\"\" damped driven pendulum system derivatives. Here, Y = (theta, omega) are\n the solution variables. \"\"\"\n f = np.zeros_like(Y)\n \n f[0] = Y[1]\n f[1] = -q*Y[1] - np.sin(Y[0]) + b*np.cos(omega_d*t)\n\n return f", "Note that the pendulum can flip over, giving values of $\theta$ outside of $[-\pi, \pi]$. The following function can be used to restrict it back to $[-\pi, \pi]$ for plotting.", "def restrict_theta(theta):\n \"\"\" convert theta to be restricted to lie between -pi and pi\"\"\"\n tnew = theta + np.pi\n tnew += -2.0*np.pi*np.floor(tnew/(2.0*np.pi))\n tnew -= np.pi\n return tnew", "Write a function that takes an initial angle, $\theta_0$, integrates the system, and returns the solution. For the parameters that are part of the rhs() function, you'll need to use the set_f_params() method. \nSome values of $b$ will show very non-periodic behavior. To see chaos, integrate two different pendula that are the same except for $\theta_0$, with only a small difference between them (like 60 degrees and 60.0001 degrees). You'll see the solutions track for a while, but then diverge.\nQ4: Let's find the errors on our fit\nWe looked at fits, but not what the errors are on the fit. Look at scipy.optimize.curve_fit(). This is a simplified wrapper on the least squares fitting. It can return the covariance matrix, the diagonals of which can give the error of the fit for the parameters. \nMake up some data that models a non-linear function (by introducing some random noise), perform a fit, and find the errors on the parameters." ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
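For Q1 in the exercises above, one possible sketch (a hedged illustration, not a provided solution) compares scipy.integrate.quad on the analytic function with scipy.integrate.simps on $N$ samples; the test function $\sin^2 x$ and the values of $N$ are arbitrary choices.

```python
import numpy as np
from scipy import integrate

def func(x):
    return np.sin(x)**2            # arbitrary smooth test function

# integrate the analytic function directly
exact, err = integrate.quad(func, 0.0, np.pi)

# integrate sampled versions of the same function and watch the error shrink with N
for N in [5, 9, 17, 33, 65]:
    x = np.linspace(0.0, np.pi, N)
    sampled = integrate.simps(func(x), x)
    print(N, abs(sampled - exact))
```

For smooth functions Simpson's rule converges roughly as $N^{-4}$, so the error should drop by about a factor of 16 each time the number of intervals is doubled.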
deepfield/ibis
docs/source/notebooks/tutorial/1-Intro-and-Setup.ipynb
apache-2.0
[ "Impala/HDFS Intro and Setup\nGetting started\nYou're going to want to make sure you can import ibis", "import ibis\nimport os", "If you have WebHDFS available, connect to HDFS according to your WebHDFS config. For kerberized or more complex HDFS clusters, please look at http://hdfscli.readthedocs.org/en/latest/ for info on connecting. You can use a connection from that library instead of using hdfs_connect", "hdfs_port = os.environ.get('IBIS_WEBHDFS_PORT', 50070)\nhdfs = ibis.hdfs_connect(host='quickstart.cloudera', port=hdfs_port)", "Finally, create the Ibis client", "con = ibis.impala.connect('quickstart.cloudera', hdfs_client=hdfs)\ncon", "Obviously, substitute the parameters that are appropriate for your environment (see the docstring for ibis.impala.connect). impala.connect uses the same parameters as Impyla's (https://pypi.python.org/pypi/impyla) DBAPI interface" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
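The tutorial above notes that a client from the hdfscli library can be used in place of ibis.hdfs_connect. A hypothetical sketch of that route is shown below; the URL and user are placeholders, and whether a raw hdfs client can be handed straight to ibis.impala.connect as hdfs_client may depend on the ibis version, so treat this as an assumption to verify rather than documented behaviour.

```python
import ibis
from hdfs import InsecureClient  # from the hdfscli package (hdfscli.readthedocs.org)

# build the WebHDFS client ourselves (placeholder host, port, and user)
hdfs_client = InsecureClient('http://quickstart.cloudera:50070', user='cloudera')

# pass it to ibis instead of the object returned by ibis.hdfs_connect
con = ibis.impala.connect('quickstart.cloudera', hdfs_client=hdfs_client)
```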
muniri92/Echo-Pod
Statistics 4.ipynb
mit
[ "Statistics Set 4\nThis notebook is for practice using baseball data to build univariate and multivariate linear models.\nUnivariate Linear Regression Model\nCreate a ULR model using some player stat to predict salary. \nDo the following:\n1. With the baseball data linked above (some of it was explored in Statistics 4), create a univariate Linear Regression model predicting player salary using some player stat.\n2. You will need to join a second table to the “Salaries.csv” table.\n3. Cross-validate your model, and produce 68, 95, and 99.7% confidence intervals for your “slope”.\n4. Report the $R^2$ score for your univariate model.\n5. Make a scatter plot of your data, with your model predictions overlaid in red.\nImport and Clean data\nWe import the two files of salary and batting stats and remove the batting rows that have NaN in a column so we can process them later.", "import pandas as pd\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nbaseball_dir = \"lahman-csv_2015-01-24/\"\nsalaries = pd.read_csv(baseball_dir + \"Salaries.csv\", sep=\",\")\nbatting = pd.read_csv(baseball_dir + \"Batting.csv\", sep=\",\")\nbatting.dropna(inplace=True)\nbatting.info()\n", "Below we combine the two sheets by joining on player ID into one large table, then create a plot of all data points for batters who have hit at least one home run.", "total_salaries = salaries.groupby([\"playerID\"])[\"salary\"].sum()\ntotal_batting = batting.groupby([\"playerID\"])[[\"HR\", 'HBP', 'G','stint']].sum()\n\nall_stats = pd.concat((total_batting, total_salaries), axis=1)\nall_stats = all_stats[(all_stats.HR > 0) & (all_stats.salary > 0)]\n\nplt.figure(figsize=(12, 4))\nplt.scatter(all_stats.HR, all_stats.salary, edgecolor=\"None\",\n s=5, c='k', alpha=0.2)\nplt.yscale(\"log\")\nplt.xlabel(\"Home Runs\", fontsize=12); plt.ylabel(\"Salary ($)\", fontsize=12)\nplt.minorticks_on()\nplt.xlim(-50, 800)\nplt.show()", "We create a univariate linear regression model to best fit our data points, taking home runs as our 'x' input and salary as our 'y'.", "from sklearn import linear_model\nimport sklearn.cross_validation as cv\n\nkfolds = cv.KFold(len(all_stats), n_folds=10)\n\nregressor = linear_model.LinearRegression()\nXvals = np.array(all_stats.HR)[:, np.newaxis]\nyvals = np.array(all_stats.salary)\n\nslopes, intercepts = [], []\n\nfor train_index, test_index in kfolds:\n X_train, X_test = Xvals[train_index], Xvals[test_index]\n y_train, y_test = yvals[train_index], yvals[test_index]\n regressor.fit(X_train, y_train)\n slopes.append(regressor.coef_)\n intercepts.append(regressor.intercept_)\n\nslope = np.mean(slopes)\nintercept = np.mean(intercepts)\n\nregressor.coef_ = slope\nregressor.intercept_ = intercept\n\nprint(\"Our model is:\\n\\tSalary = %.2f x N_HomeRuns + %.2f\" % (slope, intercept))\n\nplt.figure(figsize=(12, 4))\nplt.scatter(all_stats.HR, all_stats.salary, edgecolor=\"None\",\n s=5, c='k', alpha=0.2)\nplt.scatter(Xvals, regressor.predict(Xvals), edgecolor=\"None\",\n s=2, c='r')\nplt.yscale(\"log\")\nplt.xlabel(\"Home Runs\", fontsize=12); plt.ylabel(\"Salary ($)\", fontsize=12)\nplt.minorticks_on()\nplt.xlim(-50, 800)\nplt.show()", "Our $R^2$ value is 0.376, as seen below.", "print(\"Score: {0}\".format(regressor.score(Xvals, yvals)))", "Multivariate Linear Regression Model\n\nUsing no more than 4 characteristics, create a multivariate Linear Regression model predicting player salary.\nReport the $R^2$ score for your multivariate model. 
Aim for $R^2 > 0.5$.\nMake a scatter plot of your data side-by-side with a scatter plot of your model predictions.\n\nWe have included the categories games, home runs, hit by pitcher, and stint as our four multivariate predictors.", "N_folds = 10\nkfolds = cv.KFold(len(all_stats), n_folds=N_folds)\n\nregressor = linear_model.LinearRegression()\nvalid_data = [\"HR\", 'HBP', 'G', 'stint']\nXvals = np.array(all_stats[valid_data])\nyvals = np.array(all_stats.salary)\n\ncoeffs, intercepts = [], []\n\nfor train_index, test_index in kfolds:\n X_train, X_test = Xvals[train_index], Xvals[test_index]\n y_train, y_test = yvals[train_index], yvals[test_index]\n regressor.fit(X_train, y_train)\n coeffs.append(regressor.coef_)\n intercepts.append(regressor.intercept_)\n\ncoeffs = np.array(coeffs).mean(axis=0) #averages each column\nintercept = np.array(intercepts).mean(axis=0)\n\nregressor.coef_ = coeffs\nregressor.intercept_ = intercept", "Using these four categories we were able to get an $R^2$ value of 0.414, as seen below.", "print(\"Score: {0}\".format(regressor.score(Xvals, yvals)))", "Below we plot the actual data side-by-side with our model's predictions.", "fig = plt.figure(figsize=(12, 4))\nfig.subplots_adjust(wspace=0)\nax = plt.subplot(121)\nax.scatter(all_stats.HR, all_stats.salary, edgecolor=\"None\",\n s=5, c='k', alpha=0.2)\nax.set_yscale(\"log\")\nax.set_xlabel(\"Home Runs\", fontsize=12); ax.set_ylabel(\"Salary ($)\", fontsize=12)\nax.set_xlim(-50, 800); ax.minorticks_on()\n\nax = plt.subplot(122)\nax.scatter(Xvals[:, 0], regressor.predict(Xvals), edgecolor=\"None\",\n s=2, c='r')\nax.set_xlabel(\"Home Runs\", fontsize=12)\nax.set_ylim(1E4, 1E9)\nax.set_yscale(\"log\"); ax.set_yticklabels([])\nax.set_xlim(-50, 800); ax.minorticks_on()\n\nplt.show()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
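The exercise above asks for 68, 95, and 99.7% confidence intervals on the slope, which the notebook itself never computes. One hedged way to produce them from the per-fold slopes already collected in the `slopes` list is the normal 1/2/3-sigma rule on the fold mean, sketched below (this assumes the fold-to-fold slope estimates are roughly normally distributed, which is not checked here).

```python
import numpy as np

fold_slopes = np.ravel(slopes)          # per-fold slope estimates from the cross-validation above
mean_slope = fold_slopes.mean()
sem = fold_slopes.std(ddof=1) / np.sqrt(len(fold_slopes))   # standard error of the mean

for n_sigma, level in [(1, 68), (2, 95), (3, 99.7)]:
    low, high = mean_slope - n_sigma * sem, mean_slope + n_sigma * sem
    print("%.1f%% CI for slope: [%.0f, %.0f]" % (level, low, high))
```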
ES-DOC/esdoc-jupyterhub
notebooks/fio-ronm/cmip6/models/sandbox-2/atmos.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Atmos\nMIP Era: CMIP6\nInstitute: FIO-RONM\nSource ID: SANDBOX-2\nTopic: Atmos\nSub-Topics: Dynamical Core, Radiation, Turbulence Convection, Microphysics Precipitation, Cloud Scheme, Observation Simulation, Gravity Waves, Solar, Volcanos. \nProperties: 156 (127 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:54:01\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'fio-ronm', 'sandbox-2', 'atmos')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties --&gt; Overview\n2. Key Properties --&gt; Resolution\n3. Key Properties --&gt; Timestepping\n4. Key Properties --&gt; Orography\n5. Grid --&gt; Discretisation\n6. Grid --&gt; Discretisation --&gt; Horizontal\n7. Grid --&gt; Discretisation --&gt; Vertical\n8. Dynamical Core\n9. Dynamical Core --&gt; Top Boundary\n10. Dynamical Core --&gt; Lateral Boundary\n11. Dynamical Core --&gt; Diffusion Horizontal\n12. Dynamical Core --&gt; Advection Tracers\n13. Dynamical Core --&gt; Advection Momentum\n14. Radiation\n15. Radiation --&gt; Shortwave Radiation\n16. Radiation --&gt; Shortwave GHG\n17. Radiation --&gt; Shortwave Cloud Ice\n18. Radiation --&gt; Shortwave Cloud Liquid\n19. Radiation --&gt; Shortwave Cloud Inhomogeneity\n20. Radiation --&gt; Shortwave Aerosols\n21. Radiation --&gt; Shortwave Gases\n22. Radiation --&gt; Longwave Radiation\n23. Radiation --&gt; Longwave GHG\n24. Radiation --&gt; Longwave Cloud Ice\n25. Radiation --&gt; Longwave Cloud Liquid\n26. Radiation --&gt; Longwave Cloud Inhomogeneity\n27. Radiation --&gt; Longwave Aerosols\n28. Radiation --&gt; Longwave Gases\n29. Turbulence Convection\n30. Turbulence Convection --&gt; Boundary Layer Turbulence\n31. Turbulence Convection --&gt; Deep Convection\n32. Turbulence Convection --&gt; Shallow Convection\n33. Microphysics Precipitation\n34. Microphysics Precipitation --&gt; Large Scale Precipitation\n35. Microphysics Precipitation --&gt; Large Scale Cloud Microphysics\n36. Cloud Scheme\n37. Cloud Scheme --&gt; Optical Cloud Properties\n38. Cloud Scheme --&gt; Sub Grid Scale Water Distribution\n39. Cloud Scheme --&gt; Sub Grid Scale Ice Distribution\n40. Observation Simulation\n41. Observation Simulation --&gt; Isscp Attributes\n42. Observation Simulation --&gt; Cosp Attributes\n43. Observation Simulation --&gt; Radar Inputs\n44. Observation Simulation --&gt; Lidar Inputs\n45. Gravity Waves\n46. Gravity Waves --&gt; Orographic Gravity Waves\n47. Gravity Waves --&gt; Non Orographic Gravity Waves\n48. Solar\n49. Solar --&gt; Solar Pathways\n50. Solar --&gt; Solar Constant\n51. Solar --&gt; Orbital Parameters\n52. Solar --&gt; Insolation Ozone\n53. Volcanos\n54. Volcanos --&gt; Volcanoes Treatment \n1. Key Properties --&gt; Overview\nTop level key properties\n1.1. 
Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of atmosphere model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.overview.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of atmosphere model code (CAM 4.0, ARPEGE 3.2,...)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.overview.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.3. Model Family\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of atmospheric model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.overview.model_family') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"AGCM\" \n# \"ARCM\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.4. Basic Approximations\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nBasic approximations made in the atmosphere.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"primitive equations\" \n# \"non-hydrostatic\" \n# \"anelastic\" \n# \"Boussinesq\" \n# \"hydrostatic\" \n# \"quasi-hydrostatic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Resolution\nCharacteristics of the model resolution\n2.1. Horizontal Resolution Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of the model grid, e.g. T42, N48.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.2. Canonical Horizontal Resolution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nExpression quoted for gross comparisons of resolution, e.g. 2.5 x 3.75 degrees lat-lon.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.3. Range Horizontal Resolution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nRange of horizontal resolution with spatial details, eg. 1 deg (Equator) - 0.5 deg", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.4. Number Of Vertical Levels\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of vertical levels resolved on the computational grid.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "2.5. 
High Top\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the atmosphere have a high-top? High-Top atmospheres have a fully resolved stratosphere with a model top above the stratopause.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.resolution.high_top') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "3. Key Properties --&gt; Timestepping\nCharacteristics of the atmosphere model time stepping\n3.1. Timestep Dynamics\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTimestep for the dynamics, e.g. 30 min.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3.2. Timestep Shortwave Radiative Transfer\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTimestep for the shortwave radiative transfer, e.g. 1.5 hours.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3.3. Timestep Longwave Radiative Transfer\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTimestep for the longwave radiative transfer, e.g. 3 hours.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Orography\nCharacteristics of the model orography\n4.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime adaptation of the orography.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.orography.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"present day\" \n# \"modified\" \n# TODO - please enter value(s)\n", "4.2. Changes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nIf the orography type is modified describe the time adaptation changes.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.orography.changes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"related to ice sheets\" \n# \"related to tectonics\" \n# \"modified mean\" \n# \"modified variance if taken into account in model (cf gravity waves)\" \n# TODO - please enter value(s)\n", "5. Grid --&gt; Discretisation\nAtmosphere grid discretisation\n5.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview description of grid discretisation in the atmosphere", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6. Grid --&gt; Discretisation --&gt; Horizontal\nAtmosphere discretisation in the horizontal\n6.1. Scheme Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHorizontal discretisation type", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"spectral\" \n# \"fixed grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "6.2. Scheme Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHorizontal discretisation method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"finite elements\" \n# \"finite volumes\" \n# \"finite difference\" \n# \"centered finite difference\" \n# TODO - please enter value(s)\n", "6.3. Scheme Order\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHorizontal discretisation function order", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"second\" \n# \"third\" \n# \"fourth\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "6.4. Horizontal Pole\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nHorizontal discretisation pole singularity treatment", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"filter\" \n# \"pole rotation\" \n# \"artificial island\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "6.5. Grid Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHorizontal grid type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Gaussian\" \n# \"Latitude-Longitude\" \n# \"Cubed-Sphere\" \n# \"Icosahedral\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "7. Grid --&gt; Discretisation --&gt; Vertical\nAtmosphere discretisation in the vertical\n7.1. Coordinate Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nType of vertical coordinate system", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"isobaric\" \n# \"sigma\" \n# \"hybrid sigma-pressure\" \n# \"hybrid pressure\" \n# \"vertically lagrangian\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8. Dynamical Core\nCharacteristics of the dynamical core\n8.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview description of atmosphere dynamical core", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCommonly used name for the dynamical core of the model.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.atmos.dynamical_core.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.3. Timestepping Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTimestepping framework type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.timestepping_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Adams-Bashforth\" \n# \"explicit\" \n# \"implicit\" \n# \"semi-implicit\" \n# \"leap frog\" \n# \"multi-step\" \n# \"Runge Kutta fifth order\" \n# \"Runge Kutta second order\" \n# \"Runge Kutta third order\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.4. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of the model prognostic variables", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"surface pressure\" \n# \"wind components\" \n# \"divergence/curl\" \n# \"temperature\" \n# \"potential temperature\" \n# \"total water\" \n# \"water vapour\" \n# \"water liquid\" \n# \"water ice\" \n# \"total water moments\" \n# \"clouds\" \n# \"radiation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9. Dynamical Core --&gt; Top Boundary\nType of boundary layer at the top of the model\n9.1. Top Boundary Condition\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTop boundary condition", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"sponge layer\" \n# \"radiation boundary condition\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9.2. Top Heat\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTop boundary heat treatment", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.3. Top Wind\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTop boundary wind treatment", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Dynamical Core --&gt; Lateral Boundary\nType of lateral boundary condition (if the model is a regional model)\n10.1. Condition\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nType of lateral boundary condition", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"sponge layer\" \n# \"radiation boundary condition\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "11. Dynamical Core --&gt; Diffusion Horizontal\nHorizontal diffusion scheme\n11.1. Scheme Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nHorizontal diffusion scheme name", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.2. Scheme Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHorizontal diffusion scheme method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"iterated Laplacian\" \n# \"bi-harmonic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "12. Dynamical Core --&gt; Advection Tracers\nTracer advection scheme\n12.1. Scheme Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTracer advection scheme name", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Heun\" \n# \"Roe and VanLeer\" \n# \"Roe and Superbee\" \n# \"Prather\" \n# \"UTOPIA\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "12.2. Scheme Characteristics\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nTracer advection scheme characteristics", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Eulerian\" \n# \"modified Euler\" \n# \"Lagrangian\" \n# \"semi-Lagrangian\" \n# \"cubic semi-Lagrangian\" \n# \"quintic semi-Lagrangian\" \n# \"mass-conserving\" \n# \"finite volume\" \n# \"flux-corrected\" \n# \"linear\" \n# \"quadratic\" \n# \"quartic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "12.3. Conserved Quantities\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nTracer advection scheme conserved quantities", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"dry mass\" \n# \"tracer mass\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "12.4. Conservation Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTracer advection scheme conservation method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"conservation fixer\" \n# \"Priestley algorithm\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13. Dynamical Core --&gt; Advection Momentum\nMomentum advection scheme\n13.1. Scheme Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nMomentum advection schemes name", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"VanLeer\" \n# \"Janjic\" \n# \"SUPG (Streamline Upwind Petrov-Galerkin)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.2. 
Scheme Characteristics\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nMomentum advection scheme characteristics", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"2nd order\" \n# \"4th order\" \n# \"cell-centred\" \n# \"staggered grid\" \n# \"semi-staggered grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.3. Scheme Staggering Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMomentum advection scheme staggering type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Arakawa B-grid\" \n# \"Arakawa C-grid\" \n# \"Arakawa D-grid\" \n# \"Arakawa E-grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.4. Conserved Quantities\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nMomentum advection scheme conserved quantities", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Angular momentum\" \n# \"Horizontal momentum\" \n# \"Enstrophy\" \n# \"Mass\" \n# \"Total energy\" \n# \"Vorticity\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.5. Conservation Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMomentum advection scheme conservation method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"conservation fixer\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14. Radiation\nCharacteristics of the atmosphere radiation process\n14.1. Aerosols\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nAerosols whose radiative effect is taken into account in the atmosphere model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.aerosols') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"sulphate\" \n# \"nitrate\" \n# \"sea salt\" \n# \"dust\" \n# \"ice\" \n# \"organic\" \n# \"BC (black carbon / soot)\" \n# \"SOA (secondary organic aerosols)\" \n# \"POM (particulate organic matter)\" \n# \"polar stratospheric ice\" \n# \"NAT (nitric acid trihydrate)\" \n# \"NAD (nitric acid dihydrate)\" \n# \"STS (supercooled ternary solution aerosol particle)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15. Radiation --&gt; Shortwave Radiation\nProperties of the shortwave radiation scheme\n15.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview description of shortwave radiation in the atmosphere", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.2. 
Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCommonly used name for the shortwave radiation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.3. Spectral Integration\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nShortwave radiation scheme spectral integration", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"wide-band model\" \n# \"correlated-k\" \n# \"exponential sum fitting\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.4. Transport Calculation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nShortwave radiation transport calculation methods", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"two-stream\" \n# \"layer interaction\" \n# \"bulk\" \n# \"adaptive\" \n# \"multi-stream\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.5. Spectral Intervals\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nShortwave radiation scheme number of spectral intervals", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "16. Radiation --&gt; Shortwave GHG\nRepresentation of greenhouse gases in the shortwave radiation scheme\n16.1. Greenhouse Gas Complexity\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nComplexity of greenhouse gases whose shortwave radiative effects are taken into account in the atmosphere model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"CO2\" \n# \"CH4\" \n# \"N2O\" \n# \"CFC-11 eq\" \n# \"CFC-12 eq\" \n# \"HFC-134a eq\" \n# \"Explicit ODSs\" \n# \"Explicit other fluorinated gases\" \n# \"O3\" \n# \"H2O\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.2. ODS\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nOzone depleting substances whose shortwave radiative effects are explicitly taken into account in the atmosphere model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"CFC-12\" \n# \"CFC-11\" \n# \"CFC-113\" \n# \"CFC-114\" \n# \"CFC-115\" \n# \"HCFC-22\" \n# \"HCFC-141b\" \n# \"HCFC-142b\" \n# \"Halon-1211\" \n# \"Halon-1301\" \n# \"Halon-2402\" \n# \"methyl chloroform\" \n# \"carbon tetrachloride\" \n# \"methyl chloride\" \n# \"methylene chloride\" \n# \"chloroform\" \n# \"methyl bromide\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.3. 
Other Flourinated Gases\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nOther flourinated gases whose shortwave radiative effects are explicitly taken into account in the atmosphere model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"HFC-134a\" \n# \"HFC-23\" \n# \"HFC-32\" \n# \"HFC-125\" \n# \"HFC-143a\" \n# \"HFC-152a\" \n# \"HFC-227ea\" \n# \"HFC-236fa\" \n# \"HFC-245fa\" \n# \"HFC-365mfc\" \n# \"HFC-43-10mee\" \n# \"CF4\" \n# \"C2F6\" \n# \"C3F8\" \n# \"C4F10\" \n# \"C5F12\" \n# \"C6F14\" \n# \"C7F16\" \n# \"C8F18\" \n# \"c-C4F8\" \n# \"NF3\" \n# \"SF6\" \n# \"SO2F2\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17. Radiation --&gt; Shortwave Cloud Ice\nShortwave radiative properties of ice crystals in clouds\n17.1. General Interactions\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nGeneral shortwave radiative interactions with cloud ice crystals", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.2. Physical Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nPhysical representation of cloud ice crystals in the shortwave radiation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"bi-modal size distribution\" \n# \"ensemble of ice crystals\" \n# \"mean projected area\" \n# \"ice water path\" \n# \"crystal asymmetry\" \n# \"crystal aspect ratio\" \n# \"effective crystal radius\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.3. Optical Methods\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nOptical methods applicable to cloud ice crystals in the shortwave radiation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"T-matrix\" \n# \"geometric optics\" \n# \"finite difference time domain (FDTD)\" \n# \"Mie theory\" \n# \"anomalous diffraction approximation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "18. Radiation --&gt; Shortwave Cloud Liquid\nShortwave radiative properties of liquid droplets in clouds\n18.1. General Interactions\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nGeneral shortwave radiative interactions with cloud liquid droplets", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "18.2. 
Physical Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nPhysical representation of cloud liquid droplets in the shortwave radiation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"cloud droplet number concentration\" \n# \"effective cloud droplet radii\" \n# \"droplet size distribution\" \n# \"liquid water path\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "18.3. Optical Methods\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nOptical methods applicable to cloud liquid droplets in the shortwave radiation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"geometric optics\" \n# \"Mie theory\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "19. Radiation --&gt; Shortwave Cloud Inhomogeneity\nCloud inhomogeneity in the shortwave radiation scheme\n19.1. Cloud Inhomogeneity\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMethod for taking into account horizontal cloud inhomogeneity", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Monte Carlo Independent Column Approximation\" \n# \"Triplecloud\" \n# \"analytic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "20. Radiation --&gt; Shortwave Aerosols\nShortwave radiative properties of aerosols\n20.1. General Interactions\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nGeneral shortwave radiative interactions with aerosols", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "20.2. Physical Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nPhysical representation of aerosols in the shortwave radiation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"number concentration\" \n# \"effective radii\" \n# \"size distribution\" \n# \"asymmetry\" \n# \"aspect ratio\" \n# \"mixing state\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "20.3. Optical Methods\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nOptical methods applicable to aerosols in the shortwave radiation scheme", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"T-matrix\" \n# \"geometric optics\" \n# \"finite difference time domain (FDTD)\" \n# \"Mie theory\" \n# \"anomalous diffraction approximation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "21. Radiation --&gt; Shortwave Gases\nShortwave radiative properties of gases\n21.1. General Interactions\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nGeneral shortwave radiative interactions with gases", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "22. Radiation --&gt; Longwave Radiation\nProperties of the longwave radiation scheme\n22.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview description of longwave radiation in the atmosphere", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.2. Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCommonly used name for the longwave radiation scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_radiation.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.3. Spectral Integration\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nLongwave radiation scheme spectral integration", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"wide-band model\" \n# \"correlated-k\" \n# \"exponential sum fitting\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "22.4. Transport Calculation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nLongwave radiation transport calculation methods", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"two-stream\" \n# \"layer interaction\" \n# \"bulk\" \n# \"adaptive\" \n# \"multi-stream\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "22.5. Spectral Intervals\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nLongwave radiation scheme number of spectral intervals", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "23. Radiation --&gt; Longwave GHG\nRepresentation of greenhouse gases in the longwave radiation scheme\n23.1. 
Greenhouse Gas Complexity\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nComplexity of greenhouse gases whose longwave radiative effects are taken into account in the atmosphere model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"CO2\" \n# \"CH4\" \n# \"N2O\" \n# \"CFC-11 eq\" \n# \"CFC-12 eq\" \n# \"HFC-134a eq\" \n# \"Explicit ODSs\" \n# \"Explicit other fluorinated gases\" \n# \"O3\" \n# \"H2O\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23.2. ODS\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nOzone depleting substances whose longwave radiative effects are explicitly taken into account in the atmosphere model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"CFC-12\" \n# \"CFC-11\" \n# \"CFC-113\" \n# \"CFC-114\" \n# \"CFC-115\" \n# \"HCFC-22\" \n# \"HCFC-141b\" \n# \"HCFC-142b\" \n# \"Halon-1211\" \n# \"Halon-1301\" \n# \"Halon-2402\" \n# \"methyl chloroform\" \n# \"carbon tetrachloride\" \n# \"methyl chloride\" \n# \"methylene chloride\" \n# \"chloroform\" \n# \"methyl bromide\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23.3. Other Flourinated Gases\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nOther flourinated gases whose longwave radiative effects are explicitly taken into account in the atmosphere model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"HFC-134a\" \n# \"HFC-23\" \n# \"HFC-32\" \n# \"HFC-125\" \n# \"HFC-143a\" \n# \"HFC-152a\" \n# \"HFC-227ea\" \n# \"HFC-236fa\" \n# \"HFC-245fa\" \n# \"HFC-365mfc\" \n# \"HFC-43-10mee\" \n# \"CF4\" \n# \"C2F6\" \n# \"C3F8\" \n# \"C4F10\" \n# \"C5F12\" \n# \"C6F14\" \n# \"C7F16\" \n# \"C8F18\" \n# \"c-C4F8\" \n# \"NF3\" \n# \"SF6\" \n# \"SO2F2\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "24. Radiation --&gt; Longwave Cloud Ice\nLongwave radiative properties of ice crystals in clouds\n24.1. General Interactions\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nGeneral longwave radiative interactions with cloud ice crystals", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "24.2. Physical Reprenstation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nPhysical representation of cloud ice crystals in the longwave radiation scheme", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"bi-modal size distribution\" \n# \"ensemble of ice crystals\" \n# \"mean projected area\" \n# \"ice water path\" \n# \"crystal asymmetry\" \n# \"crystal aspect ratio\" \n# \"effective crystal radius\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "24.3. Optical Methods\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nOptical methods applicable to cloud ice crystals in the longwave radiation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"T-matrix\" \n# \"geometric optics\" \n# \"finite difference time domain (FDTD)\" \n# \"Mie theory\" \n# \"anomalous diffraction approximation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "25. Radiation --&gt; Longwave Cloud Liquid\nLongwave radiative properties of liquid droplets in clouds\n25.1. General Interactions\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nGeneral longwave radiative interactions with cloud liquid droplets", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "25.2. Physical Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nPhysical representation of cloud liquid droplets in the longwave radiation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"cloud droplet number concentration\" \n# \"effective cloud droplet radii\" \n# \"droplet size distribution\" \n# \"liquid water path\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "25.3. Optical Methods\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nOptical methods applicable to cloud liquid droplets in the longwave radiation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"geometric optics\" \n# \"Mie theory\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "26. Radiation --&gt; Longwave Cloud Inhomogeneity\nCloud inhomogeneity in the longwave radiation scheme\n26.1. Cloud Inhomogeneity\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMethod for taking into account horizontal cloud inhomogeneity", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Monte Carlo Independent Column Approximation\" \n# \"Triplecloud\" \n# \"analytic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "27. Radiation --&gt; Longwave Aerosols\nLongwave radiative properties of aerosols\n27.1. 
General Interactions\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nGeneral longwave radiative interactions with aerosols", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "27.2. Physical Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nPhysical representation of aerosols in the longwave radiation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"number concentration\" \n# \"effective radii\" \n# \"size distribution\" \n# \"asymmetry\" \n# \"aspect ratio\" \n# \"mixing state\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "27.3. Optical Methods\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nOptical methods applicable to aerosols in the longwave radiation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"T-matrix\" \n# \"geometric optics\" \n# \"finite difference time domain (FDTD)\" \n# \"Mie theory\" \n# \"anomalous diffraction approximation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "28. Radiation --&gt; Longwave Gases\nLongwave radiative properties of gases\n28.1. General Interactions\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nGeneral longwave radiative interactions with gases", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "29. Turbulence Convection\nAtmosphere Convective Turbulence and Clouds\n29.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview description of atmosphere convection and turbulence", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "30. Turbulence Convection --&gt; Boundary Layer Turbulence\nProperties of the boundary layer turbulence scheme\n30.1. Scheme Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nBoundary layer turbulence scheme name", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Mellor-Yamada\" \n# \"Holtslag-Boville\" \n# \"EDMF\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "30.2. Scheme Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nBoundary layer turbulence scheme type", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"TKE prognostic\" \n# \"TKE diagnostic\" \n# \"TKE coupled with water\" \n# \"vertical profile of Kz\" \n# \"non-local diffusion\" \n# \"Monin-Obukhov similarity\" \n# \"Coastal Buddy Scheme\" \n# \"Coupled with convection\" \n# \"Coupled with gravity waves\" \n# \"Depth capped at cloud base\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "30.3. Closure Order\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBoundary layer turbulence scheme closure order", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "30.4. Counter Gradient\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nUses boundary layer turbulence scheme counter gradient", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "31. Turbulence Convection --&gt; Deep Convection\nProperties of the deep convection scheme\n31.1. Scheme Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDeep convection scheme name", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "31.2. Scheme Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nDeep convection scheme type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"mass-flux\" \n# \"adjustment\" \n# \"plume ensemble\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "31.3. Scheme Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nDeep convection scheme method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"CAPE\" \n# \"bulk\" \n# \"ensemble\" \n# \"CAPE/WFN based\" \n# \"TKE/CIN based\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "31.4. Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nPhysical processes taken into account in the parameterisation of deep convection", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"vertical momentum transport\" \n# \"convective momentum transport\" \n# \"entrainment\" \n# \"detrainment\" \n# \"penetrative convection\" \n# \"updrafts\" \n# \"downdrafts\" \n# \"radiative effect of anvils\" \n# \"re-evaporation of convective precipitation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "31.5. 
Microphysics\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nMicrophysics scheme for deep convection. Microphysical processes directly control the amount of detrainment of cloud hydrometeor and water vapor from updrafts", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"tuning parameter based\" \n# \"single moment\" \n# \"two moment\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "32. Turbulence Convection --&gt; Shallow Convection\nProperties of the shallow convection scheme\n32.1. Scheme Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nShallow convection scheme name", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "32.2. Scheme Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nshallow convection scheme type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"mass-flux\" \n# \"cumulus-capped boundary layer\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "32.3. Scheme Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nshallow convection scheme method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"same as deep (unified)\" \n# \"included in boundary layer turbulence\" \n# \"separate diagnosis\" \n# TODO - please enter value(s)\n", "32.4. Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nPhysical processes taken into account in the parameterisation of shallow convection", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"convective momentum transport\" \n# \"entrainment\" \n# \"detrainment\" \n# \"penetrative convection\" \n# \"re-evaporation of convective precipitation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "32.5. Microphysics\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nMicrophysics scheme for shallow convection", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"tuning parameter based\" \n# \"single moment\" \n# \"two moment\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "33. Microphysics Precipitation\nLarge Scale Cloud Microphysics and Precipitation\n33.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview description of large scale cloud microphysics and precipitation", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.atmos.microphysics_precipitation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "34. Microphysics Precipitation --&gt; Large Scale Precipitation\nProperties of the large scale precipitation scheme\n34.1. Scheme Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCommonly used name of the large scale precipitation parameterisation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "34.2. Hydrometeors\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nPrecipitating hydrometeors taken into account in the large scale precipitation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"liquid rain\" \n# \"snow\" \n# \"hail\" \n# \"graupel\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "35. Microphysics Precipitation --&gt; Large Scale Cloud Microphysics\nProperties of the large scale cloud microphysics scheme\n35.1. Scheme Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCommonly used name of the microphysics parameterisation scheme used for large scale clouds.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "35.2. Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nLarge scale cloud microphysics processes", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"mixed phase\" \n# \"cloud droplets\" \n# \"cloud ice\" \n# \"ice nucleation\" \n# \"water vapour deposition\" \n# \"effect of raindrops\" \n# \"effect of snow\" \n# \"effect of graupel\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "36. Cloud Scheme\nCharacteristics of the cloud scheme\n36.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview description of the atmosphere cloud scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "36.2. Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCommonly used name for the cloud scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "36.3. Atmos Coupling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nAtmosphere components that are linked to the cloud scheme", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"atmosphere_radiation\" \n# \"atmosphere_microphysics_precipitation\" \n# \"atmosphere_turbulence_convection\" \n# \"atmosphere_gravity_waves\" \n# \"atmosphere_solar\" \n# \"atmosphere_volcano\" \n# \"atmosphere_cloud_simulator\" \n# TODO - please enter value(s)\n", "36.4. Uses Separate Treatment\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDifferent cloud schemes for the different types of clouds (convective, stratiform and boundary layer)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "36.5. Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nProcesses included in the cloud scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"entrainment\" \n# \"detrainment\" \n# \"bulk cloud\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "36.6. Prognostic Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the cloud scheme a prognostic scheme?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "36.7. Diagnostic Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the cloud scheme a diagnostic scheme?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "36.8. Prognostic Variables\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList the prognostic variables used by the cloud scheme, if applicable.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"cloud amount\" \n# \"liquid\" \n# \"ice\" \n# \"rain\" \n# \"snow\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "37. Cloud Scheme --&gt; Optical Cloud Properties\nOptical cloud properties\n37.1. Cloud Overlap Method\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nMethod for taking into account overlapping of cloud layers", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"random\" \n# \"maximum\" \n# \"maximum-random\" \n# \"exponential\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "37.2. Cloud Inhomogeneity\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nMethod for taking into account cloud inhomogeneity", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "38. Cloud Scheme --&gt; Sub Grid Scale Water Distribution\nSub-grid scale water distribution\n38.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSub-grid scale water distribution type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# TODO - please enter value(s)\n", "38.2. Function Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSub-grid scale water distribution function name", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "38.3. Function Order\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSub-grid scale water distribution function type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "38.4. Convection Coupling\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSub-grid scale water distribution coupling with convection", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"coupled with deep\" \n# \"coupled with shallow\" \n# \"not coupled with convection\" \n# TODO - please enter value(s)\n", "39. Cloud Scheme --&gt; Sub Grid Scale Ice Distribution\nSub-grid scale ice distribution\n39.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSub-grid scale ice distribution type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# TODO - please enter value(s)\n", "39.2. Function Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSub-grid scale ice distribution function name", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "39.3. Function Order\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSub-grid scale ice distribution function type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "39.4. Convection Coupling\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSub-grid scale ice distribution coupling with convection", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"coupled with deep\" \n# \"coupled with shallow\" \n# \"not coupled with convection\" \n# TODO - please enter value(s)\n", "40. Observation Simulation\nCharacteristics of observation simulation\n40.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview description of observation simulator characteristics", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "41. Observation Simulation --&gt; Isscp Attributes\nISSCP Characteristics\n41.1. Top Height Estimation Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nCloud simulator ISSCP top height estimation methodUo", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"no adjustment\" \n# \"IR brightness\" \n# \"visible optical depth\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "41.2. Top Height Direction\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nCloud simulator ISSCP top height direction", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"lowest altitude level\" \n# \"highest altitude level\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "42. Observation Simulation --&gt; Cosp Attributes\nCFMIP Observational Simulator Package attributes\n42.1. Run Configuration\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nCloud simulator COSP run configuration", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Inline\" \n# \"Offline\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "42.2. Number Of Grid Points\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nCloud simulator COSP number of grid points", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "42.3. Number Of Sub Columns\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nCloud simulator COSP number of sub-cloumns used to simulate sub-grid variability", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "42.4. Number Of Levels\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nCloud simulator COSP number of levels", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "43. Observation Simulation --&gt; Radar Inputs\nCharacteristics of the cloud radar simulator\n43.1. Frequency\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nCloud simulator radar frequency (Hz)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "43.2. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nCloud simulator radar type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"surface\" \n# \"space borne\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "43.3. Gas Absorption\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nCloud simulator radar uses gas absorption", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "43.4. Effective Radius\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nCloud simulator radar uses effective radius", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "44. Observation Simulation --&gt; Lidar Inputs\nCharacteristics of the cloud lidar simulator\n44.1. Ice Types\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nCloud simulator lidar ice type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"ice spheres\" \n# \"ice non-spherical\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "44.2. Overlap\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nCloud simulator lidar overlap", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"max\" \n# \"random\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "45. Gravity Waves\nCharacteristics of the parameterised gravity waves in the atmosphere, whether from orography or other sources.\n45.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview description of gravity wave parameterisation in the atmosphere", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "45.2. 
Sponge Layer\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSponge layer in the upper levels in order to avoid gravity wave reflection at the top.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.sponge_layer') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Rayleigh friction\" \n# \"Diffusive sponge layer\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "45.3. Background\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBackground wave distribution", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"continuous spectrum\" \n# \"discrete spectrum\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "45.4. Subgrid Scale Orography\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSubgrid scale orography effects taken into account.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"effect on drag\" \n# \"effect on lifting\" \n# \"enhanced topography\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "46. Gravity Waves --&gt; Orographic Gravity Waves\nGravity waves generated due to the presence of orography\n46.1. Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCommonly used name for the orographic gravity wave scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "46.2. Source Mechanisms\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nOrographic gravity wave source mechanisms", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"linear mountain waves\" \n# \"hydraulic jump\" \n# \"envelope orography\" \n# \"low level flow blocking\" \n# \"statistical sub-grid scale variance\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "46.3. Calculation Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nOrographic gravity wave calculation method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"non-linear calculation\" \n# \"more than two cardinal directions\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "46.4. Propagation Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOrographic gravity wave propogation scheme", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"linear theory\" \n# \"non-linear theory\" \n# \"includes boundary layer ducting\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "46.5. Dissipation Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOrographic gravity wave dissipation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"total wave\" \n# \"single wave\" \n# \"spectral\" \n# \"linear\" \n# \"wave saturation vs Richardson number\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "47. Gravity Waves --&gt; Non Orographic Gravity Waves\nGravity waves generated by non-orographic processes.\n47.1. Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCommonly used name for the non-orographic gravity wave scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "47.2. Source Mechanisms\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nNon-orographic gravity wave source mechanisms", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"convection\" \n# \"precipitation\" \n# \"background spectrum\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "47.3. Calculation Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nNon-orographic gravity wave calculation method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"spatially dependent\" \n# \"temporally dependent\" \n# TODO - please enter value(s)\n", "47.4. Propagation Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNon-orographic gravity wave propogation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"linear theory\" \n# \"non-linear theory\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "47.5. Dissipation Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNon-orographic gravity wave dissipation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"total wave\" \n# \"single wave\" \n# \"spectral\" \n# \"linear\" \n# \"wave saturation vs Richardson number\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "48. Solar\nTop of atmosphere solar insolation characteristics\n48.1. 
Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview description of solar insolation of the atmosphere", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "49. Solar --&gt; Solar Pathways\nPathways for solar forcing of the atmosphere\n49.1. Pathways\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nPathways for the solar forcing of the atmosphere model domain", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.solar_pathways.pathways') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"SW radiation\" \n# \"precipitating energetic particles\" \n# \"cosmic rays\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "50. Solar --&gt; Solar Constant\nSolar constant and top of atmosphere insolation characteristics\n50.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime adaptation of the solar constant.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.solar_constant.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"fixed\" \n# \"transient\" \n# TODO - please enter value(s)\n", "50.2. Fixed Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf the solar constant is fixed, enter the value of the solar constant (W m-2).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "50.3. Transient Characteristics\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nsolar constant transient characteristics (W m-2)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "51. Solar --&gt; Orbital Parameters\nOrbital parameters and top of atmosphere insolation characteristics\n51.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime adaptation of orbital parameters", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.orbital_parameters.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"fixed\" \n# \"transient\" \n# TODO - please enter value(s)\n", "51.2. Fixed Reference Date\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nReference date for fixed orbital parameters (yyyy)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "51.3. Transient Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescription of transient orbital parameters", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "51.4. 
Computation Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMethod used for computing orbital parameters.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Berger 1978\" \n# \"Laskar 2004\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "52. Solar --&gt; Insolation Ozone\nImpact of solar insolation on stratospheric ozone\n52.1. Solar Ozone Impact\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes top of atmosphere insolation impact on stratospheric ozone?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "53. Volcanos\nCharacteristics of the implementation of volcanoes\n53.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview description of the implementation of volcanic effects in the atmosphere", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.volcanos.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "54. Volcanos --&gt; Volcanoes Treatment\nTreatment of volcanoes in the atmosphere\n54.1. Volcanoes Implementation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow volcanic effects are modeled in the atmosphere.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"high frequency solar constant anomaly\" \n# \"stratospheric aerosols optical thickness\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
tensorflow/docs-l10n
site/ja/agents/tutorials/bandits_tutorial.ipynb
apache-2.0
[ "Copyright 2020 The TF-Agents Authors.", "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "TF-Agents における多腕バンディット問題のチュートリアル\nはじめに\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://www.tensorflow.org/agents/tutorials/bandits_tutorial\"> <img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\"> TensorFlow.org で表示</a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/agents/tutorials/bandits_tutorial.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\">Google Colab で実行</a></td>\n <td><a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/ja/agents/tutorials/bandits_tutorial.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\">GitHub でソースを表示</a></td>\n <td><a href=\"https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/agents/tutorials/bandits_tutorial.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\">ノートブックをダウンロード</a></td>\n</table>\n\nセットアップ\n以下の依存関係をインストールしていない場合は、実行します。", "!pip install tf-agents", "インポート", "import abc\nimport numpy as np\nimport tensorflow as tf\n\nfrom tf_agents.agents import tf_agent\nfrom tf_agents.drivers import driver\nfrom tf_agents.environments import py_environment\nfrom tf_agents.environments import tf_environment\nfrom tf_agents.environments import tf_py_environment\nfrom tf_agents.policies import tf_policy\nfrom tf_agents.specs import array_spec\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import time_step as ts\nfrom tf_agents.trajectories import trajectory\nfrom tf_agents.trajectories import policy_step\n\nnest = tf.nest", "はじめに\nタ腕バンディット問題(MAB)は、エージェントがある環境の状態を観察した後に何らかのアクションを取ることで、その環境における報酬を得るという強化学習の特殊なケースです。一般的な RL と MAB の主な違いは、MAB では、エージェントが環境の次の状態に影響を与えないことです。したがって、エージェントは状態遷移をモデル化したり、過去のアクションに対する報酬を与えたり、より多くの報酬を得るための「予測」を行いません。\nほかの RL 分野と同様に、MAB エージェントの目標は、できる限り多くの報酬を収集するポリシーを見つけ出すことです。ただし、十分に調べなかった場合により適したアクションを見逃す可能性があるため、最高の報酬を約束するアクションを常に使用しようとするのは間違いです。これが MAB で解決しなければならない主な問題であり、通常、探索と知識利用のジレンマと呼ばれています。\nMAB のバンディット環境、ポリシー、およびエージェントは、tf_agents/bandits のサブディレクトリにあります。\n環境\nTF-Agents では、環境クラスは現在の状態(観測またはコンテキスト)に関する情報を提供し、アクションを入力として受け取って状態遷移を実行し、報酬を出力する役割があります。このクラスは、エピソードが終了したときに新しいエピソードが開始されるよう、リセットも行います。これは、状態のラベルがエピソードの「最後」となった時に reset 関数を呼び出して行われます。\n詳細については、「TF-Agents 環境のチュートリアル」をご覧ください。\n前述のとおり、MAB は、アクションが次の観測に影響を与えないという点で一般的な RL と異なりますが、もう一つの違いは、バンディットには、前の時間ステップから独立して新しい観測でステップが開始するたびに「エピソード」がないところにあります。\n観測が確実に独立しており、RL エピソードの概念を中傷かするために、PyEnvironment と TFEnvironment のサブクラスである BanditPyEnvironment と BanditTFEnvironment を導入します。これらのクラスは、ユーザーが実装したままにする 2 つのプライベートメンバー関数を公開します。\npython\n@abc.abstractmethod\ndef _observe(self):\nと\npython\n@abc.abstractmethod\ndef _apply_action(self, action):\n_observe 関数は観測を戻します。すると、ポリシーがこの観測に基づくアクションを選択します。_apply_action は、アクションを入力として受け取り、対応する報酬を戻します。これらのプライベートメンバー関数はそれぞれ、reset 関数と step 関数によって呼び出されます。", "class 
BanditPyEnvironment(py_environment.PyEnvironment):\n\n def __init__(self, observation_spec, action_spec):\n self._observation_spec = observation_spec\n self._action_spec = action_spec\n super(BanditPyEnvironment, self).__init__()\n\n # Helper functions.\n def action_spec(self):\n return self._action_spec\n\n def observation_spec(self):\n return self._observation_spec\n\n def _empty_observation(self):\n return tf.nest.map_structure(lambda x: np.zeros(x.shape, x.dtype),\n self.observation_spec())\n\n # These two functions below should not be overridden by subclasses.\n def _reset(self):\n \"\"\"Returns a time step containing an observation.\"\"\"\n return ts.restart(self._observe(), batch_size=self.batch_size)\n\n def _step(self, action):\n \"\"\"Returns a time step containing the reward for the action taken.\"\"\"\n reward = self._apply_action(action)\n return ts.termination(self._observe(), reward)\n\n # These two functions below are to be implemented in subclasses.\n @abc.abstractmethod\n def _observe(self):\n \"\"\"Returns an observation.\"\"\"\n\n @abc.abstractmethod\n def _apply_action(self, action):\n \"\"\"Applies `action` to the Environment and returns the corresponding reward.\n \"\"\"", "上記の中間抽象クラスは PyEnvironment の _reset 関数と _step 関数を実装し、サブクラスが実装する抽象関数の _observe と _apply_action を公開します。\n単純な環境クラスの例\n以下のクラスは、観測が -2 から 2 のランダム整数で、3 つの可能なアクション (0, 1, 2) があり、報酬がこのアクションと観測の積である非常に単純な環境を提供します。", "class SimplePyEnvironment(BanditPyEnvironment):\n\n def __init__(self):\n action_spec = array_spec.BoundedArraySpec(\n shape=(), dtype=np.int32, minimum=0, maximum=2, name='action')\n observation_spec = array_spec.BoundedArraySpec(\n shape=(1,), dtype=np.int32, minimum=-2, maximum=2, name='observation')\n super(SimplePyEnvironment, self).__init__(observation_spec, action_spec)\n\n def _observe(self):\n self._observation = np.random.randint(-2, 3, (1,), dtype='int32')\n return self._observation\n\n def _apply_action(self, action):\n return action * self._observation", "次に、この環境を使用して観測を取得し、アクションに対する報酬を受け取ります。", "environment = SimplePyEnvironment()\nobservation = environment.reset().observation\nprint(\"observation: %d\" % observation)\n\naction = 2 #@param\n\nprint(\"action: %d\" % action)\nreward = environment.step(action).reward\nprint(\"reward: %f\" % reward)", "TF Environment\nBanditTFEnvironment をサブクラス化するか、RL 環境と同様に BanditPyEnvironment を定義して TFPyEnvironment でラップすることでバンディット環境を定義できます。単純さを維持するために、このチュートリアルでは、後者を使用することにします。", "tf_environment = tf_py_environment.TFPyEnvironment(environment)", "ポリシー\nバンディット問題におけるポリシーは、RL 問題と同様に機能し、観測を入力としてアクション(またはアクションの分布)を提供します。\n詳細については、「TF-Agents ポリシーのチュートリアル」をご覧ください。\n環境と同様に、ポリシーの構築には 2 つの方法があります。1 つは、PyPolicy を作成して TFPyPolicy でラップする方法で、もう 1 つは、TFPolicy を直接作成する方法です。ここでは、直接作成する方法を使用します。\nこの例は非常に単純であるため、最適なポリシーを手動で作成できます。アクションは観測の符号にのみ依存しており、負の場合は 0、正の場合は 2 となります。", "class SignPolicy(tf_policy.TFPolicy):\n def __init__(self):\n observation_spec = tensor_spec.BoundedTensorSpec(\n shape=(1,), dtype=tf.int32, minimum=-2, maximum=2)\n time_step_spec = ts.time_step_spec(observation_spec)\n\n action_spec = tensor_spec.BoundedTensorSpec(\n shape=(), dtype=tf.int32, minimum=0, maximum=2)\n\n super(SignPolicy, self).__init__(time_step_spec=time_step_spec,\n action_spec=action_spec)\n def _distribution(self, time_step):\n pass\n\n def _variables(self):\n return ()\n\n def _action(self, time_step, policy_state, seed):\n observation_sign = tf.cast(tf.sign(time_step.observation[0]), dtype=tf.int32)\n action = observation_sign + 1\n return policy_step.PolicyStep(action, 
policy_state)", "次に、環境に観測をリクエストし、ポリシーを呼び出してポリシーを選択すると、環境が報酬を出力します。", "sign_policy = SignPolicy()\n\ncurrent_time_step = tf_environment.reset()\nprint('Observation:')\nprint (current_time_step.observation)\naction = sign_policy.action(current_time_step).action\nprint('Action:')\nprint (action)\nreward = tf_environment.step(action).reward\nprint('Reward:')\nprint(reward)", "バンディット環境の実装方法によって、ステップを取るたびに、選択したアクションに対する報酬が得られるだけでなく、次の観測も得られます。", "step = tf_environment.reset()\naction = 1\nnext_step = tf_environment.step(action)\nreward = next_step.reward\nnext_observation = next_step.observation\nprint(\"Reward: \")\nprint(reward)\nprint(\"Next observation:\")\nprint(next_observation)", "エージェント\nバンディット環境とバンディットポリシーを準備したので、バンディットエージェントを定義することにしましょう。バンディットエージェントは、トレーニングサンプルに基づいてポリシーの変更を行います。\nバンディットエージェントの API は RL のエージェントと同じですが、_initialize メソッドと _train メソッドを実装し、policy と collect_policy を定義する必要があります。\nより複雑な環境\nバンディットエージェントを記述する前に、少し理解しにくい環境を用意する必要があります。もう少し面白くするために、次の環境は、reward = observation * action または reward = -observation * action のいずれかを必ず与えるようにしましょう。どちらが与えられるかは、環境か初期化するときに決定されます。", "class TwoWayPyEnvironment(BanditPyEnvironment):\n\n def __init__(self):\n action_spec = array_spec.BoundedArraySpec(\n shape=(), dtype=np.int32, minimum=0, maximum=2, name='action')\n observation_spec = array_spec.BoundedArraySpec(\n shape=(1,), dtype=np.int32, minimum=-2, maximum=2, name='observation')\n\n # Flipping the sign with probability 1/2.\n self._reward_sign = 2 * np.random.randint(2) - 1\n print(\"reward sign:\")\n print(self._reward_sign)\n\n super(TwoWayPyEnvironment, self).__init__(observation_spec, action_spec)\n\n def _observe(self):\n self._observation = np.random.randint(-2, 3, (1,), dtype='int32')\n return self._observation\n\n def _apply_action(self, action):\n return self._reward_sign * action * self._observation[0]\n\ntwo_way_tf_environment = tf_py_environment.TFPyEnvironment(TwoWayPyEnvironment())", "より複雑なポリシー\nより複雑なかんきょうには、より複雑なポリシーが伴います。基盤の環境の動作を検出するポリシーが必要です。ポリシーが処理する必要のある状況は 3 つあります。\n\nエージェントが、実行している環境のバージョンを検出していない場合\nエージェントが、実行している環境の元のバージョンを検出した場合\nエージェントが、実行している環境の反転バージョンを検出した場合\n\n_situation という tf_variable を、[0, 2] の値にエンコーディングされた情報を格納するように定義し、ポリシーが適宜に動作するようにします。", "class TwoWaySignPolicy(tf_policy.TFPolicy):\n def __init__(self, situation):\n observation_spec = tensor_spec.BoundedTensorSpec(\n shape=(1,), dtype=tf.int32, minimum=-2, maximum=2)\n action_spec = tensor_spec.BoundedTensorSpec(\n shape=(), dtype=tf.int32, minimum=0, maximum=2)\n time_step_spec = ts.time_step_spec(observation_spec)\n self._situation = situation\n super(TwoWaySignPolicy, self).__init__(time_step_spec=time_step_spec,\n action_spec=action_spec)\n def _distribution(self, time_step):\n pass\n\n def _variables(self):\n return [self._situation]\n\n def _action(self, time_step, policy_state, seed):\n sign = tf.cast(tf.sign(time_step.observation[0, 0]), dtype=tf.int32)\n def case_unknown_fn():\n # Choose 1 so that we get information on the sign.\n return tf.constant(1, shape=(1,))\n\n # Choose 0 or 2, depending on the situation and the sign of the observation.\n def case_normal_fn():\n return tf.constant(sign + 1, shape=(1,))\n def case_flipped_fn():\n return tf.constant(1 - sign, shape=(1,))\n\n cases = [(tf.equal(self._situation, 0), case_unknown_fn),\n (tf.equal(self._situation, 1), case_normal_fn),\n (tf.equal(self._situation, 2), case_flipped_fn)]\n action = tf.case(cases, exclusive=True)\n return policy_step.PolicyStep(action, policy_state)", "エージェント\nでは、環境のサインを検出して、ポリシーを適切に設定するエージェントを定義することにしましょう。", "class 
SignAgent(tf_agent.TFAgent):\n def __init__(self):\n self._situation = tf.Variable(0, dtype=tf.int32)\n policy = TwoWaySignPolicy(self._situation)\n time_step_spec = policy.time_step_spec\n action_spec = policy.action_spec\n super(SignAgent, self).__init__(time_step_spec=time_step_spec,\n action_spec=action_spec,\n policy=policy,\n collect_policy=policy,\n train_sequence_length=None)\n\n def _initialize(self):\n return tf.compat.v1.variables_initializer(self.variables)\n\n def _train(self, experience, weights=None):\n observation = experience.observation\n action = experience.action\n reward = experience.reward\n\n # We only need to change the value of the situation variable if it is\n # unknown (0) right now, and we can infer the situation only if the\n # observation is not 0.\n needs_action = tf.logical_and(tf.equal(self._situation, 0),\n tf.not_equal(reward, 0))\n\n\n def new_situation_fn():\n \"\"\"This returns either 1 or 2, depending on the signs.\"\"\"\n return (3 - tf.sign(tf.cast(observation[0, 0, 0], dtype=tf.int32) *\n tf.cast(action[0, 0], dtype=tf.int32) *\n tf.cast(reward[0, 0], dtype=tf.int32))) / 2\n\n new_situation = tf.cond(needs_action,\n new_situation_fn,\n lambda: self._situation)\n new_situation = tf.cast(new_situation, tf.int32)\n tf.compat.v1.assign(self._situation, new_situation)\n return tf_agent.LossInfo((), ())\n\nsign_agent = SignAgent()\n", "上記のコードでは、エージェントがポリシーを定義し、エージェントとポリシーが変数 situation を共有しています。\nまた、_train 関数のパラメータ experience はトラジェクトリです。\nトラジェクトリ\nTF-Agents では、trajectories は名前付きのタプルであり、前のステップで取得されたサンプルを含みます。これらのサンプルはエージェントによってポリシーのトレーニングと更新に使用されます。RL では、トラジェクトリには現在の状態、次の状態、そして現在のエピソードが終了したかどうかに関する情報が含まれている必要があります。バンディットの世界では、これらの情報は不要であるため、ヘルパー関数をセットアップしてトラジェクトリを作成します。", "# We need to add another dimension here because the agent expects the\n# trajectory of shape [batch_size, time, ...], but in this tutorial we assume\n# that both batch size and time are 1. 
Hence all the expand_dims.\n\ndef trajectory_for_bandit(initial_step, action_step, final_step):\n return trajectory.Trajectory(observation=tf.expand_dims(initial_step.observation, 0),\n action=tf.expand_dims(action_step.action, 0),\n policy_info=action_step.info,\n reward=tf.expand_dims(final_step.reward, 0),\n discount=tf.expand_dims(final_step.discount, 0),\n step_type=tf.expand_dims(initial_step.step_type, 0),\n next_step_type=tf.expand_dims(final_step.step_type, 0))\n", "エージェントのトレーニング\nこれで、バンディットエージェントをトレーニングするためのピースがすべて用意できました。", "step = two_way_tf_environment.reset()\nfor _ in range(10):\n action_step = sign_agent.collect_policy.action(step)\n next_step = two_way_tf_environment.step(action_step.action)\n experience = trajectory_for_bandit(step, action_step, next_step)\n print(experience)\n sign_agent.train(experience)\n step = next_step\n", "出力から、2 つ目のステップ(最初のステップで観測が 0 でなければ)の後に、ポリシーが正しい方法でアクションを選択しており、したがって収集された報酬が常に非負であることがわかります。\n実際の文脈付きバンディットの例\nこのチュートリアルの残りでは、TF-Agents Bandits ライブラリの事前実装済みの環境とエージェントを使用します。", "# Imports for example.\nfrom tf_agents.bandits.agents import lin_ucb_agent\nfrom tf_agents.bandits.environments import stationary_stochastic_py_environment as sspe\nfrom tf_agents.bandits.metrics import tf_metrics\nfrom tf_agents.drivers import dynamic_step_driver\nfrom tf_agents.replay_buffers import tf_uniform_replay_buffer\n\nimport matplotlib.pyplot as plt", "線形ペイオフ関数を使った定常確率的環境\nこの例で使用する環境は、StationaryStochasticPyEnvironment です。この環境は、観測(コンテキスト)を提供する(非常にノイズの多い)関数をパラメータとして取り、アームごとに、与えられた観測に基づいて報酬を計算する(やはりノイズの多い)関数を取ります。このチュートリアルの例では、d 次元の立方体から均一にコンテキストをサンプリングすると、報酬関数はコンテキストの線形関数で、一部はガウスノイズです。", "batch_size = 2 # @param\narm0_param = [-3, 0, 1, -2] # @param\narm1_param = [1, -2, 3, 0] # @param\narm2_param = [0, 0, 1, 1] # @param\ndef context_sampling_fn(batch_size):\n \"\"\"Contexts from [-10, 10]^4.\"\"\"\n def _context_sampling_fn():\n return np.random.randint(-10, 10, [batch_size, 4]).astype(np.float32)\n return _context_sampling_fn\n\nclass LinearNormalReward(object):\n \"\"\"A class that acts as linear reward function when called.\"\"\"\n def __init__(self, theta, sigma):\n self.theta = theta\n self.sigma = sigma\n def __call__(self, x):\n mu = np.dot(x, self.theta)\n return np.random.normal(mu, self.sigma)\n\narm0_reward_fn = LinearNormalReward(arm0_param, 1)\narm1_reward_fn = LinearNormalReward(arm1_param, 1)\narm2_reward_fn = LinearNormalReward(arm2_param, 1)\n\nenvironment = tf_py_environment.TFPyEnvironment(\n sspe.StationaryStochasticPyEnvironment(\n context_sampling_fn(batch_size),\n [arm0_reward_fn, arm1_reward_fn, arm2_reward_fn],\n batch_size=batch_size))\n", "LinUCB エージェント\n以下のエージェントは、LinUCB アルゴリズムを実装します。", "observation_spec = tensor_spec.TensorSpec([4], tf.float32)\ntime_step_spec = ts.time_step_spec(observation_spec)\naction_spec = tensor_spec.BoundedTensorSpec(\n dtype=tf.int32, shape=(), minimum=0, maximum=2)\n\nagent = lin_ucb_agent.LinearUCBAgent(time_step_spec=time_step_spec,\n action_spec=action_spec)", "Regret 基準\nバンディットで最も重要な基準は regret です。エージェントが収集した報酬と、環境の報酬関数にアクセスできる予測ポリシーの期待報酬の差として計算されます。そのため、RegretMetric には、特定の観測があった場合に、達成可能な最大の期待報酬を計算する baseline_reward_fn 関数が必要です。このチュートリアルの例では、この環境に定義した報酬関数に相当するノイズのない関数の最大値を取る必要があります。", "def compute_optimal_reward(observation):\n expected_reward_for_arms = [\n tf.linalg.matvec(observation, tf.cast(arm0_param, dtype=tf.float32)),\n tf.linalg.matvec(observation, tf.cast(arm1_param, dtype=tf.float32)),\n tf.linalg.matvec(observation, tf.cast(arm2_param, dtype=tf.float32))]\n optimal_action_reward = 
tf.reduce_max(expected_reward_for_arms, axis=0)\n return optimal_action_reward\n\nregret_metric = tf_metrics.RegretMetric(compute_optimal_reward)", "トレーニング\n上記で説明した環境、ポリシー、およびエージェントの要素をすべてを組み合わせましょう。ドライバーを使用して、環境でポリシーを実行してトレーニングデータを出力し、そのデータでエージェントをトレーニングします。\n必要なステップ数を共に指定するパラメータが 2 つあることに注意してください。num_iterations はトレーナーループを実行する回数を指定し、ドライバーはイテレーションごとに steps_per_loop ステップを実行します。これらのパラメータを維持するのは、主に、イテレーションごとに実行される演算と、ステップごとにドライバーが行う演算があるためです。たとえば、エージェントの train 関数はイテレーションにつき一度しか呼び出されません。ここでは、トレーニングの頻度を高めると、ポリシーが「より新しく」なるのに対し、より大きなバッチでトレーニングすると時間の効率がよくなるというトレードオフがあります。", "num_iterations = 90 # @param\nsteps_per_loop = 1 # @param\n\nreplay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(\n data_spec=agent.policy.trajectory_spec,\n batch_size=batch_size,\n max_length=steps_per_loop)\n\nobservers = [replay_buffer.add_batch, regret_metric]\n\ndriver = dynamic_step_driver.DynamicStepDriver(\n env=environment,\n policy=agent.collect_policy,\n num_steps=steps_per_loop * batch_size,\n observers=observers)\n\nregret_values = []\n\nfor _ in range(num_iterations):\n driver.run()\n loss_info = agent.train(replay_buffer.gather_all())\n replay_buffer.clear()\n regret_values.append(regret_metric.result())\n\nplt.plot(regret_values)\nplt.ylabel('Average Regret')\nplt.xlabel('Number of Iterations')", "最後のコードスニペットを実行したら、生成されるプロットから、エージェントのトレーニングが増えて、特定の観測が与えられる場合にポリシーが適切なアクションを選択する確率が高まる過程で、平均 Regret が下降しているのが示されます(そうであることを願います)。\n今後の内容\nさらに機能例を確認する場合は、bandits/agents/examples ディレクトリをご覧ください。さまざまなエージェントと環境用にすぐに実行できる例が掲載されています。\nTF-Agents ライブラリは、アームごとの特徴量でタ腕バンディットを処理することもできます。それについては、アームごとのバンディット問題のチュートリアルをご覧ください。" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
appleby/fastai-courses
deeplearning1/nbs/lesson5-ma.ipynb
apache-2.0
[ "from theano.sandbox import cuda\n\n%matplotlib inline\nimport utils; reload(utils)\nfrom utils import *\nfrom __future__ import division, print_function\n\nmodel_path = 'data/imdb/models/'\n%mkdir -p $model_path", "Setup data\nWe're going to look at the IMDB dataset, which contains movie reviews from IMDB, along with their sentiment. Keras comes with some helpers for this dataset.", "from keras.datasets import imdb\nidx = imdb.get_word_index()", "This is the word list:", "idx\n\nidx_arr = sorted(idx, key=idx.get)\nidx_arr[:10]", "...and this is the mapping from id to word", "idx2word = {v: k for k, v in idx.iteritems()}", "We download the reviews using code copied from keras.datasets:", "path = get_file('imdb_full.pkl',\n origin='https://s3.amazonaws.com/text-datasets/imdb_full.pkl',\n md5_hash='d091312047c43cf9e4e38fef92437263')\nf = open(path, 'rb')\n(x_train, labels_train), (x_test, labels_test) = pickle.load(f)\n\npath\n\nlen(x_train)", "Here's the 1st review. As you see, the words have been replaced by ids. The ids can be looked up in idx2word.", "', '.join(map(str, x_train[0]))", "The first word of the first review is 23022. Let's see what that is.", "idx2word[23022]", "Here's the whole review, mapped from ids to words.", "' '.join([idx2word[o] for o in x_train[0]])", "The labels are 1 for positive, 0 for negative.", "labels_train[:10]", "Reduce vocab size by setting rare words to max index.", "vocab_size = 5000\nmaxidx = vocab_size - 1\n\ntrn = [np.array([i if i < maxidx else maxidx for i in s]) for s in x_train]\ntest = [np.array([i if i < maxidx else maxidx for i in s]) for s in x_test]", "Look at distribution of lengths of sentences.", "lens = np.array(map(len, trn))\n(lens.max(), lens.min(), lens.mean())", "Pad (with zero) or truncate each sentence to make consistent length.", "seq_len = 500\n\ntrn = sequence.pad_sequences(trn, maxlen=seq_len, value=0)\ntest = sequence.pad_sequences(test, maxlen=seq_len, value=0)", "This results in nice rectangular matrices that can be passed to ML algorithms. Reviews shorter than 500 words are pre-padded with zeros, those greater are truncated.", "trn.shape", "Create simple models\nSingle hidden layer NN\nThe simplest model that tends to give reasonable results is a single hidden layer net. So let's try that. Note that we can't expect to get any useful results by feeding word ids directly into a neural net - so instead we use an embedding to replace them with a vector of 32 (initially random) floats for each word in the vocab.", "model = Sequential([\n Embedding(vocab_size, 32, input_length=seq_len),\n Flatten(),\n Dense(100, activation='relu'),\n Dropout(0.7),\n Dense(1, activation='sigmoid')])\n\nmodel.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=['accuracy'])\nmodel.summary()\n\nmodel.fit(trn, labels_train, batch_size=64, nb_epoch=2, validation_data=(test, labels_test))\n\nmodel.save_weights(model_path + 'single1.h5')\n\nmodel.load_weights(model_path + 'single1.h5')", "The stanford paper that this dataset is from cites a state of the art accuracy (without unlabelled data) of 0.883. So we're short of that, but on the right track.\nSingle conv layer with max pooling\nA CNN is likely to work better, since it's designed to take advantage of ordered data. 
We'll need to use a 1D CNN, since a sequence of words is 1D.", "conv1 = Sequential([\n Embedding(vocab_size, 32, input_length=seq_len, dropout=0.2),\n Dropout(0.2),\n Convolution1D(64, 5, border_mode='same', activation='relu'),\n Dropout(0.2),\n MaxPooling1D(),\n Flatten(),\n Dense(100, activation='relu'),\n Dropout(0.7),\n Dense(1, activation='sigmoid')])\n\nconv1.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=['accuracy'])\nconv1.summary()\n\nconv1.fit(trn, labels_train, batch_size=64, nb_epoch=4, validation_data=(test, labels_test))", "That's well past the Stanford paper's accuracy - another win for CNNs!", "conv1.save_weights(model_path + 'conv1.h5')\n\nconv1.load_weights(model_path + 'conv1.h5')", "Pre-trained vectors\nYou may want to look at wordvectors.ipynb before moving on.\nIn this section, we replicate the previous CNN, but using pre-trained embeddings.", "def get_glove_dataset(dataset):\n \"\"\"Download the requested glove dataset from files.fast.ai\n and return a location that can be passed to load_vectors.\n \"\"\"\n # see wordvectors.ipynb for info on how these files were\n # generated from the original glove data.\n md5sums = {'6B.50d': '8e1557d1228decbda7db6dfd81cd9909',\n '6B.100d': 'c92dbbeacde2b0384a43014885a60b2c',\n '6B.200d': 'af271b46c04b0b2e41a84d8cd806178d',\n '6B.300d': '30290210376887dcc6d0a5a6374d8255'}\n glove_path = os.path.abspath('data/glove/results')\n %mkdir -p $glove_path\n return get_file(dataset,\n 'http://files.fast.ai/models/glove/' + dataset + '.tgz',\n cache_subdir=glove_path,\n md5_hash=md5sums.get(dataset, None),\n untar=True)\n\ndef load_vectors(loc):\n return (load_array(loc+'.dat'),\n pickle.load(open(loc+'_words.pkl','rb')),\n pickle.load(open(loc+'_idx.pkl','rb')))\n\nvecs, words, wordidx = load_vectors(get_glove_dataset('6B.50d'))", "The glove word ids and imdb word ids use different indexes. So we create a simple function that creates an embedding matrix using the indexes from imdb, and the embeddings from glove (where they exist).", "def create_emb():\n n_fact = vecs.shape[1]\n emb = np.zeros((vocab_size, n_fact))\n\n for i in range(1,len(emb)):\n word = idx2word[i]\n if word and re.match(r\"^[a-zA-Z0-9\\-]*$\", word):\n src_idx = wordidx[word]\n emb[i] = vecs[src_idx]\n else:\n # If we can't find the word in glove, randomly initialize\n emb[i] = normal(scale=0.6, size=(n_fact,))\n\n # This is our \"rare word\" id - we want to randomly initialize\n emb[-1] = normal(scale=0.6, size=(n_fact,))\n emb/=3\n return emb\n\nemb = create_emb()", "We pass our embedding matrix to the Embedding constructor, and set it to non-trainable.", "model = Sequential([\n Embedding(vocab_size, 50, input_length=seq_len, dropout=0.2, \n weights=[emb], trainable=False),\n Dropout(0.25),\n Convolution1D(64, 5, border_mode='same', activation='relu'),\n Dropout(0.25),\n MaxPooling1D(),\n Flatten(),\n Dense(100, activation='relu'),\n Dropout(0.7),\n Dense(1, activation='sigmoid')])\n\nmodel.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=['accuracy'])\n\nmodel.fit(trn, labels_train, batch_size=64, nb_epoch=2, validation_data=(test, labels_test))", "We already have beaten our previous model! But let's fine-tune the embedding weights - especially since the words we couldn't find in glove just have random embeddings.", "model.layers[0].trainable=True\n\nmodel.optimizer.lr=1e-4\n\nmodel.fit(trn, labels_train, batch_size=64, nb_epoch=1, validation_data=(test, labels_test))", "As expected, that's given us a nice little boost. 
:)", "model.save_weights(model_path+'glove50.h5')", "Multi-size CNN\nThis is an implementation of a multi-size CNN as shown in Ben Bowles' excellent blog post.", "from keras.layers import Merge", "We use the functional API to create multiple conv layers of different sizes, and then concatenate them.", "graph_in = Input ((vocab_size, 50))\nconvs = [ ] \nfor fsz in range (3, 6): \n x = Convolution1D(64, fsz, border_mode='same', activation=\"relu\")(graph_in)\n x = MaxPooling1D()(x) \n x = Flatten()(x) \n convs.append(x)\nout = Merge(mode=\"concat\")(convs) \ngraph = Model(graph_in, out) \n\nemb = create_emb()", "We then replace the conv/max-pool layer in our original CNN with the concatenated conv layers.", "model = Sequential ([\n Embedding(vocab_size, 50, input_length=seq_len, dropout=0.2, weights=[emb]),\n Dropout (0.2),\n graph,\n Dropout (0.5),\n Dense (100, activation=\"relu\"),\n Dropout (0.7),\n Dense (1, activation='sigmoid')\n ])\n\nmodel.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=['accuracy'])\n\nmodel.fit(trn, labels_train, batch_size=64, nb_epoch=2, validation_data=(test, labels_test))", "Interestingly, I found that in this case I got best results when I started the embedding layer as being trainable, and then set it to non-trainable after a couple of epochs. I have no idea why!", "model.layers[0].trainable=False\n\nmodel.optimizer.lr=1e-5\n\nmodel.fit(trn, labels_train, batch_size=64, nb_epoch=2, validation_data=(test, labels_test))\n\nmodel.save_weights(model_path+'multiconv1.h5')", "This more complex architecture has given us another boost in accuracy.\nLSTM\nWe haven't covered this bit yet!", "model = Sequential([\n Embedding(vocab_size, 32, input_length=seq_len, mask_zero=True,\n W_regularizer=l2(1e-6), dropout=0.2),\n LSTM(100, consume_less='gpu'),\n Dense(1, activation='sigmoid')])\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\nmodel.summary()\n\nmodel.fit(trn, labels_train, batch_size=64, nb_epoch=5, validation_data=(test, labels_test))\n\nmodel.save_weights(model_path+'lstm1.h5')" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
atulsingh0/MachineLearning
MasteringML_wSkLearn/05_Decision_Trees.ipynb
gpl-3.0
[ "Nonlinear Classification and Regression with Decision Trees\nDecision trees\nDecision trees are commonly learned by recursively splitting the set of training\ninstances into subsets based on the instances' values for the explanatory variables. \nIn classification tasks, the leaf nodes\nof the decision tree represent classes. In regression tasks, the values of the response\nvariable for the instances contained in a leaf node may be averaged to produce the\nestimate for the response variable. After the decision tree has been constructed,\nmaking a prediction for a test instance requires only following the edges until a\nleaf node is reached. \nLet's create a decision tree using an algorithm called Iterative Dichotomiser 3 (ID3).\nInvented by Ross Quinlan, ID3 was one of the first algorithms used to train decision\ntrees. \nBut how to choose the first variable on which we have to divide the data so that we can have smaller tree. \nMeasured in bits, entropy quantifies the amount of uncertainty in a variable. Entropy\nis given by the following equation, where n is the number of outcomes and ( ) i P x is\nthe probability of the outcome i. Common values for b are 2, e, and 10. Because the\nlog of a number less than one will be negative, the entire sum is negated to return a\npositive value. \nentropy $$ H(X) = -\\sum_{i=1}^{n} P(x_i)log_b P(x_i) $$ \nInformation gain\nSelecting the test that produces the subsets with the lowest average entropy can produce a suboptimal tree. \nwe will measure the reduction in entropy using a metric called information gain.\nCalculated with the following equation, information gain is the difference between the entropy of the parent\nnode, H (T ), and the weighted average of the children nodes' entropies. \n \nFor creating Decision Tree, Algo ID3 is the one mostly used. C4.5 is a modified version of ID3\nthat can be used with continuous explanatory variables and can accommodate\nmissing values for features. C4.5 also can prune trees.\nPruning reduces the size of a tree by replacing branches that classify few instances with leaf nodes. Used by\nscikit-learn's implementation of decision trees, CART is another learning algorithm\nthat supports pruning.\nGini impurity\nGini impurity measures the proportions of classes in a set. Gini impurity\nis given by the following equation, where j is the number of classes, t is the subset\nof instances for the node, and P(i|t) is the probability of selecting an element of\nclass i from the node's subset: \n$$ Gini (t) = 1 - \\sum_{i=1}^{j} P(i|t)^2 $$ \nIntuitively, Gini impurity is zero when all of the elements of the set are the same\nclass, as the probability of selecting an element of that class is equal to one. Like\nentropy, Gini impurity is greatest when each class has an equal probability of being\nselected. 
The maximum value of Gini impurity depends on the number of possible\nclasses, and it is given by the following equation: \n$$ Gini_{max} = 1 - \\frac{1}{n} $$", "# import\nimport pandas as pd\n\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import GridSearchCV, train_test_split\nfrom sklearn.metrics import classification_report, accuracy_score, confusion_matrix\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.ensemble import RandomForestClassifier\n\ndf = pd.read_csv(\"data/ad.data\", header=None)\n\nexplanatory_variable_columns = set(df.columns.values)\nresponse_variable_column = df[len(df.columns.values)-1]\n# The last column describes the targets\nexplanatory_variable_columns.remove(len(df.columns.values)-1)\ny = [1 if e == 'ad.' else 0 for e in response_variable_column]\nX = df[list(explanatory_variable_columns)]\n\n# Replace the missing-value marker '?' with -1 (the result must be assigned back)\n# X = X.replace(to_replace=' *\\\\?', value=-1, regex=True)\nX = X.replace(['?'], [-1])\n\nX_train, X_test, y_train, y_test = train_test_split(X, y)\n\npipeline = Pipeline([\n('clf', DecisionTreeClassifier(criterion='entropy'))\n])\n\nparameters = {\n'clf__max_depth': (150, 155, 160),\n'clf__min_samples_split': (2, 3, 4),\n'clf__min_samples_leaf': (1, 2, 3)\n}\n\ngrid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1, scoring='f1')\n\ngrid_search.fit(X_train, y_train)\n\nprint( 'Best score: %0.3f' % grid_search.best_score_)\nprint( 'Best parameters set:')\nbest_parameters = grid_search.best_estimator_.get_params()\nfor param_name in sorted(parameters.keys()):\n print( '\\t%s: %r' % (param_name, best_parameters[param_name]))\n \npredictions = grid_search.predict(X_test)\n\nprint ('Accuracy:', accuracy_score(y_test, predictions))\nprint ('Confusion Matrix:', confusion_matrix(y_test, predictions))\nprint ('Classification Report:', classification_report(y_test, predictions))", "Tree ensembles (RandomForestClassifier)\nEnsemble learning methods combine a set of models to produce an estimator that\nhas better predictive performance than its individual components. A random forest\nis a collection of decision trees that have been trained on randomly selected subsets\nof the training instances and explanatory variables. Random forests usually make\npredictions by returning the mode or mean of the predictions of their constituent\ntrees. \nRandom forests are less prone to overfitting than decision trees because no single\ntree can learn from all of the instances and explanatory variables; no single tree can\nmemorize all of the noise in the representation.", "pipeline = Pipeline([\n('clf', RandomForestClassifier(criterion='entropy'))\n])\n\nparameters = {\n'clf__n_estimators': (5, 10, 20, 50),\n'clf__max_depth': (50, 150, 250),\n'clf__min_samples_split': (2, 3, 4),\n'clf__min_samples_leaf': (1, 2, 3)\n}\n\ngrid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1, scoring='f1')\ngrid_search.fit(X_train, y_train)", "The advantages and disadvantages of decision trees\nDecision trees are easy to use. Unlike many learning\nalgorithms, decision trees do not require the data to have zero mean and unit\nvariance. While decision trees can tolerate missing values for explanatory variables,\nscikit-learn's current implementation cannot. Decision trees can even learn to ignore\nexplanatory variables that are not relevant to the task. \nSmall decision trees can be easy to interpret and visualize with the export_graphviz\nfunction from scikit-learn's tree module. 
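For instance (a sketch, assuming Graphviz is available to render the .dot output):\npython\nfrom sklearn.tree import DecisionTreeClassifier, export_graphviz\n\nclf = DecisionTreeClassifier(criterion='entropy', max_depth=3).fit(X_train, y_train)\nexport_graphviz(clf, out_file='tree.dot')  # render with: dot -Tpng tree.dot -o tree.png\n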
The branches of a decision tree are\nconjunctions of logical predicates, and they are easily visualized as flowcharts.\nDecision trees support multioutput tasks, and a single decision tree can be used for\nmulticlass classification without employing a strategy like one-versus-all. \nDecision trees are eager learners. Eager learners\nmust build an input-independent model from the training data before they can be\nused to estimate the values of test instances, but can predict relatively quickly once\nthe model has been built. In contrast, lazy learners such as the k-nearest neighbors\nalgorithm defer all generalization until they must make a prediction. Lazy learners\ndo not spend time training, but often predict slowly compared to eager learners. \nDecision trees are more prone to overfitting than many other models. Pruning is a common\nstrategy that removes some of the tallest nodes and leaves of a decision tree, but\nit is not currently implemented in scikit-learn. However, similar effects can be\nachieved by setting a maximum depth for the tree or by creating child nodes only\nwhen the number of training instances they will contain exceeds a threshold.\nSome common decision tree algorithms are:\nID3, C4.5, J48, and Random Forest ensembles." ]
[ "markdown", "code", "markdown", "code", "markdown" ]
unconed/mathbox
examples/notebooks/mathbox.ipynb
mit
[ "Jupyter meets MathBox2\nThis notebook contains a few examples of embedded MathBox2 plots.\nNOTE that these examples are currently pegged to an old version of Mathbox! It would be lovely to update this to the modern stack.\nBoilerplate code", "import json\nimport numpy as np\nfrom IPython.display import HTML, Javascript, display\n\ndef json_numpy_serialzer(o):\n '''Helper function to serialize NumPy arrays.'''\n if isinstance(o, np.ndarray):\n return o.tolist()\n raise TypeError(\"{} of type {} is not JSON serializable\".format(repr(o), type(o)))\n\ndef jsglobal(**params):\n '''Populate JS global namespace with provided Python obejcts.'''\n code = [];\n for name, value in params.items():\n jsdata = json.dumps(value, default=json_numpy_serialzer)\n code.append(\"window.{} = {};\".format(name, jsdata))\n display(Javascript(\"\\n\".join(code)))\n\n%%javascript\n\n// Loading the compiled MathBox bundle.\nrequire.config({\n paths: {\n mathBox: '//cdn.rawgit.com/unconed/mathbox/eaeb8e15/build/mathbox-bundle'\n }\n});\n\n// Helper function that setups WebGL context and initializes MathBox.\nwindow.with_mathbox = function(element, func) {\n require(['mathBox'], function(){\n var mathbox = mathBox({\n plugins: ['core', 'controls', 'cursor', 'mathbox'],\n controls: { klass: THREE.OrbitControls },\n mathbox: {inspect: false},\n element: element[0],\n loop: {start: false},\n \n });\n var three = mathbox.three;\n three.renderer.setClearColor(new THREE.Color(0xFFFFFF), 1.0);\n three.camera.position.set(-1, 1, 2);\n three.controls.noKeys = true;\n \n three.element.style.height = \"400px\";\n three.element.style.width = \"100%\";\n \n function isInViewport(element) {\n var rect = element.getBoundingClientRect();\n var html = document.documentElement;\n var w = window.innerWidth || html.clientWidth;\n var h = window.innerHeight || html.clientHeight;\n return rect.top < h && rect.left < w && rect.bottom > 0 && rect.right > 0;\n }\n \n // Running update/render loop only for visible plots.\n var intervalId = setInterval(function(){\n if (three.element.offsetParent === null) {\n clearInterval(intervalId);\n three.destroy();\n return;\n }\n var visible = isInViewport(three.canvas);\n if (three.Loop.running != visible) {\n visible? 
three.Loop.start() : three.Loop.stop();\n }\n }, 100);\n\n func(mathbox);\n \n window.dispatchEvent(new Event('resize'));\n })\n}", "Simple surface plot\nThis code snippet shows a 3D surface plot of a function defined in a JS callback.", "%%javascript\nwith_mathbox(element, function(mathbox) {\n mathbox.cartesian({},{rotation:(t)=>[0, t*0.1, 0]}) // Setup rotating the coordinate frame.\n .grid({axes: [1, 3]}) // Add a grid to it.\n .area({width:50, height:50, // This defines 2D data source, sampled from JS callback \n expr: function(emit, x, y, i, j, t){\n var r = Math.sqrt(x*x+y*y);\n var z = Math.sin(r*10-t*0.5)*0.2 + 0.3;\n emit(x, z, y);\n }})\n .surface({color:'#AAA', shaded:true}) // Adding surface primitives, that draw data provided by \n .surface({color:'#55A', lineX:true, lineY:true, fill:false, zBias:1}); // the last defined datasource.\n})", "Feeding data from Python\nDrawing JS-defined functions is nice, but what if we'd like to draw some data generated by Python code?", "# Make an array of 3d points.\nnp.random.seed(123)\nt = np.linspace(0, 2*np.pi, 1000)\nx, y, z = np.sin(t*10), np.sin(t*20), np.sin(t*30+0.5)\npos = np.vstack([x, y, z]).T\npos += np.random.normal(size=pos.shape)*0.02\n\njsglobal(POS=pos) # Pass this array to JS-world as a global variable \"POS\".\n\n%%javascript\nwith_mathbox(element, function(mathbox) {\n mathbox.cartesian({},{rotation:(t)=>[0, t*0.1, 0]})\n .grid({axes: [1, 3]})\n // Now we can see the data on JS side!\n .array({data:POS, channels:3, live:false})\n .point({color:\"#55a\"})\n .line({width:1.0})\n})", "Tesseract rotation\nLet's draw something more exotic!", "%%javascript\nwith_mathbox(element, function(mathbox) {\n mathbox.three.element.style.height = '600px';\n mathbox.cartesian().grid({width: 2, opacity: 0.5, axes: [1, 3], origin: [0, -1, 0]});\n\n // Create a view that uses Stereographic 4d->3d projection, instead of 3d cartesian we used before.\n var view = mathbox.stereographic4({position:[0, 0, 0], scale:[0.5, 0.5, 0.5]});\n\n // Define Tesseract vertices and edges.\n var edges = [];\n var points = []\n for (var e=-1; e<2; e+=2)\n for (var i=-1; i<2; i+=2)\n for (var j=-1; j<2; j+=2)\n for (var k=-1; k<2; k+=2) {\n edges.push([i, j, k, e])\n edges.push([i, j, e, k])\n edges.push([i, e, j, k])\n edges.push([e, i, j, k])\n points.push([i, j, k, e])\n }\n\n view.matrix({width:edges.length/2, height:2, data:edges, live: false})\n .transpose({order:\"yx\", id:\"edges\"})\n .array({data:points, id:\"points\"})\n .clock({speed:0.25})\n // Animate rotation in 4d space.\n .transform4({}, {matrix:function(t) {\n var c = Math.cos(t), s = Math.sin(t);\n return [c, 0, 0,-s,\n 0, 1, 0, 0,\n 0, 0, 1, 0,\n s, 0, 0, c];\n }}) \n // Draw points. \n .point({size:8, points:\"#points\"})\n // Label them. \n .format({live:false, expr:(x, y, z, w)=>{\n return x+\", \"+y+\", \"+z+\", \"+w;\n }}).label({size:16, depth:0.5, outline:0.5})\n // This line linearly interpolates our edges in 4d space before doing projection,\n // which gives us nice curved edges.\n .lerp({width:32, source:\"#edges\"})\n .line({color:0x3090FF, depth:1.0, width:4});\n})", "Gray-Scott Reaction-Diffusion on Torus (GLSL)\nMathBox allows you to inject custom GLSL functions into its dataflow pipelines. It also exposes Render-to-Texture functionality to make pre-CUDA style GPU computing possible with minimal amounts of boilerplate code. 
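For reference, each offscreen pass below takes one explicit Euler step of the Gray-Scott model (the diffusion coefficients 0.2 and 0.1 are hard-coded in the shader, while the feed rate F and kill rate k come from the animated fk uniform):\n$$ \\partial_t u = 0.2\\,\\nabla^2 u - uv^2 + F(1-u), \\qquad \\partial_t v = 0.1\\,\\nabla^2 v + uv^2 - (F+k)v $$\n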
This sample computes a reaction-diffusion simulation in an offscreen texture, and then uses this texture to displace and colorize the torus surface.", "%%javascript\nwith_mathbox(element, function(mathbox) {\n\nmathbox.three.camera.position.set(-0.1, 1, 1.5);\nvar W = 512, H = 256;\nmathbox\n .rtt({width:W, height:H, type:\"float\", id:\"rtt\"}) // offscreen rendering\n // main simulation shader code\n .shader({code:`\n uniform vec2 dataSize;\n uniform vec2 spot;\n uniform vec2 fk;\n vec4 getsample(vec2 p);\n vec2 sample(vec2 p) {\n return getsample(mod(p, dataSize)).xy;\n }\n vec4 main(vec2 p) {\n if (length(spot-p)<2.0) {\n return vec4(0.0, 0.5, 0.0, 0.0);\n }\n float f = fk.x, k = fk.y;\n const vec2 dx=vec2(1.,0.0), dy=vec2(0.0,1.);\n vec2 v = sample(p);\n vec2 lap = sample(p+dx)+sample(p-dx)+sample(p+dy)+sample(p-dy)-4.0*v;\n float rate = v.x * v.y * v.y;\n vec2 dv = vec2(0.2, 0.1)*lap + vec2(-rate, rate);\n dv += vec2(f * (1.0 - v.x), -(f + k) * v.y);\n v = clamp(v+dv, 0.0, 1.0);\n return vec4(v, 0.0, 0.0);\n }`, fk:[0.034, 0.056]}, {spot:(t)=>[(t*0.02+0.75)%1*W, (t*0.12+0.5)%1*H]})\n .play({ // animate Gray-Scott reaction-diffusion parameters\n loop: true, to:4, pace:3.0,\n script:[\n {fk:[0.034, 0.056]}, \n {fk:[0.029, 0.057]},\n {fk:[0.014, 0.054]},\n {fk:[0.025, 0.060]},\n {fk:[0.034, 0.056]}]})\n .resample({indices:2}).compose() // this triggers Render-to-Texture pass\n .end() // back from offscreen rendering\n\n .cartesian({}, {rotation:(t)=>[t*0.1+1.5, 0, 0]})\n // shader to compute surface colors\n .shader({code:`\n vec4 sample(vec4 p);\n vec4 main(vec4 p) {\n float v = sample(p).y;\n return vec4(0.5+v, 0.5, 0.5, 1.0);\n }\n `}).resample()\n // shader to compute 3d positions of torus vertices\n .shader({code:`\n uniform vec4 dataSize;\n const float pi = 3.141593;\n vec4 sample(vec4 p);\n vec4 main(vec4 p) {\n float v = sample(p).y;\n vec2 pq = p.xy/(dataSize.xy-1.0)*2.0*pi;\n float r = v*0.2 + 0.3;\n float a = 0.7 + r*cos(pq.y);\n return vec4(a*cos(pq.x), a*sin(pq.x), r*sin(pq.y), 0.0);\n }`}).resample({source:'#rtt'})\n // draw the torus!\n .surface({shaded:true, closedX:true, colors:'<<', color:\"#fff\"});\n\n // this hack triggers RTT pass multiple times per frame for faster simulation\n mathbox.three.on('update', function(){\n var rtt = mathbox.select('rtt')[0].controller.rtt;\n for (var i=0; i<5; ++i)\n rtt.render()\n })\n})" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
UWSEDS/LectureNotes
Spring2018/MNIST_classification.ipynb
bsd-2-clause
[ "MNIST classification\nThis lecture demonstrates an example of a classification instance using Tensorflow.\nMNIST data\n​\nMNIST is a collection of images of handwritten numerical digits. Each image is 28x28 pixels and each has a grayscale value from 0-255.", "import numpy as np;\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n\nmnist = np.load('mnist_data.npz')\n\n\nX_train = mnist['X_train']\nX_test = mnist['X_test']\ny_train = mnist['y_train']\ny_test = mnist['y_test']\n\n", "We have 60000 28x28 images in our training set and 10000 in the test set.", "print(X_train.shape)\nprint(X_test.shape)", "The pixel value range is 0-255", "print(X_train[0])\n\nplt.imshow(X_train[0],cmap='gray')\nplt.axis('off')\nplt.show()", "Let's see what they look like:", "sz = 5;\n\nfor i in range(sz*sz):\n\n plt.subplot(sz, sz, i+1)\n plt.imshow(X_train[i],cmap='gray')\n plt.axis('off')\n\nplt.show()", "The dataset also includes a label for each digit:", "print(y_train[:sz*sz].reshape([sz,sz]))", "Pre-processing\nWe will flatten each digit into a 784-dimensional vector.", "train_data = X_train.reshape([-1,28*28])\ntest_data = X_test.reshape([-1,28*28])\n\nprint(train_data.shape)\nprint(test_data.shape)", "We will normalize the data to the [0,1] range:", "\ndef normalize_data(x):\n\tflat_x = np.reshape(x,[-1]);\n\tL = np.min(x);\n\tH = np.max(x);\n \n\treturn (x.astype(np.float32)-L)/(H-L);\n\ntrain_data = normalize_data(train_data)\ntest_data = normalize_data(test_data)\n\nprint(train_data[0])", "We also need to convert the labels into a 1-hot representation:", "def to_one_hot(labels,num):\n one_hot_labels = np.zeros((labels.shape[0],num))\n one_hot_labels[np.arange(labels.shape[0]),labels] = 1.0\n return one_hot_labels;\n\n \ntrain_labels = to_one_hot(y_train,10)\ntest_labels = to_one_hot(y_test,10)\n\nprint('Initial labels:')\nprint(y_train[:5])\nprint('1-hot representation:')\nprint(train_labels[:5])", "Building a model\nFirst let's import tensorflow:", "import tensorflow as tf", "The model will take the flattened digit as input. An input is declared as a \"placeholder\" variable meaning that the value of this tensor will be provided at run-time. For the computation of the loss function the class labels are also considered inputs to the model:", "x = tf.placeholder(tf.float32, [None, 784])\ny = tf.placeholder(tf.float32, [None, 10])", "We will feed the input into a two dense layers with a tanh() non-linearity. Each layer consists of the weight matrix W and the bias vector b. These have to be declared as variables:", "h1_sz = 64;\n\nW1 = tf.get_variable(\"W1\", [784,h1_sz])\nb1 = tf.Variable(tf.zeros([h1_sz]))\nh1 = tf.matmul(x,W1) + b1\n\nh1 = tf.tanh(h1)\n\nW2 = tf.get_variable(\"W2\", [h1_sz,10])\nb2 = tf.Variable(tf.zeros([10]))\nh2 = tf.matmul(h1,W2) + b2\n", "The activations of the layer are fed into a soft-max layer that outputs class probabilities:", "class_probs = tf.nn.softmax(h2)", "We will use the cross-entropy between the predicted and the actual labels as our loss function:", "cross_entropy_loss = tf.reduce_mean(-tf.reduce_sum(y * tf.log(tf.nn.softmax(class_probs)),axis=[1]))", "We also need to define what a training step looks like. 
The command below tells tensorflow to optimize the loss function using a Stochastic Gradient Descent (SGD) step with a learning rate of 0.5:", "train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy_loss)", "We need to create a session that will run on the computational graph:", "sess = tf.InteractiveSession()", "We also need to initialize the variables of the model:", "tf.global_variables_initializer().run()", "We can use the session to feed the input to the model and get the value of a specific node:", "\nbatch_x = train_data[:5]\nbatch_y = train_labels[:5]\n\nvis_probs = sess.run([class_probs], feed_dict={x:batch_x})\n\nprint(vis_probs)", "Notice how we did not provide the labels since these are not part of the slice of the computational graph for the class probabilities.\nWe can also perform an SGD step on this batch by running the session on the training step:", "for j in range(200):\n [a,vis_probs] = sess.run([train_step,class_probs], feed_dict={x:batch_x,y:batch_y})\nprint(vis_probs)", "Notice how re-running the command above shifts the output of the model towards the actual labels. Of course this instance of the model will be horribly overfit to these few digits. Let's re-initialize the model:", "tf.global_variables_initializer().run()", "Instead we will cycle over the whole training dataset a few times. We will process the dataset in mini-batches and take an SGD step for each such mini-batch.", "epochs = 5;\n\nbatch_size = 32;\n\nN = train_data.shape[0];\n\nhist_loss = [];\n\nfor epoch in range(epochs):\n\tprint(\"Epoch:\", epoch)\n\n\tfor index in range(int(N/(batch_size))):\n \n\t\tbatch_x = train_data[index*batch_size:(index+1)*batch_size];\n\t\tbatch_y = train_labels[index*batch_size:(index+1)*batch_size];\n \n\t\t[vis_loss,a] = sess.run([cross_entropy_loss,train_step], feed_dict={x:batch_x,y:batch_y})\n\t\thist_loss += [vis_loss]\n \nplt.plot(hist_loss)\nplt.show()", "Let's see what the model learns:", "for j in range(5):\n digit = test_data[j].reshape([1,784]);\n actual_label = test_labels[j];\n \n plt.imshow(digit.reshape([28,28]),cmap='gray')\n plt.show()\n print(actual_label)\n \n [vis_probs] = sess.run([class_probs], feed_dict={x:digit})\n print(vis_probs)\n input()", "To test the model more 'formally' we can compute the accuracy on the test dataset:", "correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(class_probs, 1))\n\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\ntrain_accuracy = sess.run(accuracy,feed_dict={x:train_data,y:train_labels})\ntest_accuracy = sess.run(accuracy,feed_dict={x:test_data,y:test_labels})\n\nprint(\"Accuracy on training data:\",train_accuracy)\nprint(\"Accuracy on test data:\",test_accuracy)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
fmfn/BayesianOptimization
examples/advanced-tour.ipynb
mit
[ "Advanced tour of the Bayesian Optimization package", "from bayes_opt import BayesianOptimization", "1. Suggest-Evaluate-Register Paradigm\nInternally the maximize method is simply a wrapper around the methods suggest, probe, and register. If you need more control over your optimization loops the Suggest-Evaluate-Register paradigm should give you that extra flexibility.\nFor an example of running the BayesianOptimization in a distributed fashion (where the function being optimized is evaluated concurrently in different cores/machines/servers), checkout the async_optimization.py script in the examples folder.", "# Let's start by defining our function, bounds, and instanciating an optimization object.\ndef black_box_function(x, y):\n return -x ** 2 - (y - 1) ** 2 + 1", "Notice that the evaluation of the blackbox function will NOT be carried out by the optimizer object. We are simulating a situation where this function could be being executed in a different machine, maybe it is written in another language, or it could even be the result of a chemistry experiment. Whatever the case may be, you can take charge of it and as long as you don't invoke the probe or maximize methods directly, the optimizer object will ignore the blackbox function.", "optimizer = BayesianOptimization(\n f=None,\n pbounds={'x': (-2, 2), 'y': (-3, 3)},\n verbose=2,\n random_state=1,\n)", "One extra ingredient we will need is an UtilityFunction instance. In case it is not clear why, take a look at the literature to understand better how this method works.", "from bayes_opt import UtilityFunction\n\nutility = UtilityFunction(kind=\"ucb\", kappa=2.5, xi=0.0)", "The suggest method of our optimizer can be called at any time. What you get back is a suggestion for the next parameter combination the optimizer wants to probe.\nNotice that while the optimizer hasn't observed any points, the suggestions will be random. However, they will stop being random and improve in quality the more points are observed.", "next_point_to_probe = optimizer.suggest(utility)\nprint(\"Next point to probe is:\", next_point_to_probe)", "You are now free to evaluate your function at the suggested point however/whenever you like.", "target = black_box_function(**next_point_to_probe)\nprint(\"Found the target value to be:\", target)", "Last thing left to do is to tell the optimizer what target value was observed.", "optimizer.register(\n params=next_point_to_probe,\n target=target,\n)", "1.1 The maximize loop\nAnd that's it. By repeating the steps above you recreate the internals of the maximize method. This should give you all the flexibility you need to log progress, hault execution, perform concurrent evaluations, etc.", "for _ in range(5):\n next_point = optimizer.suggest(utility)\n target = black_box_function(**next_point)\n optimizer.register(params=next_point, target=target)\n \n print(target, next_point)\nprint(optimizer.max)", "2. Dealing with discrete parameters\nThere is no principled way of dealing with discrete parameters using this package.\nOk, now that we got that out of the way, how do you do it? You're bound to be in a situation where some of your function's parameters may only take on discrete values. Unfortunately, the nature of bayesian optimization with gaussian processes doesn't allow for an easy/intuitive way of dealing with discrete parameters - but that doesn't mean it is impossible. 
The example below showcases a simple, yet reasonably adequate, way of dealing with discrete parameters.", "def func_with_discrete_params(x, y, d):\n # Simulate necessity of having d being discrete.\n assert type(d) == int\n \n return ((x + y + d) // (1 + d)) / (1 + (x + y) ** 2)\n\ndef function_to_be_optimized(x, y, w):\n d = int(w)\n return func_with_discrete_params(x, y, d)\n\noptimizer = BayesianOptimization(\n f=function_to_be_optimized,\n pbounds={'x': (-10, 10), 'y': (-10, 10), 'w': (0, 5)},\n verbose=2,\n random_state=1,\n)\n\noptimizer.maximize(alpha=1e-3)", "3. Tuning the underlying Gaussian Process\nThe Bayesian optimization algorithm works by performing a Gaussian process regression of the observed combination of parameters and their associated target values. The predicted parameter$\\rightarrow$target hyper-surface (and its uncertainty) is then used to guide the next best point to probe.\n3.1 Passing parameters to the GP\nDepending on the problem it could be beneficial to change the default parameters of the underlying GP. You can simply pass GP parameters to the maximize method directly as you can see below:", "optimizer = BayesianOptimization(\n f=black_box_function,\n pbounds={'x': (-2, 2), 'y': (-3, 3)},\n verbose=2,\n random_state=1,\n)\noptimizer.maximize(\n init_points=1,\n n_iter=5,\n # What follows are GP regressor parameters\n alpha=1e-3,\n n_restarts_optimizer=5\n)", "Another alternative, especially useful if you're calling maximize multiple times or optimizing outside the maximize loop, is to call the set_gp_params method.", "optimizer.set_gp_params(normalize_y=True)", "3.2 Tuning the alpha parameter\nWhen dealing with functions with discrete parameters, or a particularly erratic target space, it might be beneficial to increase the value of the alpha parameter. This parameter controls how much noise the GP can handle, so increase it whenever you think that extra flexibility is needed.\n3.3 Changing kernels\nBy default this package uses the Matérn 2.5 kernel. Depending on your use case you may find that tuning the GP kernel could be beneficial. You're on your own here since these are very specific solutions to very specific problems.\nObservers Continued\nObservers are objects that subscribe and listen to particular events fired by the BayesianOptimization object. \nWhen an event gets fired a callback function is called with the event and the BayesianOptimization instance passed as parameters. The callback can be specified at the time of subscription. 
If none is given, it will look for an update method on the observer.", "from bayes_opt.event import DEFAULT_EVENTS, Events\n\noptimizer = BayesianOptimization(\n f=black_box_function,\n pbounds={'x': (-2, 2), 'y': (-3, 3)},\n verbose=2,\n random_state=1,\n)\n\nclass BasicObserver:\n def update(self, event, instance):\n \"\"\"Does whatever you want with the event and `BayesianOptimization` instance.\"\"\"\n print(\"Event `{}` was observed\".format(event))\n\nmy_observer = BasicObserver()\n\noptimizer.subscribe(\n event=Events.OPTIMIZATION_STEP,\n subscriber=my_observer,\n callback=None, # Will use the `update` method as callback\n)", "Alternatively, you have the option to pass a completely different callback.", "def my_callback(event, instance):\n print(\"Go nuts here!\")\n\noptimizer.subscribe(\n event=Events.OPTIMIZATION_START,\n subscriber=\"Any hashable object\",\n callback=my_callback,\n)\n\noptimizer.maximize(init_points=1, n_iter=2)", "For a list of all default events you can check out DEFAULT_EVENTS.", "DEFAULT_EVENTS" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
AnyBody-Research-Group/AnyPyTools
docs/Tutorial/02_Generating_macros.ipynb
mit
[ "Creating AnyScript Macros\nAnyPyTools can create AnyScript macros automatically. Doing so simplifies the process of writing complex macros and makes it easier to do things like parameter studies, Monte Carlo simulation, etc. \nThere is a class for every macro command:", "from anypytools.macro_commands import (MacroCommand, Load, SetValue, SetValue_random, Dump, SaveDesign,\n LoadDesign, SaveValues, LoadValues, UpdateValues, OperationRun)", "A quick example\nThe following shows how the to generate a simple macro.", "macrolist = [\n Load('Knee.any', defs={'SUBJECT':'\"S02\"', 'TRIAL':'\"T04\"'}),\n OperationRun('Main.MyStudy.InverseDynamics'),\n Dump('Main.MyStudy.Output.MaxMuscleActivity'),\n] \nmacrolist ", "Each macro object will generate the macro commands with the correct syntax.\nThe macro can be launched using the start_macro() method of the AnyPyProcess object.", "from anypytools import AnyPyProcess\napp = AnyPyProcess()\napp.start_macro(macrolist);", "Overview of macro commands\nThe macro_commands module have classes for generating many of the standard AnyScipt macro commands.\n\nLoad(mainfile, defines, paths): load command\nOperationRun(var): select operation and run \nDump(var): classoperation \"Dump\"\nLoadDesign(var, filename): classoperation \"Load design\" \nSaveDesign(var, filename): classoperation \"Save design\" \nLoadValues(filename): classoperation Main \"Load Values\" \nSaveValues(filename): classoperation Main \"Save Values\"\nUpdateValues(): classoperation \"Update Values\"\nSetValue(var,value): classoperation \"Set Value\" \nMacroCommand(macro_string): Add abitrary macro string\n\nCreating many macros\nThe macro in the previous example would have been easy to write manually. However, in some cases we want to create many macros. Then it is a big advantage to generate them programmatically. \nTo generate many macros we need an extra class AnyMacro to wrap our macro list.", "from anypytools import AnyMacro\n\nmacrolist = [\n Load('Knee.any' ),\n OperationRun('Main.MyStudy.InverseDynamics'),\n] \n\nmg = AnyMacro(macrolist)\nmg", "By default AnyMacro just behaves as a container for our macro. But has additional attributes that specify how many macros we want.", "mg = AnyMacro(macrolist, number_of_macros = 5)\nmg", "This can also be overidden when calling its create_macros() function", "mg.create_macros(2)", "This list of macros can also be passed to the 'start_macro' function to executed in parallel.", "from anypytools import AnyPyProcess\n\napp = AnyPyProcess()\noutput = app.start_macro(mg.create_macros(100))", "Running many macros is only really useful if the macros are different. Some macros classes, like SetValue(), accepts lists of values which it distributes across the generated macros. \nImagine a list of 5 parameters. We want to create five macros that use these values:", "parameter_list = [2.2, 2.5, 2.7, 2.9, 3.1]\n\nmg = AnyMacro(SetValue('Main.MyParameter', parameter_list ))\nmg.create_macros(5)", "A simple parameter study\nLet us combine the previous to create a parameter study. We will continue with the simplified knee model where we left off in the previous tutorial. 
The parameter study will vary the patella tendon length from 2.0cm to 8.0cm, and observe the effect on maximum muscle activity.\nFirst we create a list of patella length parameters.", "patella_tendon_lengths = [\n 0.02 + i*0.01 \n for i in range(7)\n]\nprint(patella_tendon_lengths)", "This list of values is added to the macros with the SetValue class.", "macro = [\n Load('Knee.any'),\n SetValue('Main.MyModel.PatellaLigament.DriverPos', patella_tendon_lengths ),\n OperationRun('Main.MyStudy.InverseDynamics'),\n Dump('Main.MyStudy.Output.Abscissa.t'),\n Dump('Main.MyStudy.Output.MaxMuscleActivity'),\n Dump('Main.MyModel.PatellaLigament.DriverPos'),\n]\n\nparameter_study_macro = AnyMacro(macro, number_of_macros= len(patella_tendon_lengths) )", "We can now run the model and analyze the resulting maximum muscle activity by plotting the data in the output variable:", "output = app.start_macro(parameter_study_macro)\n\n%matplotlib inline\nimport matplotlib.pyplot as plt\n\nfor data in output:\n max_activity = data['Main.MyStudy.Output.MaxMuscleActivity']\n time = data['Main.MyStudy.Output.Abscissa.t']\n patella_ligament_length = data['Main.MyModel.PatellaLigament.DriverPos'][0]\n plt.plot(time, max_activity, label='{:.1f} cm'.format(100* patella_ligament_length) )\n\nplt.title('Effect of changing patella tendon length') \nplt.xlabel('Time steps')\nplt.ylabel('Max muscle activity')\nplt.legend(bbox_to_anchor=(1.05, 1), loc=2);\n" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
julienchastang/unidata-python-workshop
notebooks/MetPy_Advanced/QG Analysis.ipynb
mit
[ "<a name=\"top\"></a>\n<div style=\"width:1000 px\">\n\n<div style=\"float:right; width:98 px; height:98px;\">\n<img src=\"https://raw.githubusercontent.com/Unidata/MetPy/master/metpy/plots/_static/unidata_150x150.png\" alt=\"Unidata Logo\" style=\"height: 98px;\">\n</div>\n\n<h1>Advanced MetPy: Quasi-Geostrophic Analysis</h1>\n\n<div style=\"clear:both\"></div>\n</div>\n\n<hr style=\"height:2px;\">\n\nOverview:\n\nTeaching: 30 minutes\nExercises: 45 minutes\n\nObjectives\n\n<a href=\"#download\">Download NARR output from TDS</a>\n<a href=\"#interpolation\">Calculate QG-Omega Forcing Terms</a>\n<a href=\"#ascent\">Create a four-panel plot of QG Forcings</a>\n\nThis is a tutorial demonstrates common analyses for Synoptic Meteorology courses with use of Unidata tools, specifically MetPy and Siphon. In this tutorial we will cover accessing, calculating, and plotting model output.\nLet's investigate The Storm of the Century, although it would easy to change which case you wanted (please feel free to do so).\nReanalysis Output: NARR 00 UTC 13 March 1993\nData from Reanalysis on pressure surfaces:\n\nGeopotential Heights\nTemperature\nu-wind component\nv-wind component\n\nCalculations:\n\nLaplacian of Temperature Advection\nDifferential Vorticity Advection\nWind Speed", "from datetime import datetime\n\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nimport numpy as np\nfrom scipy.ndimage import gaussian_filter\nfrom siphon.catalog import TDSCatalog\nfrom siphon.ncss import NCSS\nimport matplotlib.pyplot as plt\nimport metpy.calc as mpcalc\nimport metpy.constants as mpconstants\nfrom metpy.units import units\nimport xarray as xr", "<a name=\"download\"></a>\nDownloading NARR Output\nLets investigate what specific NARR output is available to work with from NCEI.\nhttps://www.ncdc.noaa.gov/data-access/model-data/model-datasets/north-american-regional-reanalysis-narr\nWe specifically want to look for data that has \"TDS\" data access, since that is short for a THREDDS server data access point. There are a total of four different GFS datasets that we could potentially use.\nChoosing our data source\nLet's go ahead and use the NARR Analysis data to investigate the past case we identified (The Storm of the Century).\nhttps://www.ncei.noaa.gov/thredds/catalog/narr-a-files/199303/19930313/catalog.html?dataset=narr-a-files/199303/19930313/narr-a_221_19930313_0000_000.grb\nAnd we will use a python package called Siphon to read this data through the NetCDFSubset (NetCDFServer) link.\nhttps://www.ncei.noaa.gov/thredds/ncss/grid/narr-a-files/199303/19930313/narr-a_221_19930313_0000_000.grb/dataset.html\nFirst we can set out date using the datetime module", "# Case Study Date\nyear = 1993\nmonth = 3\nday = 13\nhour = 0\n\ndt = datetime(year, month, day, hour)", "Next, we set up access to request subsets of data from the model. 
This uses the NetCDF Subset Service (NCSS) to make requests from the GRIB collection and get results in netCDF format.", "# Read NARR Data from THREDDS server\nbase_url = 'https://www.ncei.noaa.gov/thredds/catalog/narr-a-files/'\n\n# Programmatically generate the URL to the day of data we want\ncat = TDSCatalog(f'{base_url}{dt:%Y%m}/{dt:%Y%m%d}/catalog.xml')\n\n# Have Siphon find the appropriate dataset\nds = cat.datasets.filter_time_nearest(dt)\n\n# Download data using the NetCDF Subset Service\nncss = ds.subset()\nquery = ncss.query().lonlat_box(north=60, south=18, east=300, west=225)\nquery.time(dt).variables('Geopotential_height_isobaric',\n 'Temperature_isobaric',\n 'u-component_of_wind_isobaric',\n 'v-component_of_wind_isobaric').add_lonlat().accept('netcdf')\ndata = ncss.get_data(query)\n\n# Open data with xarray, and parse it with MetPy\nds = xr.open_dataset(xr.backends.NetCDF4DataStore(data)).metpy.parse_cf()\nds\n\n# Back up in case of bad internet connection.\n# Uncomment the following line to read local netCDF file of NARR data\n# ds = xr.open_dataset('../../data/NARR_19930313_0000.nc').metpy.parse_cf()", "Subset Pressure Levels\nUsing xarray gives great funtionality for selecting pieces of your dataset to use within your script/program. MetPy also includes helpers for unit- and coordinate-aware selection and getting unit arrays from xarray DataArrays.", "# This is the time we're using\nvtime = ds.Temperature_isobaric.metpy.time[0]\n\n# Grab lat/lon values from file as unit arrays\nlats = ds.lat.metpy.unit_array\nlons = ds.lon.metpy.unit_array\n\n# Calculate distance between grid points\n# will need for computations later\ndx, dy = mpcalc.lat_lon_grid_deltas(lons, lats)\n\n# Grabbing data for specific variable contained in file (as a unit array)\n# 700 hPa Geopotential Heights\nhght_700 = ds.Geopotential_height_isobaric.metpy.sel(vertical=700 * units.hPa,\n time=vtime)\n\n# Equivalent form needed if there is a dash in name of variable\n# (e.g., 'u-component_of_wind_isobaric')\n# hght_700 = ds['Geopotential_height_isobaric'].metpy.sel(vertical=700 * units.hPa, time=vtime)\n\n# 700 hPa Temperature\ntmpk_700 = ds.Temperature_isobaric.metpy.sel(vertical=700 * units.hPa,\n time=vtime)\n\n# 700 hPa u-component_of_wind\nuwnd_700 = ds['u-component_of_wind_isobaric'].metpy.sel(vertical=700 * units.hPa,\n time=vtime)\n\n# 700 hPa v-component_of_wind\nvwnd_700 = ds['v-component_of_wind_isobaric'].metpy.sel(vertical=700 * units.hPa,\n time=vtime)", "Exercise\nWrite the code to access the remaining necessary pieces of data from our file to calculate the QG Omega forcing terms valid at 700 hPa.\nData variables desired:\n* hght_500: 500-hPa Geopotential_height_isobaric\n* uwnd_500: 500-hPa u-component_of_wind_isobaric\n* vwnd_500: 500-hPa v-component_of_wind_isobaric\n* uwnd_900: 900-hPa u-component_of_wind_isobaric\n* vwnd_900: 900-hPa v-component_of_wind_isobaric", "# 500 hPa Geopotential Height\n\n\n# 500 hPa u-component_of_wind\n\n\n# 500 hPa v-component_of_wind\n\n\n# 900 hPa u-component_of_wind\n\n\n# 900 hPa v-component_of_wind\n\n", "Solution", "# %load solutions/QG_data.py", "QG Omega Forcing Terms\nHere is the QG Omega equation from Bluesetein (1992; Eq. 
5.6.11) with the two primary forcing terms on the right hand side of this equation.\n$$\\left(\\nabla_p ^2 + \\frac{f^2}{\\sigma}\\frac{\\partial ^2}{\\partial p^2}\\right)\\omega = \n\\frac{f_o}{\\sigma}\\frac{\\partial}{\\partial p}\\left[\\vec{V_g} \\cdot \\nabla_p \\left(\\zeta_g + f \\right)\\right] +\n\\frac{R}{\\sigma p} \\nabla_p ^2 \\left[\\vec{V_g} \\cdot \\nabla_p T \\right]$$\nWe want to write code that will calculate the differential vorticity advection term (the first term on the r.h.s.) and the laplacian of the temperature advection. We will compute these terms so that they are valid at 700 hPa. Need to set constants for static stability, f0, and Rd.", "# Set constant values that will be needed in computations\n\n# Set default static stability value\nsigma = 2.0e-6 * units('m^2 Pa^-2 s^-2')\n\n# Set f-plane at typical synoptic f0 value\nf0 = 1e-4 * units('s^-1')\n\n# Use dry gas constant from MetPy constants\nRd = mpconstants.Rd\n\n# Smooth Heights\n# For calculation purposes we want to smooth our variables\n# a little to get to the \"synoptic values\" from higher\n# resolution datasets\n\n# Number of repetitions of smoothing function\nn_reps = 50\n\n# Apply the 9-point smoother\nhght_700s = mpcalc.smooth_n_point(hght_700, 9, n_reps)\nhght_500s = mpcalc.smooth_n_point(hght_500, 9, n_reps)\n\ntmpk_700s = mpcalc.smooth_n_point(tmpk_700, 9, n_reps)\ntmpc_700s = tmpk_700s.to('degC')\n\nuwnd_700s = mpcalc.smooth_n_point(uwnd_700, 9, n_reps)\nvwnd_700s = mpcalc.smooth_n_point(vwnd_700, 9, n_reps)\n\nuwnd_500s = mpcalc.smooth_n_point(uwnd_500, 9, n_reps)\nvwnd_500s = mpcalc.smooth_n_point(vwnd_500, 9, n_reps)\n\nuwnd_900s = mpcalc.smooth_n_point(uwnd_900, 9, n_reps)\nvwnd_900s = mpcalc.smooth_n_point(vwnd_900, 9, n_reps)", "Compute Term A - Differential Vorticity Advection\nNeed to compute:\n1. absolute vorticity at two levels (e.g., 500 and 900 hPa)\n2. absolute vorticity advection at same two levels\n3. centered finite-difference between two levels (e.g., valid at 700 hPa)\n4. apply constants to calculate value of full term", "# Absolute Vorticity Calculation\navor_900 = mpcalc.absolute_vorticity(uwnd_900s, vwnd_900s, dx, dy, lats)\navor_500 = mpcalc.absolute_vorticity(uwnd_500s, vwnd_500s, dx, dy, lats)\n\n# Advection of Absolute Vorticity\nvortadv_900 = mpcalc.advection(avor_900, (uwnd_900s, vwnd_900s), (dx, dy)).to_base_units()\nvortadv_500 = mpcalc.advection(avor_500, (uwnd_500s, vwnd_500s), (dx, dy)).to_base_units()\n\n# Differential Vorticity Advection between two levels\ndiff_avor = ((vortadv_900 - vortadv_500)/(400 * units.hPa)).to_base_units()\n\n# Calculation of final differential vorticity advection term\nterm_A = (-f0 / sigma * diff_avor).to_base_units()\nprint(term_A.units)", "Exercise\nCompute Term B - Laplacian of Temperature Advection\nNeed to compute:\n1. Temperature advection at 700 hPa (tadv_700)\n2. Laplacian of Temp Adv. at 700 hPa (lap_tadv_700)\n3. 
final term B with appropriate constants (term_B)\nFor information on how to calculate a Laplacian using MetPy, see the documentation on this function.", "# Temperature Advection\n\n\n# Laplacian of Temperature Advection\n\n\n# Calculation of final Laplacian of Temperature Advection term\n\n", "Solution", "# %load solutions/term_B_calc.py", "Four Panel Plot\nUpper-left Panel: 700-hPa Geopotential Heights, Temperature, and Winds\nUpper-right Panel: 500-hPa Geopotential Heights, Absolute Vorticity, and Winds\nLower-left Panel: Term B (Laplacian of Temperature Advection)\nLower-right Panel: Term A (Laplacian of differential Vorticity Advection)", "# Set some contour intervals for various parameters\n\n# CINT 500 hPa Heights\nclev_hght_500 = np.arange(0, 7000, 60)\n# CINT 700 hPa Heights\nclev_hght_700 = np.arange(0, 7000, 30)\n# CINT 700 hPa Temps\nclev_tmpc_700 = np.arange(-40, 40, 5)\n# CINT Omega terms\nclev_omega = np.arange(-20, 21, 2)\n\n# Set some projections for our data (Plate Carree)\n# and output maps (Lambert Conformal)\n\n# Data projection; NARR Data is Earth Relative\ndataproj = ccrs.PlateCarree()\n\n# Plot projection\n# The look you want for the view, LambertConformal for mid-latitude view\nplotproj = ccrs.LambertConformal(central_longitude=-100.,\n central_latitude=40.,\n standard_parallels=[30, 60])", "Start 4-panel Figure", "# Set figure size\nfig=plt.figure(1, figsize=(24.5,17.))\n\n# Format the valid time\nvtime_str = str(vtime.dt.strftime('%Y-%m-%d %H%MZ').values)\n\n# Upper-Left Panel\nax=plt.subplot(221, projection=plotproj)\nax.set_extent([-125., -73, 25., 50.],ccrs.PlateCarree())\nax.add_feature(cfeature.COASTLINE, linewidth=0.5)\nax.add_feature(cfeature.STATES, linewidth=0.5)\n\n# Contour #1\ncs = ax.contour(lons, lats, hght_700, clev_hght_700,colors='k',\n linewidths=1.5, linestyles='solid', transform=dataproj)\nplt.clabel(cs, fontsize=10, inline=1, inline_spacing=3, fmt='%i',\n rightside_up=True, use_clabeltext=True)\n\n# Contour #2\ncs2 = ax.contour(lons, lats, tmpc_700s, clev_tmpc_700, colors='grey',\n linewidths=1.0, linestyles='dotted', transform=dataproj)\nplt.clabel(cs2, fontsize=10, inline=1, inline_spacing=3, fmt='%d',\n rightside_up=True, use_clabeltext=True)\n\n# Colorfill\ncf = ax.contourf(lons, lats, tadv_700*10**4, np.arange(-10,10.1,0.5),\n cmap=plt.cm.bwr, extend='both', transform=dataproj)\nplt.colorbar(cf, orientation='horizontal', pad=0.0, aspect=50, extendrect=True)\n\n# Vector\nax.barbs(lons.m, lats.m, uwnd_700s.to('kts').m, vwnd_700s.to('kts').m,\n regrid_shape=15, transform=dataproj)\n\n# Titles\nplt.title('700-hPa Geopotential Heights (m), Temperature (C),\\n'\n 'Winds (kts), and Temp Adv. 
($*10^4$ C/s)',loc='left')\nplt.title('VALID: ' + vtime_str, loc='right')\n\n\n\n# Upper-Right Panel\nax=plt.subplot(222, projection=plotproj)\nax.set_extent([-125., -73, 25., 50.],ccrs.PlateCarree())\nax.add_feature(cfeature.COASTLINE, linewidth=0.5)\nax.add_feature(cfeature.STATES, linewidth=0.5)\n\n# Contour #1\nclev500 = np.arange(0,7000,60)\ncs = ax.contour(lons, lats, hght_500, clev500, colors='k',\n linewidths=1.5, linestyles='solid', transform=dataproj)\nplt.clabel(cs, fontsize=10, inline=1, inline_spacing=3, fmt='%i',\n rightside_up=True, use_clabeltext=True)\n\n# Contour #2\ncs2 = ax.contour(lons, lats, avor_500*10**5, np.arange(-40, 50, 3),colors='grey',\n linewidths=1.0, linestyles='dotted', transform=dataproj)\nplt.clabel(cs2, fontsize=10, inline=1, inline_spacing=3, fmt='%d',\n rightside_up=True, use_clabeltext=True)\n\n# Colorfill\ncf = ax.contourf(lons, lats, vortadv_500*10**8, np.arange(-2, 2.2, 0.2),\n cmap=plt.cm.BrBG, extend='both', transform=dataproj)\nplt.colorbar(cf, orientation='horizontal', pad=0.0, aspect=50, extendrect=True)\n\n# Vector\nax.barbs(lons.m, lats.m, uwnd_500s.to('kts').m, vwnd_500s.to('kts').m,\n regrid_shape=15, transform=dataproj)\n\n# Titles\nplt.title('500-hPa Geopotential Heights (m), Winds (kt), and\\n'\n 'Absolute Vorticity Advection ($*10^{8}$ 1/s^2)',loc='left')\nplt.title('VALID: ' + vtime_str, loc='right')\n\n\n\n# Lower-Left Panel\nax=plt.subplot(223, projection=plotproj)\nax.set_extent([-125., -73, 25., 50.],ccrs.PlateCarree())\nax.add_feature(cfeature.COASTLINE, linewidth=0.5)\nax.add_feature(cfeature.STATES, linewidth=0.5)\n\n# Contour #1\ncs = ax.contour(lons, lats, hght_700s, clev_hght_700, colors='k',\n linewidths=1.5, linestyles='solid', transform=dataproj)\nplt.clabel(cs, fontsize=10, inline=1, inline_spacing=3, fmt='%i',\n rightside_up=True, use_clabeltext=True)\n\n# Contour #2\ncs2 = ax.contour(lons, lats, tmpc_700s, clev_tmpc_700, colors='grey',\n linewidths=1.0, transform=dataproj)\nplt.clabel(cs2, fontsize=10, inline=1, inline_spacing=3, fmt='%d',\n rightside_up=True, use_clabeltext=True)\n\n# Colorfill\ncf = ax.contourf(lons, lats, term_B*10**12, clev_omega,\n cmap=plt.cm.RdYlBu_r, extend='both', transform=dataproj)\nplt.colorbar(cf, orientation='horizontal', pad=0.0, aspect=50, extendrect=True)\n\n# Vector\nax.barbs(lons.m, lats.m, uwnd_700s.to('kts').m, vwnd_700s.to('kts').m,\n regrid_shape=15, transform=dataproj)\n\n# Titles\nplt.title('700-hPa Geopotential Heights (m), Winds (kt), and\\n'\n 'Term B QG Omega ($*10^{12}$ kg m$^{-3}$ s$^{-3}$)',loc='left')\nplt.title('VALID: ' + vtime_str, loc='right')\n\n\n\n# # Lower-Right Panel\nax=plt.subplot(224, projection=plotproj)\nax.set_extent([-125., -73, 25., 50.],ccrs.PlateCarree())\nax.add_feature(cfeature.COASTLINE, linewidth=0.5)\nax.add_feature(cfeature.STATES, linewidth=0.5)\n\n# Contour #1\ncs = ax.contour(lons, lats, hght_500s, clev500, colors='k',\n linewidths=1.5, linestyles='solid', transform=dataproj)\nplt.clabel(cs, fontsize=10, inline=1, inline_spacing=3, fmt='%i',\n rightside_up=True, use_clabeltext=True)\n\n# Contour #2\ncs2 = ax.contour(lons, lats, avor_500*10**5, np.arange(-40, 50, 3), colors='grey',\n linewidths=1.0, linestyles='dotted', transform=dataproj)\nplt.clabel(cs2, fontsize=10, inline=1, inline_spacing=3, fmt='%d',\n rightside_up=True, use_clabeltext=True)\n\n# Colorfill\ncf = ax.contourf(lons, lats, term_A*10**12, clev_omega,\n cmap=plt.cm.RdYlBu_r, extend='both', transform=dataproj)\nplt.colorbar(cf, orientation='horizontal', pad=0.0, aspect=50, 
extendrect=True)\n\n# Vector\nax.barbs(lons.m, lats.m, uwnd_500s.to('kt').m, vwnd_500s.to('kt').m,\n regrid_shape=15, transform=dataproj)\n\n# Titles\nplt.title('500-hPa Geopotential Heights (m), Winds (kt), and\\n'\n 'Term A QG Omega ($*10^{12}$ kg m$^{-3}$ s$^{-3}$)',loc='left')\nplt.title('VALID: ' + vtime_str, loc='right')\n\nplt.show()", "Exercise\nPlot the combined QG Omega forcing terms (term_A + term_B) in a single panel\nBONUS: Compute a difference map of Term A and Term B and plot\nSolution", "# %load solutions/qg_omega_total_fig.py\n" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
gfeiden/Notebook
Daily/20150902_phoenix_bol_corrs.ipynb
mit
[ "Phoenix BT-Settl Bolometric Corrections\nFiguring out the best method of handling Phoenix bolometric correction files.", "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.interpolate as scint", "Change to directory containing bolometric correction files.", "cd /Users/grefe950/Projects/starspot/starspot/color/tab/phx/", "Load a bolometric correction table, say for the Cousins AB photometric system.", "bc_table = np.genfromtxt('colmag.BT-Settl.server.JOHNSON.AB.bolcor', comments='!')", "Now, the structure of the file is quite irregular. The grid is not rectangular, which is not an immediate problem. The table is strucutred such that column 0 contains Teff in increasing order, followed by logg in column 1 in increasing order. However, metallicities in column 2 appear to be in decreasing order, which may be a problem for simple interpolation routines. Alpha abundances follow and are in increasing order, but since this is a \"standard\" grid, whereby alpha enrichment is a function of metallicity, we can ignore it for the moment.\nLet's take a first swing at the problem by using the LinearND Interpolator from SciPy.", "test_surface = scint.LinearNDInterpolator(bc_table[:, :3], bc_table[:, 4:])", "The surface compiled, but that is not a guarantee that the interpolation will work successfully. Some tests are required to confirm this is the case. Let's try a few Teffs at logg = 5 with solar metallicity.", "test_surface(np.array([1500., 5.0, 0.0]))", "This agrees with data in the bolometric correciton table.\nTeff logg [Fe/H] [a/Fe] B V R I\n1500.00 5.00 0.00 0.00 -15.557 -16.084 -11.560 -9.291\nNow, let's raise the temperature.", "test_surface(np.array([3000., 5.0, 0.0]))", "Again, we have a good match to tabulated values,\nTeff logg [Fe/H] [a/Fe] B V R I\n3000.00 5.00 0.00 0.00 -6.603 -5.641 -4.566 -3.273\nHowever, since we are using a tabulated metallicity, the interpolation may proceed without too much trouble. If we select a metallicity between grid points, how do we fare?", "test_surface(np.array([3000., 5.0, 0.1]))", "This appears consistent. What about progressing to lower metallicity values?", "test_surface(np.array([3000., 5.0, -0.2]))", "For reference, at [Fe/H] = $-0.5$ dex, we have\nTeff logg [Fe/H] [a/Fe] B V R I\n3000.00 5.00 -0.50 0.20 -6.533 -5.496 -4.424 -3.154\nThe interpolation routine has seemingly handled the non-monotonic nature of the metallicity column, as all interpolate values lie between values at the two respective nodes.\n\nNow let's import an isochrone and calcuate colors for stellar models for comparison against MARCS bolometric corrections.", "iso = np.genfromtxt('/Users/grefe950/evolve/dmestar/iso/dmestar_00120.0myr_z+0.00_a+0.00_marcs.iso')", "Make sure there are magnitudes and colors associated with this isochrone.", "iso.shape", "A standard isochrone would only have 6 columns, so 11 indicates this isochrone does have photometric magnitudes computed, likely BV(Ic) (JK)2MASS.", "test_bcs = test_surface(10**iso[:,1], iso[:, 2], 0.0)\n\ntest_bcs.shape", "For each Teff and logg combination we now have BCs for BV(RI)c from BT-Settl models. Now we need to convert the bolometric corrections to absolute magnitudes.", "bol_mags = 4.74 - 2.5*iso[:, 3]\nfor i in range(test_bcs.shape[1]):\n bcs = -1.0*np.log10(10**iso[:, 1]/5777.) 
+ test_bcs[:, i] - 5.0*iso[:, 4]\n if i == 0:\n test_mags = bol_mags - bcs\n else:\n test_mags = np.column_stack((test_mags, bol_mags - bcs))\niso[50, 0:4], iso[50, 6:], test_mags[50]", "Let's try something different: using the color tables provided by the Phoenix group, from which the bolometric corrections are calculated.", "col_table = np.genfromtxt('colmag.BT-Settl.server.COUSINS.AB', comments='!')", "Create an interpolation surface from the magnitude table.", "col_surface = scint.LinearNDInterpolator(col_table[:, :3], col_table[:, 4:8])", "Compute magnitudes for a Dartmouth isochrone.", "phx_mags = col_surface(10.0**iso[:, 1], iso[:, 2], 0.0)", "Convert surface magnitudes to absolute magnitudes using the distance modulus and the radius of the star.", "for i in range(phx_mags.shape[1]):\n phx_mags[:, i] = phx_mags[:, i] - 5.0*np.log10(10**iso[:, 4]*6.956e10/3.086e18) + 5.0", "Now compare against MARCS values.", "iso[40, :5], iso[40, 6:], phx_mags[40]", "Load an isochrone from the Lyon-Phoenix series.", "phx_iso = np.genfromtxt('/Users/grefe950/Notebook/Projects/ngc2516_spots/data/phx_isochrone_120myr.txt')\n\nfig, ax = plt.subplots(1, 2, figsize=(12., 8.), sharey=True)\n\nax[0].set_xlim(0.0, 2.0)\nax[1].set_xlim(0.0, 4.0)\nax[0].set_ylim(16, 2)\n\nax[0].plot(iso[:, 6] - iso[:, 7], iso[:, 7], lw=3, c=\"#b22222\")\nax[0].plot(phx_mags[:, 0] - phx_mags[:, 1], phx_mags[:, 1], lw=3, c=\"#1e90ff\")\nax[0].plot(phx_iso[:, 7] - phx_iso[:, 8], phx_iso[:, 8], dashes=(20., 5.), lw=3, c=\"#555555\")\n\nax[1].plot(iso[:, 7] - iso[:, 8], iso[:, 7], lw=3, c=\"#b22222\")\nax[1].plot(phx_mags[:, 1] - phx_mags[:, 3], phx_mags[:, 1], lw=3, c=\"#1e90ff\")\nax[1].plot(phx_iso[:, 8] - phx_iso[:, 10], phx_iso[:, 8], dashes=(20., 5.), lw=3, c=\"#555555\")", "Export a new isochrone with colors from AGSS09 (PHX)", "new_isochrone = np.column_stack((iso[:, :6], phx_mags))\nnp.savetxt('/Users/grefe950/Notebook/Projects/pleiades_colors/data/dmestar_00120.0myr_z+0.00_a+0.00_mixed.iso', \n new_isochrone, fmt='%16.8f')", "Separate Test Case\nThese are clearly not correct and are between 1 and 2 magnitudes off from expected values. Need to reproduce the Phoenix group's results, first.", "tmp = -10.*np.log10(3681./5777.) + test_surface(3681., 4.78, 0.0) #+ 5.0*np.log10(0.477)\ntmp \n\n4.74 - 2.5*(-1.44) - tmp" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
mne-tools/mne-tools.github.io
0.23/_downloads/ed3aac2f41b33de8577252a462d2798c/mne_inverse_envelope_correlation_volume.ipynb
bsd-3-clause
[ "%matplotlib inline", "Compute envelope correlations in volume source space\nCompute envelope correlations of orthogonalized activity\n:footcite:HippEtAl2012,KhanEtAl2018 in source space using resting state\nCTF data in a volume source space.", "# Authors: Eric Larson <larson.eric.d@gmail.com>\n# Sheraz Khan <sheraz@khansheraz.com>\n# Denis Engemann <denis.engemann@gmail.com>\n#\n# License: BSD (3-clause)\n\nimport os.path as op\n\nimport mne\nfrom mne.beamformer import make_lcmv, apply_lcmv_epochs\nfrom mne.connectivity import envelope_correlation\nfrom mne.preprocessing import compute_proj_ecg, compute_proj_eog\n\ndata_path = mne.datasets.brainstorm.bst_resting.data_path()\nsubjects_dir = op.join(data_path, 'subjects')\nsubject = 'bst_resting'\ntrans = op.join(data_path, 'MEG', 'bst_resting', 'bst_resting-trans.fif')\nbem = op.join(subjects_dir, subject, 'bem', subject + '-5120-bem-sol.fif')\nraw_fname = op.join(data_path, 'MEG', 'bst_resting',\n 'subj002_spontaneous_20111102_01_AUX.ds')\ncrop_to = 60.", "Here we do some things in the name of speed, such as crop (which will\nhurt SNR) and downsample. Then we compute SSP projectors and apply them.", "raw = mne.io.read_raw_ctf(raw_fname, verbose='error')\nraw.crop(0, crop_to).pick_types(meg=True, eeg=False).load_data().resample(80)\nraw.apply_gradient_compensation(3)\nprojs_ecg, _ = compute_proj_ecg(raw, n_grad=1, n_mag=2)\nprojs_eog, _ = compute_proj_eog(raw, n_grad=1, n_mag=2, ch_name='MLT31-4407')\nraw.info['projs'] += projs_ecg\nraw.info['projs'] += projs_eog\nraw.apply_proj()\ncov = mne.compute_raw_covariance(raw) # compute before band-pass of interest", "Now we band-pass filter our data and create epochs.", "raw.filter(14, 30)\nevents = mne.make_fixed_length_events(raw, duration=5.)\nepochs = mne.Epochs(raw, events=events, tmin=0, tmax=5.,\n baseline=None, reject=dict(mag=8e-13), preload=True)\ndel raw", "Compute the forward and inverse", "# This source space is really far too coarse, but we do this for speed\n# considerations here\npos = 15. # 1.5 cm is very broad, done here for speed!\nsrc = mne.setup_volume_source_space('bst_resting', pos, bem=bem,\n subjects_dir=subjects_dir, verbose=True)\nfwd = mne.make_forward_solution(epochs.info, trans, src, bem)\ndata_cov = mne.compute_covariance(epochs)\nfilters = make_lcmv(epochs.info, fwd, data_cov, 0.05, cov,\n pick_ori='max-power', weight_norm='nai')\ndel fwd", "Compute label time series and do envelope correlation", "epochs.apply_hilbert() # faster to do in sensor space\nstcs = apply_lcmv_epochs(epochs, filters, return_generator=True)\ncorr = envelope_correlation(stcs, verbose=True)", "Compute the degree and plot it", "degree = mne.connectivity.degree(corr, 0.15)\nstc = mne.VolSourceEstimate(degree, [src[0]['vertno']], 0, 1, 'bst_resting')\nbrain = stc.plot(\n src, clim=dict(kind='percent', lims=[75, 85, 95]), colormap='gnuplot',\n subjects_dir=subjects_dir, mode='glass_brain')", "References\n.. footbibliography::" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
whitead/numerical_stats
unit_8/hw_2019/homework_8_key.ipynb
gpl-3.0
[ "Homework 8 Key\nCHE 116: Numerical Methods and Statistics\n2/21/2019\n\n1. Short Answer (12 Points)\n\n\n[2 points] If you sum together 20 numbers sampled from a binomial distribution and 10 from a Poisson distribution, how is your sum distribted?\n\n\n[2 points] If you sample 25 numbers from different beta distributions, how will each of the numbers be distributed?\n\n\n[4 points] Assume a HW grade is determined as the sample mean of 3 HW problems. How is the HW grade distributed if we do not know the population standard deviation? Why?\n\n\n[4 points] For part 3, how could not knowing the population standard deviation change how it's distributed? How does knowledge of that number change the behavior of a random variable?\n\n\n1.1\nNormal\n1.2\nWe are not summing, no NLT. Beta distributed\n1.3\nt-distribution, since we do not know population standard deviation and N < 25\n1.4\nWe have to estimate the standard error using sample standard deviation, which itself is a random variable. If we have the exact number, then we no longer have two sources of randomness.\n2. Confidence Intervals (30 Points)\nReport the given confidence interval for error in the mean using the data given for each problem and describe in words what the confidence interval is for each example. 6 points each\n2.1\n80% Double. \ndata_21 = [65.58, -28.15, 21.17, -0.57, 6.04, -10.21, 36.46, 10.67, 77.98, 15.97]\n2.2\n99% Upper (lower bound, a value such that the mean lies above that value 99% of the time)\ndata_22 = [-8.78, -6.06, -6.03, -6.9, -13.57, -18.76, 1.5, -8.21, -3.21, -11.85, -2.72, -10.38, -11.03, -10.85, -7.6, -7.76, -5.99, -10.02, -6.32, -8.35, -19.28, -11.53, -6.04, -0.81, -12.01, -3.22, -9.25, -4.13, -7.22, -11.0, -14.42, 1.07]\n2.3\n95% Double\ndata_23 = [14.62, 10.34, 7.68, 15.81, 14.48]\n2.4\nRedo part 3 with a known standard deviation of 2\n2.5\n95% Lower (upper bound)\ndata_25 = [2.47, 2.03, 1.82, 6.98, 2.41, 2.32, 7.11, 5.89, 5.77, 3.34, 2.75, 6.51]\n2.1\nThe 80% confidence interval is $19 \\pm 14$", "import scipy.stats as ss\ndata_21 = [65.58, -28.15, 21.17, -0.57, 6.04, -10.21, 36.46, 10.67, 77.98, 15.97]\nse = np.std(data_21, ddof=1) / np.sqrt(len(data_21))\nT = ss.t.ppf(0.9, df=len(data_21) - 1)\nprint(np.mean(data_21), T * se)", "2.2\nThe 99% confidence interval is $\\mu > -10.1$", "data_22 = [-8.78, -6.06, -6.03, -6.9, -13.57, -18.76, 1.5, -8.21, -3.21, -11.85, -2.72, -10.38, -11.03, -10.85, -7.6, -7.76, -5.99, -10.02, -6.32, -8.35, -19.28, -11.53, -6.04, -0.81, -12.01, -3.22, -9.25, -4.13, -7.22, -11.0, -14.42, 1.07]\nse = np.std(data_22, ddof=1) / np.sqrt(len(data_22))\nZ = ss.norm.ppf(1 - 0.99)\nprint(Z * se + np.mean(data_22))", "2.3\nThe 85% confidence interval is $12.5 \\pm 4.3$", "data_23 = [14.62, 10.34, 7.68, 15.81, 14.48]\nse = np.std(data_23, ddof=1) / np.sqrt(len(data_23))\nT = ss.t.ppf(0.975, df=len(data_23) - 1)\nprint(np.mean(data_23), T * se)", "2.4\nThe 95% confidence interval is $12.5 \\pm 2.5$", "data_23 = [14.62, 10.34, 7.68, 15.81, 14.48]\nse = 2 / np.sqrt(len(data_23))\nZ = ss.norm.ppf(0.975)\nprint(np.mean(data_23), T * se)", "2.5\nThe 95% upper bound is $\\mu < 5.2$", "data_25 = [2.47, 2.03, 1.82, 6.98, 2.41, 2.32, 7.11, 5.89, 5.77, 3.34, 2.75, 6.51]\nse = np.std(data_25, ddof=1) / np.sqrt(len(data_25))\nT = ss.t.ppf(0.95, df=len(data_25) - 1)\nprint(np.mean(data_25) + T * se)", "3. Identifiying Distributions (12 Points)\nFor each problem state if it is a t or normal distribution and reports the distribution's $\\mu$ and $\\sigma$. 
Note that $\\mu, \\sigma$s listed below are the population sigmas. Report your answer like: $T(0, 4.3, 4)$ to indicate a $t$-distribution with $\\mu = 0$, $\\sigma = 4.3$ and degrees of freedom of 3. 2 Points each\n\n$P(\\mu)$, $\\bar{x} = -2$, $\\sigma = 4$, $N = 30$\n$P(\\bar{x})$, $\\mu = 1$, $\\sigma = 2$, $N = 5$\n$P(\\mu - \\bar{x})$, $\\sigma = 4.3$, $N = 2$\n$P(\\mu)$, $\\bar{x} = 4$, $\\sigma_x = 1.7$, $N = 50$\n$P(\\mu)$, $\\bar{x} = 5.5$, $\\sigma_x = 2.1$, $N = 9$\n$P(\\mu - \\bar{x})$, $\\sigma_x = 4.3$, $N = 5$\n\n3.1\n$Z(-2, 4 / \\sqrt{30})$\n3.2\n$Z(1, 2 / \\sqrt{5})$\n3.3\n$Z(0, 4.3 / \\sqrt{2})$\n3.4\n$Z(4, 1.7 / \\sqrt{50})$\n3.5\n$T(5.5, 2.1 / \\sqrt{9}, 8)$\n3.5\n$T(0, 4.3 / \\sqrt{5}, 4)$" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
georgetown-analytics/classroom-occupancy
models/kNN_model_KM.ipynb
mit
[ "KNN Model\nDataset Information\nNo. of Features: 12 \nNo. of Instances: 4492\nTable of Contents<a name='table of contents'></a>\n\nData Ingestion\nFeatures & Target Arrays\nHyperparameter Tuning\n a. Model Complexity Curve\n b. GridSearchCV\nClassification Report\nConfusion Matrix\nClass Balance\nSave Model", "%matplotlib inline\n\nimport os\nimport json\nimport time\nimport pickle\nimport requests\nimport numpy as np\nimport pandas as pd\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport yellowbrick as yb\nsns.set_palette('RdBu', 10)", "Data Ingestion<a name='data ingestion'></a>", "URL = 'https://raw.githubusercontent.com/georgetown-analytics/classroom-occupancy/master/models/sensor_data_ml.csv'\n\ndef fetch_data(fname='sensor_data_ml.csv'):\n response = requests.get(URL)\n outpath = os.path.abspath(fname)\n with open(outpath, 'wb') as f:\n f.write(response.content)\n \n return outpath\n\n# Defining fetching data from the URL\nDATA = fetch_data()\n\n# Import sensor data\ndf = pd.read_csv('sensor_data_ml.csv', index_col='datetime', parse_dates=True)\n\n# Rename columns\ndf.columns = ['temp', 'humidity', 'co2', 'light', 'light_st', 'noise',\n 'bluetooth', 'images', 'door', 'occupancy_count', 'occupancy_level']\n\ndf.info()\ndf.describe()", "Features & Target Arrays<a name='features and target arrays'></a>", "# Breakdown of classroom occupancy levels\ndf.occupancy_level.value_counts()\n\n# Encode multiclass target variable\nfrom sklearn.preprocessing import LabelEncoder\n\nencoder = LabelEncoder()\nencoder.fit_transform(df['occupancy_level'])\n\n# Create feature and target arrays\nX = df.drop('occupancy_level', axis=1).values\ny = df['occupancy_level']\n\n# Use TimeSeriesSplit to create training and test set split indices\nfrom sklearn.model_selection import TimeSeriesSplit\n\ntscv = TimeSeriesSplit(n_splits=12)\n\nfor train_index, test_index in tscv.split(X):\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]", "kNN Classifier<a name='knn'></a>\nCross-Validation Score<a name='first cv scores'></a>", "# Initial cross-validation scores\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import cross_val_score\n\n# Create a KNeighborsClassifier object: knn\nknn = KNeighborsClassifier().fit(X_train, y_train)\n\n# Print the 12-fold cross-validation scores\ncv_scores = cross_val_score(knn, X_train, y_train, cv=tscv)\nprint(cv_scores)\nprint('Average 12-Fold CV Score: {:.4f}'.format(np.mean(cv_scores)))\n\n# Initial classification report\nfrom sklearn.metrics import classification_report\n\ntscv = TimeSeriesSplit()\n\nfor train_index, test_index in tscv.split(X):\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n\n# Predict the labels of the test set: y_pred\ny_pred = knn.predict(X_test)\n\n# Compute and print the classification report and training and test scores\nprint('kNN Classification Report: \\n{}'.format(classification_report(y_test, y_pred)))\nprint('Training set score: {:.4f}'.format(knn.score(X_train, y_train)))\nprint('Test set score: {:.4f}'.format(knn.score(X_test, y_test)))", "Hyperparameter Tuning<a name='hyperparameter tuning'></a>\nkNN Model Complexity Curve<a name='model complexity curve'></a>", "from sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import RobustScaler\n\nneighbors = np.arange(1, 15)\ntrain_accuracy = np.empty(len(neighbors))\ntest_accuracy = 
np.empty(len(neighbors))\n\n# Loop over different values of k\nfor i, k in enumerate(neighbors):\n # Setup a k-NN Classifier with k neighbors: knn\n pipeline = Pipeline([('scaler', RobustScaler()),\n ('knn', KNeighborsClassifier(n_neighbors=k))])\n \n # Fit the classifier to the training data\n pipeline.fit(X_train, y_train)\n \n #Compute accuracy on the training set\n train_accuracy[i] = pipeline.score(X_train, y_train)\n \n #Compute accuracy on the testing set\n test_accuracy[i] = pipeline.score(X_test, y_test)\n\n# Plot the results\nplt.plot(neighbors, test_accuracy, label='Testing Accuracy')\nplt.plot(neighbors, train_accuracy, label='Training Accuracy')\nplt.title('k-NN: Varying Number of Neighbors')\nplt.xlabel('Number of k-NN Neighbors')\nplt.ylabel('Test Accuracy')\nplt.legend(loc='best')\nplt.savefig('ml_graphs/knn_model_complexity_curve.png')", "GridSearchCV<a name='gridsearchcv'></a>", "from sklearn.model_selection import GridSearchCV\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import RobustScaler\n\npipeline = make_pipeline(RobustScaler(), KNeighborsClassifier())\n\nparam_grid = {'kneighborsclassifier__n_neighbors': np.arange(1, 15)}\n\nknn = GridSearchCV(pipeline, param_grid=param_grid, cv=tscv)\n\nknn.fit(X_train, y_train)\n\nprint('kNN Best estimator:\\n{}'.format(knn.best_estimator_))\n\n# Print the tuned parameters and score by accessing the best_params_ and best_score_ attributes of grid\nprint('kNN Model (Tuned)')\nprint('Best Score: {:.4f}'.format(knn.best_score_))\nprint('Best Parameters: {}'.format(knn.best_params_))", "kNN Classification Report<a name='classification report'></a>", "# Predict the labels of the test set: y_pred\ny_pred = knn.predict(X_test)\n\nprint('kNN Classification Report: \\n{}'.format(classification_report(y_test, y_pred)))\nprint('Training set score: {:.4f}'.format(knn.score(X_train, y_train)))\nprint('Test set score: {:.4f}'.format(knn.score(X_test, y_test)))\n\n# Compare f1 scores based on different averaging strategies\nfrom sklearn.metrics import f1_score\n\nprint('F1 Score - micro: {:.4f}'.format(f1_score(y_test, y_pred, average='micro')))\nprint('F1 Score - weighted: {:.4f}'.format(f1_score(y_test, y_pred, average='weighted')))\nprint('F1 Score - macro: {:.4f}'.format(f1_score(y_test, y_pred, average='macro')))\n\nfrom sklearn.metrics import precision_score, recall_score\n\nprint('Micro')\nprint('F1 Score: {:.4f}'.format(f1_score(y_test, y_pred, average='micro')))\nprint('Precision Score: {:.4f}'.format(precision_score(y_test, y_pred, average='micro')))\nprint('Recall Score: {:.4f}'.format(recall_score(y_test, y_pred, average='micro')))\n\nfrom yellowbrick.classifier import ClassificationReport\nclasses = ['Empty', 'High', 'Low', 'Mid-Level']\n\nvisualizer = ClassificationReport(knn, classes=classes)\n\nfig = plt.figure()\nvisualizer.fit(X_train, y_train)\nvisualizer.score(X_test, y_test)\ng = visualizer.poof()\n#plt.savefig('ml_graphs/knn_classification_report.png')", "Confusion Matrix<a name='confusion matrix'></a>", "from sklearn.metrics import confusion_matrix\n\nprint('kNN Confusion Matrix')\nprint(confusion_matrix(y_test, y_pred))", "Class Balance<a name='class balance'></a>", "from yellowbrick.classifier import ClassBalance\nclasses = ['Empty', 'High', 'Low', 'Mid-Level']\n\nvisualizer = ClassBalance(knn, classes=classes)\n\nfig = plt.figure()\nvisualizer.fit(X_train, y_train)\nvisualizer.score(X_test, y_test)\ng = visualizer.poof()\n#plt.savefig('ml_graphs/knn_class_balance.png')", "Save Model<a 
name='pickle'></a>", "import pickle\n\nknn_model = 'knn_model.sav'\n\n# Save fitted model to disk\npickle.dump(knn, open(knn_model, 'wb'))", "Return to Table of Contents", "loaded_model = pickle.load(open(knn_model, 'rb'))\n\nresult = loaded_model.score(X_test, y_test)\nprint(result)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
flohorovicic/pynoddy
docs/notebooks/.ipynb_checkpoints/Feature-Analysis-checkpoint.ipynb
gpl-2.0
[ "Analysis of classification results\nObjective: read back in the classification results and compare to original model", "from IPython.core.display import HTML\ncss_file = 'pynoddy.css'\nHTML(open(css_file, \"r\").read())\n\nimport sys, os\nimport matplotlib.pyplot as plt\n# adjust some settings for matplotlib\nfrom matplotlib import rcParams\n# print rcParams\nrcParams['font.size'] = 15\n# determine path of repository to set paths corretly below\nrepo_path = os.path.realpath('../..')\nimport pynoddy.history\nimport numpy as np\n\n%matplotlib inline", "Load original model:", "import pynoddy.output\nreload(pynoddy.output)\noutput_name = \"feature_out\"\nnout = pynoddy.output.NoddyOutput(output_name)\n\nnout.plot_section('x', \n colorbar = True, title=\"\",\n savefig = False, fig_filename = \"ex01_faults_combined.eps\",\n cmap = 'YlOrRd') # note: YlOrRd colourmap should be suitable for colorblindness!", "Load sample classification results\nThe implemented classification method does not return a single best-fit model, but an ensemble of probable model (as it is an MCMC sampling from the posterior). As a first test, we will therefore import single models first and check the misclassification rate defined as:\n$$\\mbox{MCR} = \\frac{\\mbox{Number of misclassified voxels}}{\\mbox{Total number of voxels}}$$", "f_set1 = open(\"../../sandbox/jack/features_lowres-5 with class ID.csv\").readlines()\n\nf_set1[0]\n\n# initialise classification results array\ncf1 = np.empty_like(nout.block)\n\n# iterate through results and append\nfor f in f_set1[1:]:\n fl = f.rstrip().split(\",\")\n cf1[int(fl[0]),int(fl[1]),int(fl[2])] = int(fl[6])\n\nf_set1[2:6]\n\nnout.plot_section('x', data = cf1,\n colorbar = True, title=\"\", layer_labels = range(5),\n savefig = False, fig_filename = \"ex01_faults_combined.eps\",\n cmap = 'YlOrRd') \n\n# compare to original model:\nfig = plt.figure(figsize = (12,6))\nax1 = fig.add_subplot(121)\nax2 = fig.add_subplot(122)\nnout.plot_section('x', ax = ax1, \n colorbar = False, title=\"\",\n savefig = False, fig_filename = \"ex01_faults_combined.eps\",\n cmap = 'YlOrRd') # note: YlOrRd colourmap should be suitable for colorblindness!\nnout.plot_section('x', data = cf1,ax = ax2,\n colorbar = False, title=\"\",\n savefig = False, fig_filename = \"ex01_faults_combined.eps\",\n cmap = 'YlOrRd') ", "Results of the classification do not necessarily contain the same ids as the units in the initial model. This seems to be the case here, as well. 
Re-sort:", "fig = plt.figure(figsize = (12,6))\nax1 = fig.add_subplot(121)\nax2 = fig.add_subplot(122)\nim1 = ax1.imshow(nout.block[15,:,:].transpose(), \n interpolation = 'none', cmap = 'YlOrRd', origin = 'lower left')\nplt.colorbar(im1)\nim2 = ax2.imshow(cf1[15,:,:].transpose(), \n interpolation = 'none', \n cmap = 'YlOrRd', origin = 'lower left')\n\nprint np.unique(nout.block)\nprint np.unique(cf1)\n# define id mapping from cluster results to original:\n# id_mapping = {2:1, 3:2, 4:5, 5:3, 1:4}\n# remapping for result 4:\n# id_mapping = {4:5, 3:4, 1:3, 5:2, 2:1}\n# remapping for result 5:\nid_mapping = {2:5, 1:4, 3:3, 5:2, 4:1}", "Now remap results and compare again:\nNote: create a vectorised function to enable a direct re-mapping of the entire array while keeping the structure!", "def re_map(id_val):\n return id_mapping[id_val]\n\nre_map_vect = np.vectorize(re_map)\n\ncf1_remap = re_map_vect(cf1)\n\n# compare to original model:\nfig = plt.figure(figsize = (12,6))\nax1 = fig.add_subplot(121)\nax2 = fig.add_subplot(122)\nnout.plot_section('x', ax = ax1, \n colorbar = False, title=\"\",\n savefig = False, fig_filename = \"ex01_faults_combined.eps\",\n cmap = 'YlOrRd') # note: YlOrRd colourmap should be suitable for colorblindness!\nnout.plot_section('x', data = cf1_remap, ax = ax2,\n colorbar = False, title=\"\",\n savefig = False, fig_filename = \"ex01_faults_combined.eps\",\n cmap = 'YlOrRd') \n\nfeature_diff = (nout.block != cf1_remap)\n\nnout.plot_section('x', data = feature_diff,\n colorbar = False, title=\"Difference between real and matched model\",\n cmap = 'YlOrRd') \n\n# Calculate the misclassification:\nnp.sum(feature_diff) / float(nout.n_total)\n\n# Export misclassification to VTK:\nmisclass = feature_diff.astype('int')\n\nnout.export_to_vtk(vtk_filename = \"misclass\", data=misclass)", "Combined analysis in a single function\nNote: function assumes correct EOL character in data file (check/ adjust with vi: %s/\\r/\\r/g)\nProblem: remapping is unfortunatley not identical!", "def calc_misclassification(nout, filename):\n \"\"\"Calculate misclassification for classification results data stored in file\n \n **Arguments**:\n - *nout* = NoddyOutput: original model (Noddy object)\n - *filename* = filename (with path): file with classification results\n \"\"\"\n f_set1 = open(filename).readlines()\n # initialise classification results array\n cf1 = np.empty_like(nout.block)\n # iterate through results and append\n for f in f_set1[1:]:\n fl = f.rstrip().split(\",\")\n cf1[int(fl[0]),int(fl[1]),int(fl[2])] = int(fl[6])\n # remap ids\n cf1_remap = re_map_vect(cf1)\n # determine differences in class ids:\n feature_diff = (nout.block != cf1_remap)\n # Calculate the misclassification:\n misclass = np.sum(feature_diff) / float(nout.n_total)\n return misclass\n\nfilename = r\"../../sandbox/jack/features_lowres-4 with class ID.csv\"\ncalc_misclassification(nout, filename)", "Determine validity of uncertainty estimate\nIn addition to single model realisations, an esitmate of model uncertainty is calculated (this is, actually, also one of the main \"selling points\" of the paper). 
So, we will now check if the correct model is actually in the range of the estimated model uncertainty bounds (i.e.: if all voxets values from the original model actually have a non-zero probability in the estimated model)!\nFirst step: load estimated class probabilities:", "# f_set1 = open(\"../../sandbox/jack/features_lowres-6 with class ID and Prob.csv\").readlines()\nf_set1 = open(\"../../sandbox/jack/features_lowres-8 with Prob (weak Beta).csv\").readlines()\n\nf_set1[0]\n\n# initialise classification results array\ncf1 = np.empty_like(nout.block)\n\n# Initialise probability array\nprobs = np.empty((5, cf1.shape[0], cf1.shape[1], cf1.shape[2]))\n\n# iterate through results and append\nfor f in f_set1[1:]:\n fl = f.rstrip().split(\",\")\n i,j,k = int(fl[0]),int(fl[1]),int(fl[2])\n # cf1[i,j,k] = int(fl[6])\n for i2 in range(5):\n probs[i2,i,j,k] = float(fl[i2+6])", "We now need to perform the remapping similar to before, but now for the probability fields:", "fig = plt.figure(figsize = (12,6))\nax1 = fig.add_subplot(121)\nax2 = fig.add_subplot(122)\nim1 = ax1.imshow(nout.block[15,:,:].transpose(), \n interpolation = 'none', cmap = 'YlOrRd', origin = 'lower left')\nplt.colorbar(im2)\nim2 = ax2.imshow(probs[4,15,:,:].transpose(), \n interpolation = 'none', \n cmap = 'YlOrRd', origin = 'lower left')\n\n# Note: map now ids from original model to probability fields in results:\nprob_mapping = {4:0, 5:1, 3:2, 1:3, 2:4}\n\n# Check membership for each class in original model\nfor i in range(1,6):\n tmp = np.ones_like(nout.block) * (nout.block==i)\n # test if voxels have non-zero probability by checking conjunction with zero-prob voxels\n prob_zero = probs[prob_mapping[i],:,:,:] == 0\n misidentified = np.sum(tmp * prob_zero)\n print i, misidentified\n \n\nprob_zero = probs[prob_mapping[1],:,:,:] == 0", "Determination of misclassification statistics\nNext step: use multiple results from one chain to determine misclassification statistics.", "f_set1 = open(\"../../sandbox/jack/features_lowres-7 with 151 realizations.csv\").readlines()\n\n# Initialise results array\nall_results = np.empty((152, cf1.shape[0], cf1.shape[1], cf1.shape[2]))\n\n# iterate through results and append\nfor f in f_set1[1:]:\n fl = f.rstrip().split(\",\")\n i,j,k = int(fl[0]),int(fl[1]),int(fl[2])\n # cf1[i,j,k] = int(fl[6])\n for i2 in range(152):\n try:\n all_results[i2,i,j,k] = float(fl[i2+5])\n except IndexError:\n print i2, i, j, k", "First, we again need to check the assignment of the units/ class ids:", "fig = plt.figure(figsize = (12,6))\nax1 = fig.add_subplot(121)\nax2 = fig.add_subplot(122)\nim1 = ax1.imshow(nout.block[15,:,:].transpose(), \n interpolation = 'none', cmap = 'YlOrRd', origin = 'lower left')\nplt.colorbar(im1)\nim2 = ax2.imshow(all_results[5,15,:,:].transpose(), \n interpolation = 'none', \n cmap = 'YlOrRd', origin = 'lower left')\n\n# mapping from results to original:\nid_mapping = {2:5, 1:4, 3:3, 5:2, 4:1}\n\ndef re_map(id_val):\n return id_mapping[id_val]\nre_map_vect = np.vectorize(re_map)\n\n# Apply remapping to all but first result (seems to be original feature)\nall_results_remap = re_map_vect(all_results[1:,:,:,:])\n\nfig = plt.figure(figsize = (12,6))\nax1 = fig.add_subplot(121)\nax2 = fig.add_subplot(122)\nim1 = ax1.imshow(nout.block[30,:,:].transpose(), \n interpolation = 'none', cmap = 'YlOrRd', origin = 'lower left')\n# plt.colorbar(im1)\nim2 = ax2.imshow(all_results_remap[85,30,:,:].transpose(), \n interpolation = 'none', \n cmap = 'YlOrRd', origin = 'lower left')", "We can now determine 
the misclassification for all results:", "all_misclass = np.empty(151)\nfor i in range(151):\n # determine differences in class ids:\n feature_diff = (nout.block != all_results_remap[i,:,:,:])\n # Calculate the misclassification:\n all_misclass[i] = np.sum(feature_diff) / float(nout.n_total)\n\n\nplt.plot(all_misclass)", "It seems to be the case that the upper thin layer vanishes after approximately 30-40 iterations. From then on, the misclassification rate is approximately constant at around 9.5 percent (which is still quite acceptable!).\nLet's compare this now to classifications with another (lower) beta value (which should put more weight to the data?):" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
zmechz/CarND-TrafficSign-P2
Traffic_Sign_Classifier.ipynb
mit
[ "Self-Driving Car Engineer Nanodegree\nDeep Learning\nProject: Build a Traffic Sign Recognition Classifier\nIn this notebook, a template is provided for you to implement your functionality in stages, which is required to successfully complete this project. If additional code is required that cannot be included in the notebook, be sure that the Python code is successfully imported and included in your submission if necessary. \n\nNote: Once you have completed all of the code implementations, you need to finalize your work by exporting the iPython Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to \\n\",\n \"File -> Download as -> HTML (.html). Include the finished document along with this notebook as your submission. \n\nIn addition to implementing code, there is a writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a write up template that can be used to guide the writing process. Completing the code template and writeup template will cover all of the rubric points for this project.\nThe rubric contains \"Stand Out Suggestions\" for enhancing the project beyond the minimum requirements. The stand out suggestions are optional. If you decide to pursue the \"stand out suggestions\", you can include the code in this Ipython notebook and also discuss the results in the writeup file.\n\nNote: Code and Markdown cells can be executed using the Shift + Enter keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.\n\n\nStep 0: Load The Data", "# Load pickled data\nimport pickle\n\n# TODO: Fill this in based on where you saved the training and testing data\n\ntraining_file = \"traffic-signs/train.p\"\nvalidation_file= \"traffic-signs/valid.p\"\ntesting_file = \"traffic-signs/test.p\"\n\nwith open(training_file, mode='rb') as f:\n train = pickle.load(f)\nwith open(validation_file, mode='rb') as f:\n valid = pickle.load(f)\nwith open(testing_file, mode='rb') as f:\n test = pickle.load(f)\n \nX_train, y_train = train['features'], train['labels']\nX_validation, y_validation = valid['features'], valid['labels']\nX_test, y_test = test['features'], test['labels']", "Step 1: Dataset Summary & Exploration\nThe pickled data is a dictionary with 4 key/value pairs:\n\n'features' is a 4D array containing raw pixel data of the traffic sign images, (num examples, width, height, channels).\n'labels' is a 1D array containing the label/class id of the traffic sign. The file signnames.csv contains id -> name mappings for each id.\n'sizes' is a list containing tuples, (width, height) representing the original width and height the image.\n'coords' is a list containing tuples, (x1, y1, x2, y2) representing coordinates of a bounding box around the sign in the image. THESE COORDINATES ASSUME THE ORIGINAL IMAGE. THE PICKLED DATA CONTAINS RESIZED VERSIONS (32 by 32) OF THESE IMAGES\n\nComplete the basic data summary below. Use python, numpy and/or pandas methods to calculate the data summary rather than hard coding the results. For example, the pandas shape method might be useful for calculating some of the summary results. 
\nProvide a Basic Summary of the Data Set Using Python, Numpy and/or Pandas", "# Basic Summary and Data Set info\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport csv\n\n# TODO: Number of training / validation / testing examples\nn_train = X_train.shape[0]\nn_validation = X_validation.shape[0]\nn_test = X_test.shape[0]\n\n# TODO: What's the shape of an traffic sign image?\nimage_shape = X_train[0].shape\nimage_shape_v = X_validation[0].shape\nimage_shape_t = X_test[0].shape\n\n# TODO: How many unique classes/labels there are in the dataset.\nn_classes = np.unique(y_train).shape[0]\nn_classes_v = np.unique(y_validation).shape[0]\nn_classes_t = np.unique(y_test).shape[0]\n\nclass_list = []\nwith open('signnames.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n class_list.append(row['SignName'])\n\nn_classes_csv = len(class_list)\n \nprint(\"Number of training examples =\", n_train)\nprint(\"Number of validation examples =\", n_validation)\nprint(\"Number of testing examples =\", n_test)\nprint(\"Image Shape:\")\nprint(\" train dataset = \", image_shape)\nprint(\" validation dataset = \", image_shape_v)\nprint(\" test dataset = \", image_shape_t)\nprint(\"Number of classes:\")\nprint(\" distinct labels in train dataset = \", n_classes)\nprint(\" distinct labels in validation dataset = \", n_classes_v)\nprint(\" distinct labels in test dataset = \", n_classes_t)\nprint(\" labels in csv = \", n_classes_csv)", "Include an exploratory visualization of the dataset\nVisualize the German Traffic Signs Dataset using the pickled file(s). This is open ended, suggestions include: plotting traffic sign images, plotting the count of each sign, etc. \nThe Matplotlib examples and gallery pages are a great resource for doing visualizations in Python.\nNOTE: It's recommended you start with something simple first. If you wish to do more, come back to it after you've completed the rest of the sections. It can be interesting to look at the distribution of classes in the training, validation and test set. Is the distribution the same? 
Are there more examples of some classes than others?", "print(\" \")\nprint(\"Training samples distribution per class\")\n\nn_samples=[] \nfor i in range(0, n_classes):\n n_samples.append(X_train[y_train == i].shape[0])\n \nclass_list = np.asarray(list(zip(class_list, n_samples)))\n\nplt.figure(figsize=(10, 2))\nplt.bar(range(0, n_classes), n_samples,color='blue',edgecolor='black')\nplt.title(\"Training samples per class\")\nplt.xlabel(\"Id\")\nplt.ylabel(\"Number of samples\")\nplt.show()\n\nprint(\" \")\nprint(\"Validation samples distribution per class\")\n\nn_samples=[] \nfor i in range(0, n_classes_v):\n n_samples.append(X_validation[y_validation == i].shape[0])\n\nplt.figure(figsize=(10, 2))\nplt.bar(range(0, n_classes), n_samples,color='blue',edgecolor='black')\nplt.title(\"Validation samples per class\")\nplt.xlabel(\"Id\")\nplt.ylabel(\"Number of samples\")\nplt.show()\n\nprint(\" \")\nprint(\"Testing samples distribution per class\")\n\nn_samples=[] \nfor i in range(0, n_classes_t):\n n_samples.append(X_test[y_test == i].shape[0])\n\nplt.figure(figsize=(10, 2))\nplt.bar(range(0, n_classes), n_samples,color='blue',edgecolor='black')\nplt.title(\"Testing samples per class\")\nplt.xlabel(\"Id\")\nplt.ylabel(\"Number of samples\")\nplt.show()", "Select one train image:", "### German sign images are already 32x32\nimport cv2\nimport random\n\n# Visualizations will be shown in the notebook.\n%matplotlib inline\n\nn_classes_csv = len(class_list)\nn_samples=[] \nfor i in range(0, n_classes):\n n_samples.append(X_train[y_train == i].shape[0])\n \nclass_list = np.asarray(list(zip(class_list, n_samples)))\n\nindex = random.randint(0, len(X_train))\nimage = X_train[index].squeeze()\n\nplt.figure(figsize=(1,1))\nplt.imshow(image)\nprint(\"Classifier ID = \", y_train[index], \", Description = \", class_list[y_train[index],0])\n", "Step 2: Design and Test a Model Architecture\nDesign and implement a deep learning model that learns to recognize traffic signs. Train and test your model on the German Traffic Sign Dataset.\nThe LeNet-5 implementation shown in the classroom at the end of the CNN lesson is a solid starting point. You'll have to change the number of classes and possibly the preprocessing, but aside from that it's plug and play! \nWith the LeNet-5 solution from the lecture, you should expect a validation set accuracy of about 0.89. To meet specifications, the validation set accuracy will need to be at least 0.93. It is possible to get an even higher accuracy, but 0.93 is the minimum for a successful project submission. \nThere are various aspects to consider when thinking about this problem:\n\nNeural network architecture (is the network over or underfitting?)\nPlay around preprocessing techniques (normalization, rgb to grayscale, etc)\nNumber of examples per label (some have more than others).\nGenerate fake data.\n\nHere is an example of a published baseline model on this problem. It's not required to be familiar with the approach used in the paper but, it's good practice to try to read papers like these.\nPre-process the Data Set (normalization, grayscale, etc.)\nMinimally, the image data should be normalized so that the data has mean zero and equal variance. For image data, (pixel - 128)/ 128 is a quick way to approximately normalize the data and can be used in this project. \nOther pre-processing steps are optional. You can try different techniques to see if it improves performance. 
\nUse the code cell (or multiple code cells, if necessary) to implement the first step of your project.", "def perform_grayscale(image):\n return cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n\ndef perform_hist_equalization(grayscale_image):\n return cv2.equalizeHist(grayscale_image)\n \ndef perform_image_normalization(equalized_image):\n return equalized_image/255.-.5\n\ndef pre_process_image(image):\n image = perform_grayscale(image)\n image = perform_hist_equalization(image)\n image = perform_image_normalization(image)\n return np.expand_dims(image,axis=3)\n\noriginal_image = X_train[index].squeeze()\ngrayscale_image = perform_grayscale(original_image)\nequalized_image = perform_hist_equalization(grayscale_image)\nnormalized_image = perform_image_normalization(equalized_image)\nimage_shape = np.shape(normalized_image)\n\nprint(\"Original image:\")\nplt.figure(figsize=(1,1))\nplt.imshow(original_image)\nprint(y_train[index])\nplt.show()\nprint(\"Grayscale image data shape =\", image_shape)\n\nprint(\"Preprocess Image techiniques applied\")\nprint(\"Converted to grayscale\")\nplt.figure(figsize=(1,1))\nplt.imshow(grayscale_image, cmap='gray')\nplt.show()\n\nprint(\"Converted to grayscale + histogram equalization:\")\nplt.figure(figsize=(1,1))\nplt.imshow(equalized_image, cmap='gray')\nplt.show()\n\nprint(\"Converted to grayscale + histogram equalization + normalization:\")\nplt.figure(figsize=(1,1))\nplt.imshow(normalized_image, cmap='gray')\nplt.show()\n\nnew_image = pre_process_image(image)\nnew_image_shape = np.shape(new_image)\nprint(\"New Image data shape =\", new_image_shape)", "Changing training data", "import cv2\n\nimg_resize = 32\nN_classes = 43\nimage_shape = (img_resize,img_resize)\nimg_size_flat = img_resize*img_resize\n\n\nimage_S_train = np.array([pre_process_image(X_train[i]) for i in range(len(X_train))],\n dtype = np.float32)\n\nimage_S_valid = np.array([pre_process_image(X_validation[i]) for i in range(len(X_validation))],\n dtype = np.float32)\n\nimage_S_test = np.array([pre_process_image(X_test[i]) for i in range(len(X_test))],\n dtype = np.float32)\n\n### Shuffle the training data.\nfrom sklearn.utils import shuffle\n\nimage_S_train, y_train = shuffle(image_S_train, y_train)", "Setup TensorFlow\nThe EPOCH and BATCH_SIZE values affect the training speed and model accuracy.\nYou do not need to modify this section.", "import tensorflow as tf\n\nEPOCHS = 80\nBATCH_SIZE = 128", "Model Architecture\nUsing LeNet-5 based architecture\nImplement the LeNet-5 neural network architecture.\nInput\nThe LeNet architecture accepts a 32x32xC image as input, where C is the number of color channels. Since German sign images are 32x32 RGB, C is 3 in this case.\nArchitecture\nLayer 1: Convolutional. The output shape should be 28x28x6.\nActivation. Your choice of activation function.\nPooling. The output shape should be 14x14x6.\nLayer 2: Convolutional. The output shape should be 10x10x16.\nActivation. Your choice of activation function.\nPooling. The output shape should be 5x5x16.\nFlatten. Flatten the output shape of the final pooling layer such that it's 1D instead of 3D. The easiest way to do is by using tf.contrib.layers.flatten, which is already imported for you.\nLayer 3: Fully Connected. This should have 120 outputs.\nActivation. Your choice of activation function.\nLayer 4: Fully Connected. This should have 84 outputs.\nActivation. Your choice of activation function.\nLayer 5: Fully Connected (Logits). 
This should have 43 outputs.\nOutput\nReturn the result of the 2nd fully connected layer.", "from tensorflow.contrib.layers import flatten\nn_channels = 1\n\ndef dropout_layer(layer, keep_prob):\n layer_drop = tf.nn.dropout(layer, keep_prob)\n return layer_drop\n\ndef LeNet(x): \n # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer\n keep_prob = 0.75\n mu = 0\n sigma = 0.1\n \n # SOLUTION: Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x12. 3 inputs colour channels\n conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, n_channels, 12), mean = mu, stddev = sigma))\n conv1_b = tf.Variable(tf.zeros(12))\n conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b\n\n # SOLUTION: Activation.\n conv1 = tf.nn.relu(conv1)\n\n # SOLUTION: Pooling. Input = 28x28x12. Output = 14x14x12.\n conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n \n #layer_conv1_drop = dropout_layer(conv1, 0.5)\n\n # SOLUTION: Layer 2: Convolutional. Output = 10x10x32.\n conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 12, 32), mean = mu, stddev = sigma))\n conv2_b = tf.Variable(tf.zeros(32))\n conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b\n \n # SOLUTION: Activation.\n conv2 = tf.nn.relu(conv2)\n\n # SOLUTION: Pooling. Input = 10x10x32. Output = 5x5x32.\n conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n \n # TODO: Layer 2-b: Convolutional. Input = 5x5x32. Output = 3x3x64. \n conv3_W = tf.Variable(tf.truncated_normal(shape=(3, 3, 32, 64), mean = mu, stddev = sigma))\n conv3_b = tf.Variable(tf.zeros(64))\n conv3 = tf.nn.conv2d(conv2, conv3_W, strides=[1, 1, 1, 1], padding='VALID') + conv3_b\n # TODO: Activation.\n conv3 = tf.nn.relu(conv3)\n\n # SOLUTION: Flatten. Input = 3x3x64. Output = 800.\n fc0 = flatten(conv2)\n fc0 = tf.nn.dropout(fc0, keep_prob)\n \n # SOLUTION: Layer 3: Fully Connected. Input = 800. Output = 256.\n fc1_W = tf.Variable(tf.truncated_normal(shape=(800, 256), mean = mu, stddev = sigma))\n fc1_b = tf.Variable(tf.zeros(256))\n fc1 = tf.matmul(fc0, fc1_W) + fc1_b\n \n # SOLUTION: Activation.\n fc1 = tf.nn.relu(fc1)\n fc1 = tf.nn.dropout(fc1, keep_prob) \n\n # SOLUTION: Layer 4: Fully Connected. Input = 120. Output = 84.\n fc2_W = tf.Variable(tf.truncated_normal(shape=(256, 84), mean = mu, stddev = sigma))\n fc2_b = tf.Variable(tf.zeros(84))\n fc2 = tf.matmul(fc1, fc2_W) + fc2_b\n \n # SOLUTION: Activation.\n fc2 = tf.nn.relu(fc2)\n\n # SOLUTION: Layer 5: Fully Connected. Input = 84. 
Output = 43.\n fc3_W = tf.Variable(tf.truncated_normal(shape=(84, 43), mean = mu, stddev = sigma))\n fc3_b = tf.Variable(tf.zeros(43))\n logits = tf.matmul(fc2, fc3_W) + fc3_b\n \n return logits, conv1, conv2, conv3", "Features and Labels\nTrain LeNet to classify the German signs.\nx is a placeholder for a batch of input images.\ny is a placeholder for a batch of output labels.\nYou do not need to modify this section.", "x = tf.placeholder(tf.float32, (None, 32, 32, n_channels))\ny = tf.placeholder(tf.int32, (None))\none_hot_y = tf.one_hot(y, 43)\nkeep_prob = tf.placeholder(tf.float32)", "Training Pipeline\nCreate a training pipeline that uses the model to classify German sign images.", "rate = 0.0005\n\nlogits, conv1, conv2, conv3 = LeNet(x)\ncross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)\nloss_operation = tf.reduce_mean(cross_entropy)\noptimizer = tf.train.AdamOptimizer(learning_rate = rate)\ntraining_operation = optimizer.minimize(loss_operation)", "Model Evaluation\nEvaluate how well the loss and accuracy of the model for a given dataset.\nYou do not need to modify this section.", "correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))\naccuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nsaver = tf.train.Saver()\n\ndef evaluate(X_data, y_data):\n num_examples = len(X_data)\n total_accuracy = 0.0\n sess = tf.get_default_session()\n for offset in range(0, num_examples, BATCH_SIZE):\n batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]\n accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: 1.0})\n total_accuracy += (accuracy * len(batch_x))\n return total_accuracy / num_examples", "Train, Validate and Test the Model\nA validation set can be used to assess how well the model is performing. A low accuracy on the training and validation\nsets imply underfitting. 
A high accuracy on the training set but low accuracy on the validation set implies overfitting.\nTrain the Model\nRun the training data through the training pipeline to train the model.\nBefore each epoch, shuffle the training set.\nAfter each epoch, measure the loss and accuracy of the validation set.\nSave the model after training.\nYou do not need to modify this section.", "with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n num_examples = len(image_S_train)\n \n print(\"Training...\")\n print()\n val_accu_list = []\n batch_acc_list = []\n for i in range(EPOCHS):\n# X_train, y_train = shuffle(X_train, y_train)\n for offset in range(0, num_examples, BATCH_SIZE):\n end = offset + BATCH_SIZE\n batch_x, batch_y = image_S_train[offset:end], y_train[offset:end]\n sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})\n \n training_accuracy = evaluate(image_S_train, y_train)\n validation_accuracy = evaluate(image_S_valid, y_validation)\n batch_accuracy = evaluate(batch_x, batch_y)\n val_accu_list.append(validation_accuracy)\n batch_acc_list.append(batch_accuracy)\n print(\"EPOCH {} ...\".format(i+1))\n print(\"Training Accuracy = {:.3f}\".format(training_accuracy))\n print(\"Validation Accuracy = {:.3f}\".format(validation_accuracy))\n print()\n \n saver.save(sess, './traffic_classifier_data')\n print(\"Model saved\")", "Plot data", "plt.plot(batch_acc_list, label=\"Train Accuracy\")\nplt.plot(val_accu_list, label=\"Validation Accuracy\")\nplt.ylim(.4,1.1)\nplt.xlim(0,EPOCHS)", "Evaluate the Model\nOnce you are completely satisfied with your model, evaluate the performance of the model on the test set.\nBe sure to only do this once!\nIf you were to measure the performance of your trained model on the test set, then improve your model, and then measure the performance of your model on the test set again, that would invalidate your test results. You wouldn't get a true measure of how well your model would perform against real data.\nYou do not need to modify this section.", "with tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint('.'))\n\n test_accuracy = evaluate(image_S_test, y_test)\n print(\"Test Accuracy = {:.3f}\".format(test_accuracy))", "Step 3: Test a Model on New Images\nTo give yourself more insight into how your model is working, download at least five pictures of German traffic signs from the web and use your model to predict the traffic sign type.\nYou may find signnames.csv useful as it contains mappings from the class id (integer) to the actual sign name.\nLoad and Output the Images", "### Load the images and plot them here.\n### Feel free to use as many code cells as needed.\nimport os\nimport cv2\nimport matplotlib.pyplot as plt\n\nnew_images_original = []\ntest_image_labels = list()\ntest_image_labels.append(27)\ntest_image_labels.append(25)\ntest_image_labels.append(14)\ntest_image_labels.append(33)\ntest_image_labels.append(13)\n\npath = \"new_images/\"\nfiles = sorted(os.listdir(path))\nprint(\"Original images:\")\ni = 0\nfor file in files:\n print(path+file)\n image = cv2.imread(path+file)\n image = image[...,::-1] # Convert from BGR <=> RGB\n resized_image = cv2.resize(image,(32,32))\n new_images_original.append(resized_image)\n label = test_image_labels[i]\n desc = class_list[[label],0]\n print(\"Label = \", label, \". 
Desc = \", desc)\n i += 1\n \n plt.figure(figsize=(1, 1))\n plt.imshow(image)\n plt.show()\n\nprint(test_image_labels)\n\n\ntest_images = []\n\nprint(\"Preprocessed images:\")\nfor image in new_images_original:\n preprocessed_image = pre_process_image(image)\n test_images.append(preprocessed_image)\n plt.figure(figsize=(1, 1))\n plt.imshow(preprocessed_image[:,:,0], cmap='gray')\n plt.show()\n", "Predict the Sign Type for Each Image", "### Run the predictions here and use the model to output the prediction for each image.\n### Make sure to pre-process the images with the same pre-processing pipeline used earlier.\n### Feel free to use as many code cells as needed.\n\nwith tf.Session() as sess:\n saver.restore(sess, './traffic_classifier_data')\n top5_prob = sess.run(tf.nn.top_k(tf.nn.softmax(logits), k=5, sorted=True), feed_dict = {x: test_images, keep_prob:1})\n# predicted_logits = sess.run(logits, feed_dict={x:test_images, keep_prob:1})\n# predicts = sess.run(tf.nn.top_k(top5_prob, k=5, sorted=True))\n predicted_labels = np.argmax(top5_prob, axis=1)\n# predictions_labels = np.argmax(predictions, axis=1)\ni=0\nfor image in test_images:\n plt.figure(figsize=(1, 1))\n print(\"Index=\", top5_prob.indices[i, 0])\n plt.xlabel(class_list[top5_prob.indices[i, 0],0])\n plt.imshow(image[:,:,0], cmap='gray')\n plt.show()\n i += 1", "Analyze Performance", "### Calculate the accuracy for these 5 new images. \n### For example, if the model predicted 1 out of 5 signs correctly, it's 20% accurate on these new images.\n\nwith tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint('.'))\n\n test_accuracy = evaluate(test_images, test_image_labels)\n print(\"Test Accuracy = {:.2f}\".format(test_accuracy))", "Output Top 5 Softmax Probabilities For Each Image Found on the Web\nFor each of the new images, print out the model's softmax probabilities to show the certainty of the model's predictions (limit the output to the top 5 probabilities for each image). tf.nn.top_k could prove helpful here. \nThe example below demonstrates how tf.nn.top_k can be used to find the top k predictions for each image.\ntf.nn.top_k will return the values and indices (class ids) of the top k predictions. So if k=3, for each sign, it'll return the 3 largest probabilities (out of a possible 43) and the correspoding class ids.\nTake this numpy array as an example. The values in the array represent predictions. The array contains softmax probabilities for five candidate images with six possible classes. 
tf.nn.top_k is used to choose the three classes with the highest probability:\n```\n(5, 6) array\na = np.array([[ 0.24879643, 0.07032244, 0.12641572, 0.34763842, 0.07893497,\n 0.12789202],\n [ 0.28086119, 0.27569815, 0.08594638, 0.0178669 , 0.18063401,\n 0.15899337],\n [ 0.26076848, 0.23664738, 0.08020603, 0.07001922, 0.1134371 ,\n 0.23892179],\n [ 0.11943333, 0.29198961, 0.02605103, 0.26234032, 0.1351348 ,\n 0.16505091],\n [ 0.09561176, 0.34396535, 0.0643941 , 0.16240774, 0.24206137,\n 0.09155967]])\n```\nRunning it through sess.run(tf.nn.top_k(tf.constant(a), k=3)) produces:\nTopKV2(values=array([[ 0.34763842, 0.24879643, 0.12789202],\n [ 0.28086119, 0.27569815, 0.18063401],\n [ 0.26076848, 0.23892179, 0.23664738],\n [ 0.29198961, 0.26234032, 0.16505091],\n [ 0.34396535, 0.24206137, 0.16240774]]), indices=array([[3, 0, 5],\n [0, 1, 4],\n [0, 5, 1],\n [1, 3, 5],\n [1, 4, 3]], dtype=int32))\nLooking just at the first row we get [ 0.34763842, 0.24879643, 0.12789202], you can confirm these are the 3 largest probabilities in a. You'll also notice [3, 0, 5] are the corresponding indices.", "### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web. \n### Feel free to use as many code cells as needed.\ntest_images = np.asarray(test_images)\nprint(test_images.shape)\n\nplt.figure(figsize=(16, 21))\nfor i in range(5):\n plt.subplot(12, 2, 2*i+1)\n plt.imshow(test_images[i][:,:,0], cmap=\"gray\") \n plt.axis('off')\n plt.title(i)\n plt.subplot(12, 2, 2*i+2)\n plt.axis([0, 1., 0, 6])\n plt.barh(np.arange(1, 6, 1), (np.absolute(top5_prob.values[i, :]/sum(np.absolute(top5_prob.values[i, :])))))\n labs=[class_list[j][0] for j in top5_prob.indices[i, :]]\n plt.yticks(np.arange(1, 6, 1), labs)\nplt.show()", "Project Writeup\nOnce you have completed the code implementation, document your results in a project writeup using this template as a guide. The writeup can be in a markdown or pdf file. \n\nNote: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to \\n\",\n \"File -> Download as -> HTML (.html). Include the finished document along with this notebook as your submission.\n\n\nStep 4 (Optional): Visualize the Neural Network's State with Test Images\nThis Section is not required to complete but acts as an additional excersise for understaning the output of a neural network's weights. While neural networks can be a great learning device they are often referred to as a black box. We can understand what the weights of a neural network look like better by plotting their feature maps. After successfully training your neural network you can see what it's feature maps look like by plotting the output of the network's weight layers in response to a test stimuli image. From these plotted feature maps, it's possible to see what characteristics of an image the network finds interesting. For a sign, maybe the inner network feature maps react with high activation to the sign's boundary outline or to the contrast in the sign's painted symbol.\nProvided for you below is the function code that allows you to get the visualization output of any tensorflow weight layer you want. 
The inputs to the function should be a stimuli image, one used during training or a new one you provided, and then the tensorflow variable name that represents the layer's state during the training process, for instance if you wanted to see what the LeNet lab's feature maps looked like for it's second convolutional layer you could enter conv2 as the tf_activation variable.\nFor an example of what feature map outputs look like, check out NVIDIA's results in their paper End-to-End Deep Learning for Self-Driving Cars in the section Visualization of internal CNN State. NVIDIA was able to show that their network's inner weights had high activations to road boundary lines by comparing feature maps from an image with a clear path to one without. Try experimenting with a similar test to show that your trained network's weights are looking for interesting features, whether it's looking at differences in feature maps from images with or without a sign, or even what feature maps look like in a trained network vs a completely untrained one on the same sign image.\n<figure>\n <img src=\"visualize_cnn.png\" width=\"380\" alt=\"Combined Image\" />\n <figcaption>\n <p></p> \n <p style=\"text-align: center;\"> Your output should look something like this (above)</p> \n </figcaption>\n</figure>\n<p></p>", "### Visualize your network's feature maps here.\n### Feel free to use as many code cells as needed.\n\n# image_input: the test image being fed into the network to produce the feature maps\n# tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer\n# activation_min/max: can be used to view the activation contrast in more detail, by default matplot sets min and max to the actual min and max values of the output\n# plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry\n\ndef outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1 ,plt_num=1):\n # Here make sure to preprocess your image_input in a way your network expects\n # with size, normalization, ect if needed\n # image_input =\n # Note: x should be the same name as your network's tensorflow data placeholder variable\n # If you get an error tf_activation is not defined it may be having trouble accessing the variable from inside a function\n activation = tf_activation.eval(session=sess,feed_dict={x : image_input})\n featuremaps = activation.shape[3]\n plt.figure(plt_num, figsize=(15,15))\n for featuremap in range(featuremaps):\n plt.subplot(8,8, featuremap+1) # sets the number of feature maps to show on each row and column\n plt.title('FeatureMap ' + str(featuremap)) # displays the feature map number\n if activation_min != -1 & activation_max != -1:\n plt.imshow(activation[0,:,:, featuremap], interpolation=\"nearest\", vmin =activation_min, vmax=activation_max, cmap=\"gray\")\n elif activation_max != -1:\n plt.imshow(activation[0,:,:, featuremap], interpolation=\"nearest\", vmax=activation_max, cmap=\"gray\")\n elif activation_min !=-1:\n plt.imshow(activation[0,:,:, featuremap], interpolation=\"nearest\", vmin=activation_min, cmap=\"gray\")\n else:\n plt.imshow(activation[0,:,:, featuremap], interpolation=\"nearest\", cmap=\"gray\")\n\n\nwith tf.Session() as sess:\n saver.restore(sess, './traffic_classifier_data')\n print(\"Convolution #1\")\n print(test_images[0].shape)\n print(test_images.shape)\n outputFeatureMap(test_images,conv1)\n\nwith 
tf.Session() as sess:\n saver.restore(sess, './traffic_classifier_data')\n print(\"Convolution #2\")\n outputFeatureMap(test_images,conv2)\n\nwith tf.Session() as sess:\n saver.restore(sess, './traffic_classifier_data')\n print(\"Convolution #3\")\n outputFeatureMap(test_images,conv3)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
RTHMaK/RPGOne
scipy-2017-sklearn-master/notebooks/19 In Depth - Trees and Forests.ipynb
apache-2.0
[ "%load_ext watermark\n%watermark -d -u -a 'Andreas Mueller, Kyle Kastner, Sebastian Raschka' -v -p numpy,scipy,matplotlib,scikit-learn", "SciPy 2016 Scikit-learn Tutorial\nIn Depth - Decision Trees and Forests", "%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt", "Here we'll explore a class of algorithms based on decision trees.\nDecision trees at their root are extremely intuitive. They\nencode a series of \"if\" and \"else\" choices, similar to how a person might make a decision.\nHowever, which questions to ask, and how to proceed for each answer is entirely learned from the data.\nFor example, if you wanted to create a guide to identifying an animal found in nature, you\nmight ask the following series of questions:\n\nIs the animal bigger or smaller than a meter long?\nbigger: does the animal have horns?\nyes: are the horns longer than ten centimeters?\nno: is the animal wearing a collar\n\n\nsmaller: does the animal have two or four legs?\ntwo: does the animal have wings?\nfour: does the animal have a bushy tail?\n\n\n\n\n\nand so on. This binary splitting of questions is the essence of a decision tree.\nOne of the main benefit of tree-based models is that they require little preprocessing of the data.\nThey can work with variables of different types (continuous and discrete) and are invariant to scaling of the features.\nAnother benefit is that tree-based models are what is called \"nonparametric\", which means they don't have a fix set of parameters to learn. Instead, a tree model can become more and more flexible, if given more data.\nIn other words, the number of free parameters grows with the number of samples and is not fixed, as for example in linear models.\nDecision Tree Regression\nA decision tree is a simple binary classification tree that is\nsimilar to nearest neighbor classification. It can be used as follows:", "from figures import make_dataset\nx, y = make_dataset()\nX = x.reshape(-1, 1)\n\nplt.xlabel('Feature X')\nplt.ylabel('Target y')\nplt.scatter(X, y);\n\nfrom sklearn.tree import DecisionTreeRegressor\n\nreg = DecisionTreeRegressor(max_depth=5)\nreg.fit(X, y)\n\nX_fit = np.linspace(-3, 3, 1000).reshape((-1, 1))\ny_fit_1 = reg.predict(X_fit)\n\nplt.plot(X_fit.ravel(), y_fit_1, color='blue', label=\"prediction\")\nplt.plot(X.ravel(), y, '.k', label=\"training data\")\nplt.legend(loc=\"best\");", "A single decision tree allows us to estimate the signal in a non-parametric way,\nbut clearly has some issues. 
In some regions, the model shows high bias and\nunder-fits the data\n(seen in the long flat lines which don't follow the contours of the data),\nwhile in other regions the model shows high variance and over-fits the data\n(reflected in the narrow spikes which are influenced by noise in single points).\nDecision Tree Classification\nDecision tree classification works very similarly, by assigning all points within a leaf the majority class in that leaf:", "from sklearn.datasets import make_blobs\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom figures import plot_2d_separator\n\n\nX, y = make_blobs(centers=[[0, 0], [1, 1]], random_state=61526, n_samples=100)\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)\n\nclf = DecisionTreeClassifier(max_depth=5)\nclf.fit(X_train, y_train)\n\n\nplot_2d_separator(clf, X, fill=True)\nplt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, s=60, alpha=.7)\nplt.scatter(X_test[:, 0], X_test[:, 1], c=y_test, s=60);", "There are many parameters that control the complexity of a tree, but the one that might be easiest to understand is the maximum depth. This limits how finely the tree can partition the input space, or how many \"if-else\" questions can be asked before deciding which class a sample lies in.\nThis parameter is important to tune for trees and tree-based models. The interactive plot below shows what underfitting and overfitting look like for this model. Having a max_depth of 1 is clearly an underfit model, while a depth of 7 or 8 clearly overfits. The maximum depth a tree can be grown at for this dataset is 8, at which point each leaf only contains samples from a single class. This is known as all leaves being \"pure.\"\nIn the interactive plot below, the regions are assigned blue and red colors to indicate the predicted class for that region. The shade of the color indicates the predicted probability for that class (darker = higher probability), while yellow regions indicate an equal predicted probability for either class.", "from figures import plot_tree_interactive\nplot_tree_interactive()", "Decision trees are fast to train, easy to understand, and often lead to interpretable models. However, single trees often tend to overfit the training data. Playing with the slider above you might notice that the model starts to overfit even before it has a good separation between the classes.\nTherefore, in practice it is more common to combine multiple trees to produce models that generalize better. The most common methods for combining trees are random forests and gradient boosted trees.\nRandom Forests\nRandom forests are simply many trees, built on different random subsets (drawn with replacement) of the data, and using different random subsets (drawn without replacement) of the features for each split.\nThis makes the trees different from each other, and makes them overfit to different aspects. 
Then, their predictions are averaged, leading to a smoother estimate that overfits less.", "from figures import plot_forest_interactive\nplot_forest_interactive()", "Selecting the Optimal Estimator via Cross-Validation", "from sklearn.model_selection import GridSearchCV\nfrom sklearn.datasets import load_digits\nfrom sklearn.ensemble import RandomForestClassifier\n\ndigits = load_digits()\nX, y = digits.data, digits.target\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)\n\nrf = RandomForestClassifier(n_estimators=200)\nparameters = {'max_features':['sqrt', 'log2', 10],\n 'max_depth':[5, 7, 9]}\n\nclf_grid = GridSearchCV(rf, parameters, n_jobs=-1)\nclf_grid.fit(X_train, y_train)\n\nclf_grid.score(X_train, y_train)\n\nclf_grid.score(X_test, y_test)", "Another option: Gradient Boosting\nAnother Ensemble method that can be useful is Boosting: here, rather than\nlooking at 200 (say) parallel estimators, We construct a chain of 200 estimators\nwhich iteratively refine the results of the previous estimator.\nThe idea is that by sequentially applying very fast, simple models, we can get a\ntotal model error which is better than any of the individual pieces.", "from sklearn.ensemble import GradientBoostingRegressor\nclf = GradientBoostingRegressor(n_estimators=100, max_depth=5, learning_rate=.2)\nclf.fit(X_train, y_train)\n\nprint(clf.score(X_train, y_train))\nprint(clf.score(X_test, y_test))", "Exercise: Cross-validating Gradient Boosting\nUse a grid search to optimize the learning_rate and max_depth for a Gradient Boosted\nDecision tree on the digits data set.", "from sklearn.datasets import load_digits\nfrom sklearn.ensemble import GradientBoostingClassifier\n\ndigits = load_digits()\nX_digits, y_digits = digits.data, digits.target\n\n# split the dataset, apply grid-search\n\n#%load solutions/19_gbc_grid.py" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
tpin3694/tpin3694.github.io
machine-learning/loading_scikit-learns_boston_housing_dataset.ipynb
mit
[ "Title: Loading scikit-learn's Boston Housing Dataset\nSlug: loading_scikit-learns_boston_housing-dataset\nSummary: Loading the built-in Boston housing datasets of scikit-learn. \nDate: 2016-08-31 12:00\nCategory: Machine Learning\nTags: Basics\nAuthors: Chris Albon \nPreliminaries", "# Load libraries\nfrom sklearn import datasets\nimport matplotlib.pyplot as plt ", "Load Boston Housing Dataset\nThe Boston housing dataset is a famous dataset from the 1970s. It contains 506 observations on housing prices around Boston. It is often used in regression examples and contains 15 features.", "# Load digits dataset\nboston = datasets.load_boston()\n\n# Create feature matrix\nX = boston.data\n\n# Create target vector\ny = boston.target\n\n# View the first observation's feature values\nX[0]", "As you can see, the features are not standardized. This is more easily seen if we display the values as decimals:", "# Display each feature value of the first observation as floats\n['{:f}'.format(x) for x in X[0]]", "Therefore, it is often beneficial and/or required to standardize the value of the features." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
theavey/ParaTemp
examples/paratemp_analysis_examples.ipynb
apache-2.0
[ "Imports and such", "import collections\nimport errno\nimport sys, os, re, subprocess, glob\nimport time\nimport matplotlib.pyplot as plt\nimport MDAnalysis\nimport MDAnalysis.analysis\nimport MDAnalysis.analysis.rdf\nimport numpy as np\nimport pandas as pd\nimport six\nfrom importlib import reload\n\nimport paratemp.coordinate_analysis as ca\nimport paratemp as pt\nfrom paratemp.re_universe import REUniverse\nimport thtools\nfrom thtools import cd, merge_two_dicts\nfrom thtools import save_obj, load_obj, make_obj_dir\n\nreload(ca)\nreload(pt)\nreload(thtools)", "Parse distances from PLUMED input", "def parse_plumed_dists(p_plumed, verbose=True):\n \"\"\"\n Read a plumed input and return dict of defined dists\n \n Note, this returns 1-based indexes for the atoms which is what\n MDAnalysis will need and what PLUMED/GROMACS use, but it is\n different than VMD (and Python, generally).\"\"\"\n with open(p_plumed, 'r') as f_plumed:\n init_lines = f_plumed.readlines()\n lines = []\n for line in init_lines:\n lines.append(line.split('#')[0])\n dists = dict()\n for line in lines:\n if 'DISTANCE' in line:\n m = re.search(r'(\\S+):.+ATOMS=(\\d+),(\\d+)', line)\n if m:\n n, a1, a2 = m.groups()\n dists[n] = (int(a1), int(a2))\n else:\n n = re.search(r'(\\S+):.+', line).group(1)\n if verbose:\n print('Unable to define atoms '\n 'for distance: {}'.format(n))\n return dists", "Create dicts of files, folders, and distances\nUsing parse_plumed_dists for a single set of simulations", "l_configs = ['MaEn', \n 'MaEx', \n 'MiEn', \n 'MiEx']\ndp_configs = dict()\nfor c in l_configs:\n dp_configs[c] = os.path.abspath(\n os.path.join('PTAD-cinnamate/', c))\n\np_gro = os.path.abspath('PTAD-cinnamate/MaEn/tad-MaEn-solutes.gro')\n\nd_temp = parse_plumed_dists('PTAD-cinnamate/MaEn/plumed-cin-ptad-MaEn.dat')\nd_metad_dists = {'CV1': d_temp['dm1'], 'CV2': d_temp['dm2']}\ndel(d_temp)\nd_ox_dists = {'O-O': [69, 71], 'O(l)-Cy': [69, 75], 'O(r)-Cy': [71, 75]}", "For multiple simulations", "dp_catff = dict(phen_cg='CGenFF-3-body/PT/PTAD-cinnamate/',\n phen_ga='repeat-juanma-w-pt/tad-cinnamate/',\n naph_ga='repeat-juanma-w-pt/ntad-cinnamate/')\ndp_l_configs = dict(MaEn='major-endo',\n MaEx='major-exo',\n MiEn='minor-endo',\n MiEx='minor-exo')\ndp_s_configs = dict(MaEn='MaEn',\n MaEx='MaEx',\n MiEn='MiEn',\n MiEx='MiEx')\ndd_configs = dict(phen_cg=dp_s_configs,\n phen_ga=append_to_keys(dp_l_configs, '13-3htmf-etc/05'),\n naph_ga=append_to_keys(dp_l_configs, '02-PT'))\ndp_gros = dict(phen_cg='/projectnb/nonadmd/theavey/CGenFF-3-body/PT/PTAD-cinnamate/MaEn/tad-MaEn-solutes.gro',\n phen_ga=os.path.abspath('repeat-juanma-w-pt/tad-cinnamate/solutes.gro'),\n naph_ga=os.path.abspath('repeat-juanma-w-pt/ntad-cinnamate/ntad-cinnamate.gro'))\ndd_cv_def = dict(naph_ga={'O-O': [53, 29], 'O(l)-dm': [53, 4], 'O(r)-dm': [29, 4], 'CV1':[129,53], 'CV2':[102,68]})", "Create REUniverses and calculate some dists\nJust import the Universes and read_data", "d_reus = dict()\nfor config in dp_configs:\n reu = REUniverse(p_gro, dp_configs[config], traj_glob='npt*xtc')\n d_reus[config] = reu\n for u in reu:\n u.read_data()", "Import and calculate distances if necessary\nCould also be done by read, calc, save because it now will not do any unnecessary steps", "d_reus = dict()\n\nfor catff in dp_catff:\n dp_configs = dd_configs[catff]\n top = dp_gros[catff]\n for config in dp_configs:\n key = f'{catff}_{config}'\n bf = os.path.join(dp_catff[catff], dp_configs[config])\n reu = REUniverse(top, bf, traj_glob='npt*.xtc')\n for u in reu:\n try:\n 
u.read_data()\n except OSError:\n d_cv_def = dd_cv_def[catff]\n u.calculate_distances(**d_cv_def)\n u.save_data()\n d_reus[key] = reu", "Plotting\nSimple 1D FESs for Universes in an REUniverse", "figs = []\nfor u in reu:\n fig = u.fes_1d('O-O', bins=15, linewidth=2)[3]\n figs.append(fig)", "Simple 2D FESs for Universes in an REUniverse", "figs = []\nfor u in reu:\n fig, ax = u.fes_2d(x='CV1', y='CV2', \n xlabel='CV 1', ylabel='CV 2')[3:5]", "Using ca.fes_array_3_legend", "x_lims = np.zeros([64, 2])\nj = 0\nfor config in d_ptus:\n for i, u in enumerate(d_ptus[config]):\n u.figs = dict()\n u.read_data(ignore_no_data=True)\n fig, axes = ca.fes_array_3_legend(u.data, temp=u.temperature, \n labels=('O-O', 'O(l)-Cy', 'O(r)-Cy'),\n bins=15, linewidth=2.0)[3:]\n ax = axes.flat[0]\n if ax.get_ylim()[1] > 10:\n for ax in axes.flat[:3]:\n ax.set_ylim((-0.5, 7))\n fig.tight_layout(rect=[0, 0, 1, 0.95])\n fig.suptitle('{} {:.0f} K'.format(config, u.temperature))\n u.figs['fes_ox_dists_bins'] = fig\n x_lims[j] = ax.get_xlim()\n j += 1", "Radial distributions\nCalculate radial distributions", "name_gro = p_gro\n\nname_gro = os.path.abspath(name_gro)\n\nfig, ax = plt.subplots()\n\nbins_CV_Os = {}\nrdfs_CV_Os = {}\n\nfor key in sorted(dp_configs):\n# for key in ['MiEx']:\n with cd(dp_configs[key]):\n i = 0\n print 'Now starting on {} {}...'.format(key, i)\n univ = d_reus[key][i]\n final_time = univ.final_time_str\n file_name_end = '-PT-phen-cg-{}-{}-{}.pdf'.format(key, i, final_time)\n \n reactant_CV_Os = univ.select_atoms('(resname is 3htmf) and (name is O1 or name is O2)')\n catalyst_CV_Os = univ.select_atoms('(resname is TAD or resname is tad) and (name is O1 or name is OH)')\n \n rcrdf = MDAnalysis.analysis.rdf.InterRDF(\n reactant_CV_Os, catalyst_CV_Os, range=(2.0, 12.0))\n rcrdf.run()\n \n print rcrdf.count\n \n bins_CV_Os[key] = rcrdf.bins\n rdfs_CV_Os[key] = rcrdf.rdf\n \n ax.plot(rcrdf.bins, rcrdf.rdf, label=key)\n\nax.legend()\nfig", "Make FES from radial distributions", "r = 0.0019872\ntemp = univ.temperature\n\ng_CV_Os = {}\nfor key in rdfs_CV_Os:\n rdfs = rdfs_CV_Os[key]\n g_CV_Os[key] = - r * temp * np.log(rdfs + 1e-40)\nmin_g = min([min(gs) for gs in g_CV_Os.values()])\nfor key in g_CV_Os:\n g_CV_Os[key] = g_CV_Os[key] - min_g\n\nfig, ax = plt.subplots()\n\nfor key in sorted(g_CV_Os):\n ax.plot(bins_CV_Os[key], g_CV_Os[key], label=key)\n ax.set_xlim([2.4,9.9])\n ax.set_ylim([-0.1,2.7])\nax.legend()\nax.set_ylabel(r'$\\Delta G$ / (kcal / mol)')\nax.set_xlabel('distance / $\\mathrm{\\AA}$')\n\nfig", "Plot FES from two Universes on the same axes", "x_lims = np.zeros((64, 2))\nj = 0\ndf_pvn_same = dict()\ndf_figs = df_pvn_same\nfor key in d_reus:\n if 'naph' not in key:\n continue\n config = key[-4:]\n reu = d_reus[key]\n equiv_ga_reu = d_reus['phen_ga_'+config]\n for i, u in enumerate(reu):\n fig, ax = plt.subplots(1, 1)\n df_figs[f'{config}_{i}'] = fig\n u.fes_1d('O-O', \n bins=15, \n ax=ax, linewidth=2, label='naphthyl')\n equiv_ga_reu[i].fes_1d('O-O', \n bins=15, \n ax=ax, linewidth=2, label='phenanthryl')\n ax.set_xlim((3.24, 5.85))\n ax.set_aspect(0.3, adjustable='box-forced')\n if ax.get_ylim()[1] > 10:\n ax.set_ylim((-0.5, 7))\n ax.legend()\n fig.tight_layout()\n x_lims[j] = ax.get_xlim()\n j += 1\n# break", "Pull out certain frames from a trajectory and save to disk", "# Cutoff values used for frame selection\ncv1_cuts = [6.5, 9.]\ncv2_cuts = [1.5, 3.]\nname_set = 'lCV1-sCV2' # (partial) name for the file\n\n# instantiate the Universe object\nuniv = ca.Taddol('solutes.gro', 
'major-endo/13-3htmf-etc/05/pbc-MaEn-0.xtc')\n# Calculate/read-in the distance data\ntry:\n    univ.data['CV1']\nexcept KeyError:\n    univ.read_data(filename='major-endo/13-3htmf-etc/05/npt-PT-MaEn-out0.h5')\n\n# Create boolean array telling where the cutoffs are satisfied\nbool_array = ((univ.data['CV1'] > cv1_cuts[0]) & (univ.data['CV1'] < cv1_cuts[1]) \n & (univ.data['CV2'] > cv2_cuts[0]) & (univ.data['CV2'] < cv2_cuts[1]))\nnum = len(univ.data[bool_array])\nprint('These cutoffs include {} frames.'.format(num))\n\n# Create solute atomselection so the solvent is not saved to disk\nsolutes = univ.select_atoms('resname is 3HT or resname is CIN or resname is TAD')\n\n# write the selected frames into a new trajectory file\nwith MDAnalysis.Writer('minim-structs-'+name_set+'-rjm-PT-MaEn-0.xtc', \n                       solutes.n_atoms) as W:\n    for ts in univ.trajectory:\n        if bool_array[univ.trajectory.frame]:\n            W.write(solutes)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
vinutah/UGAN
02_code/gan.ipynb
mit
[ "Generative Adversarial Networks\nGenerative adversarial networks (GANs) are a powerful approach for\nprobabilistic modeling (I. Goodfellow et al., 2014; I. Goodfellow, 2016).\nThey posit a deep generative model and they enable fast and accurate\ninferences.\nThis mimics Edward tutorial on GAN at:\nhttp://edwardlib.org/tutorials/gan.", "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport edward as ed\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport numpy as np\nimport os\nimport tensorflow as tf\n\nfrom edward.models import Uniform\nfrom tensorflow.contrib import slim\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nimport pandas as pd", "Data", "# load data set\n# put the dimensions of preprocessed images\ndata_width = 150\ndata_height = 150\ndata_dim = data_width * data_height\n\ndata_file_path = 'ganesh_preproecessed_images.csv'\nX = np.array(pd.read_csv(data_file_path, header=None))\n\ndef plot(samples):\n fig = plt.figure(figsize=(4, 4))\n gs = gridspec.GridSpec(4, 4)\n gs.update(wspace=0.05, hspace=0.05)\n\n for i, sample in enumerate(samples):\n ax = plt.subplot(gs[i])\n plt.axis('off')\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_aspect('equal')\n plt.imshow(sample.reshape(data_width, data_height), cmap='Greys_r')\n\n return fig\n\n\ned.set_seed(42)\n\nM = 300 # batch size during training\nd = 100 # latent dimension\n\nIMG_DIR = \"img\"\n\nif not os.path.exists(IMG_DIR):\n os.makedirs(IMG_DIR)\n\nx_ph = tf.placeholder(tf.float32, [M, data_dim])", "Model\nGANs posit generative models using an implicit mechanism. Given some\nrandom noise, the data is assumed to be generated by a deterministic\nfunction of that noise.\nFormally, the generative process is\n\\begin{align}\n\\mathbf{\\epsilon} &\\sim p(\\mathbf{\\epsilon}), \\\n\\mathbf{x} &= G(\\mathbf{\\epsilon}; \\theta),\n\\end{align}\nwhere $G(\\cdot; \\theta)$ is a neural network that takes the samples\n$\\mathbf{\\epsilon}$ as input. The distribution\n$p(\\mathbf{\\epsilon})$ is interpreted as random noise injected to\nproduce stochasticity in a physical system; it is typically a fixed\nuniform or normal distribution with some latent dimensionality.\nIn Edward, we build the model as follows, using TensorFlow Slim to\nspecify the neural network. 
It defines a 3-layer fully connected neural\nnetwork and outputs a vector of length data_width $\\times$ data_height with values in\n$[0,1]$.", "def generative_network(eps):\n    h1 = slim.fully_connected(eps, 128, activation_fn=tf.nn.relu)\n    h2 = slim.fully_connected(h1, 128, activation_fn=tf.nn.relu)\n    x = slim.fully_connected(h2, data_dim, activation_fn=tf.sigmoid)\n    return x\n\nwith tf.variable_scope(\"Gen\"):\n    eps = Uniform(tf.zeros([M, d]) - 1.0, tf.ones([M, d]))\n    x = generative_network(eps)", "Inference\nA key idea in likelihood-free methods is to learn by\ncomparison (e.g., Rubin, 1984; Gretton, Borgwardt, Rasch, Schölkopf, & Smola, 2012): by\nanalyzing the discrepancy between samples from the model and samples\nfrom the true data distribution, we have information on where the\nmodel can be improved in order to generate better samples.\nIn GANs, a neural network $D(\\cdot;\\phi)$ makes this comparison,\nknown as the discriminator.\n$D(\\cdot;\\phi)$ takes data $\\mathbf{x}$ as input (either\ngenerations from the model or data points from the data set), and it\ncalculates the probability that $\\mathbf{x}$ came from the true data.\nIn Edward, we use the following discriminative network. It is simply a\nfeedforward network with 2 ReLU hidden layers. It returns the\nprobability in the logit (unconstrained) scale.", "def discriminative_network(x):\n    \"\"\"Outputs probability in logits.\"\"\"\n    h1 = slim.fully_connected(x, 128, activation_fn=tf.nn.relu)\n    h2 = slim.fully_connected(h1, 128, activation_fn=tf.nn.relu)\n    logit = slim.fully_connected(h2, 1, activation_fn=None)\n    return logit", "Let $p^*(\\mathbf{x})$ represent the true data distribution.\nThe optimization problem used in GANs is\n\\begin{equation*}\n\\min_\\theta \\max_\\phi~\n\\mathbb{E}_{p^*(\\mathbf{x})} [ \\log D(\\mathbf{x}; \\phi) ]\n+ \\mathbb{E}_{p(\\mathbf{x}; \\theta)} [ \\log (1 - D(\\mathbf{x}; \\phi)) ].\n\\end{equation*}\nThis optimization problem is bilevel: it requires a minimum solution\nwith respect to generative parameters and a maximum solution with\nrespect to discriminative parameters.\nIn practice, the algorithm proceeds by iterating gradient updates on\neach. An\nadditional heuristic also modifies the objective function for the\ngenerative model in order to avoid saturation of gradients\n(I. J. Goodfellow, 2014).\nMany sources of intuition exist behind GAN-style training. One, which\nis the original motivation, is based on the idea that the two neural\nnetworks are playing a game. The discriminator tries to best\ndistinguish samples away from the generator. The generator tries\nto produce samples that are indistinguishable by the discriminator.\nThe goal of training is to reach a Nash equilibrium.\nAnother source is the idea of casting unsupervised learning as\nsupervised learning\n(M. U. Gutmann, Dutta, Kaski, & Corander, 2014; M. Gutmann & Hyvärinen, 2010).\nThis allows one to leverage the power of classification—a problem that\nin recent years is (relatively speaking) very easy.\nA third comes from classical statistics, where the discriminator is\ninterpreted as a proxy of the density ratio between the true data\ndistribution and the model\n (Mohamed & Lakshminarayanan, 2016; Sugiyama, Suzuki, & Kanamori, 2012). By augmenting an\noriginal problem that may require the model's density with a\ndiscriminator (such as maximum likelihood), one can recover the\noriginal problem when the discriminator is optimal. 
Furthermore, this\napproximation is very fast, and it justifies GANs from the perspective\nof approximate inference.\nIn Edward, the GAN algorithm (GANInference) simply takes the\nimplicit density model on x as input, binded to its\nrealizations x_ph. In addition, a parameterized function\ndiscriminator is provided to distinguish their\nsamples.", "inference = ed.GANInference(\n data={x: x_ph}, discriminator=discriminative_network)", "We'll use ADAM as optimizers for both the generator and discriminator.\nWe'll run the algorithm for 15,000 iterations and print progress every\n1,000 iterations.", "optimizer = tf.train.AdamOptimizer()\noptimizer_d = tf.train.AdamOptimizer()\n\ninference = ed.GANInference(\n data={x: x_ph}, discriminator=discriminative_network)\ninference.initialize(\n optimizer=optimizer, optimizer_d=optimizer_d,\n n_iter=20000, n_print=1000)", "We now form the main loop which trains the GAN. At each iteration, it\ntakes a minibatch and updates the parameters according to the\nalgorithm. At every 1000 iterations, it will print progress and also\nsaves a figure of generated samples from the model.", "sess = ed.get_session()\ntf.global_variables_initializer().run()\n\nidx = np.random.randint(M, size=16)\ni = 0\nfor t in range(inference.n_iter):\n if t % inference.n_print == 0:\n samples = sess.run(x)\n samples = samples[idx, ]\n\n fig = plot(samples)\n plt.savefig(os.path.join(IMG_DIR, '{}.png').format(\n str(i).zfill(3)), bbox_inches='tight')\n plt.close(fig)\n i += 1\n\n# x_batch, _ = mnist.train.next_batch(M)\n x_batch = X[np.random.choice(np.arange(X.shape[0]), size=M)]\n info_dict = inference.update(feed_dict={x_ph: x_batch})\n inference.print_progress(info_dict)", "Examining convergence of the GAN objective can be meaningless in\npractice. The algorithm is usually run until some other criterion is\nsatisfied, such as if the samples look visually okay, or if the GAN\ncan capture meaningful parts of the data.\nCriticism\nEvaluation of GANs remains an open problem---both in criticizing their\nfit to data and in assessing convergence.\nRecent advances have considered alternative objectives and\nheuristics to stabilize training (see also Soumith Chintala's\nGAN hacks repo)." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
tensorflow/docs-l10n
site/ja/model_optimization/guide/pruning/pruning_with_keras.ipynb
apache-2.0
[ "Copyright 2020 The TensorFlow Authors.", "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "Keras でのプルーニングの例\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td> <a target=\"_blank\" href=\"https://www.tensorflow.org/model_optimization/guide/pruning/pruning_with_keras\"> <img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\"> TensorFlow.org で表示</a>\n</td>\n <td> <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/model_optimization/guide/pruning/pruning_with_keras.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\">Google Colab で実行</a>\n</td>\n <td><a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/ja/model_optimization/guide/pruning/pruning_with_keras.ipynb\"> <img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\"> GitHubでソースを表示</a></td>\n <td><a href=\"https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/model_optimization/guide/pruning/pruning_with_keras.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\">ノートブックをダウンロード</a></td>\n</table>\n\n概要\nマグニチュードベースの重みプルーニングに関するエンドツーエンドの例へようこそ。\nその他のページ\n重みプルーニングの紹介、およびプルーニングを使用すべきかどうかの判定(サポート情報も含む)については、概要ページをご覧ください。\nユースケースに合った API を素早く特定するには(80%のスパース性を持つモデルの完全プルーニングを超えるユースケース)、総合ガイドをご覧ください。\n要約\nこのチュートリアルでは、次について説明しています。\n\nMNIST の tf.keras モデルを最初からトレーニングする\nプルーニング API を適用してモデルを微調整し、精度を確認する\nプルーニングによって 3 倍小さな TF および TFLite モデルを作成する\nプルーニングとポストトレーニング量子化を組み合わせて、10 倍小さな TFLite モデルを作成する\nTF から TFLite への精度の永続性を確認する\n\nセットアップ", "! 
pip install -q tensorflow-model-optimization\n\nimport tempfile\nimport os\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom tensorflow import keras\n\n%load_ext tensorboard", "プルーニングを使用せずに、MNIST のモデルをトレーニングする", "# Load MNIST dataset\nmnist = keras.datasets.mnist\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n\n# Normalize the input image so that each pixel value is between 0 and 1.\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\n\n# Define the model architecture.\nmodel = keras.Sequential([\n keras.layers.InputLayer(input_shape=(28, 28)),\n keras.layers.Reshape(target_shape=(28, 28, 1)),\n keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation='relu'),\n keras.layers.MaxPooling2D(pool_size=(2, 2)),\n keras.layers.Flatten(),\n keras.layers.Dense(10)\n])\n\n# Train the digit classification model\nmodel.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\nmodel.fit(\n train_images,\n train_labels,\n epochs=4,\n validation_split=0.1,\n)", "ベースラインのテスト精度を評価して、後で使用できるようにモデルを保存します。", "_, baseline_model_accuracy = model.evaluate(\n test_images, test_labels, verbose=0)\n\nprint('Baseline test accuracy:', baseline_model_accuracy)\n\n_, keras_file = tempfile.mkstemp('.h5')\ntf.keras.models.save_model(model, keras_file, include_optimizer=False)\nprint('Saved baseline model to:', keras_file)", "プルーニングを使ってトレーニング済みのモデルを微調整する\nモデルを定義する\nモデル全体にプルーニングを適用し、モデルの概要でこれを確認します。\nこの例では、50% のスパース性(50% が重みゼロ)でモデルを開始し、80% のスパース性で終了します。\n総合ガイドでは、モデルの精度を改善するために、一部のレイヤーをプルーニングする方法をご覧いただけます。", "import tensorflow_model_optimization as tfmot\n\nprune_low_magnitude = tfmot.sparsity.keras.prune_low_magnitude\n\n# Compute end step to finish pruning after 2 epochs.\nbatch_size = 128\nepochs = 2\nvalidation_split = 0.1 # 10% of training set will be used for validation set. 
\n\nnum_images = train_images.shape[0] * (1 - validation_split)\nend_step = np.ceil(num_images / batch_size).astype(np.int32) * epochs\n\n# Define model for pruning.\npruning_params = {\n 'pruning_schedule': tfmot.sparsity.keras.PolynomialDecay(initial_sparsity=0.50,\n final_sparsity=0.80,\n begin_step=0,\n end_step=end_step)\n}\n\nmodel_for_pruning = prune_low_magnitude(model, **pruning_params)\n\n# `prune_low_magnitude` requires a recompile.\nmodel_for_pruning.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\nmodel_for_pruning.summary()", "モデルをトレーニングしてベースラインに対して評価する\n2 エポック、プルーニングを使って微調整します。\nトレーニング中は、tfmot.sparsity.keras.UpdatePruningStep が必要です。また、tfmot.sparsity.keras.PruningSummaries により、進捗状況の追跡とデバッグのログを得られます。", "logdir = tempfile.mkdtemp()\n\ncallbacks = [\n tfmot.sparsity.keras.UpdatePruningStep(),\n tfmot.sparsity.keras.PruningSummaries(log_dir=logdir),\n]\n \nmodel_for_pruning.fit(train_images, train_labels,\n batch_size=batch_size, epochs=epochs, validation_split=validation_split,\n callbacks=callbacks)", "この例では、ベースラインと比較し、プルーニング後のテスト精度に最小限の損失があります。", "_, model_for_pruning_accuracy = model_for_pruning.evaluate(\n test_images, test_labels, verbose=0)\n\nprint('Baseline test accuracy:', baseline_model_accuracy) \nprint('Pruned test accuracy:', model_for_pruning_accuracy)", "ログには、レイヤーごとのスパース性の進行状況が示されます。", "#docs_infra: no_execute\n%tensorboard --logdir={logdir}", "Colab を使用していないユーザーは、TensorBoard.dev で、このノートブックの前回の実行結果を閲覧できます。\nプルーニングによって 3 倍小さなモデルを作成する\ntfmot.sparsity.keras.strip_pruning と標準圧縮アルゴリズム(gzip など)の適用は、プルーニングの圧縮のメリットを確認する上で必要です。\n\nstrip_pruning は、トレーニング時にのみプルーニングが必要とするすべての tf.Variable を除去するため、必要です。そうでない場合、推論中にモデルのサイズが増大してしまいます。\nシリアル化された重み行列はプルーニング前と同じサイズであるため、標準の圧縮アルゴリズムの適用が必要です。ただし、プルーニングによってほとんどの重みがゼロになるため、モデルをさらに圧縮するためにアルゴリズムが使用できる冗長性が追加されます。\n\nまず、TensorFlow の圧縮可能なモデルを作成します。", "model_for_export = tfmot.sparsity.keras.strip_pruning(model_for_pruning)\n\n_, pruned_keras_file = tempfile.mkstemp('.h5')\ntf.keras.models.save_model(model_for_export, pruned_keras_file, include_optimizer=False)\nprint('Saved pruned Keras model to:', pruned_keras_file)", "次に、TFLite の圧縮可能なモデルを作成します。", "converter = tf.lite.TFLiteConverter.from_keras_model(model_for_export)\npruned_tflite_model = converter.convert()\n\n_, pruned_tflite_file = tempfile.mkstemp('.tflite')\n\nwith open(pruned_tflite_file, 'wb') as f:\n f.write(pruned_tflite_model)\n\nprint('Saved pruned TFLite model to:', pruned_tflite_file)", "実際に gzip でモデルを圧縮し、zip 圧縮されたサイズを測定するヘルパー関数を定義します。", "def get_gzipped_model_size(file):\n # Returns size of gzipped model, in bytes.\n import os\n import zipfile\n\n _, zipped_file = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(zipped_file, 'w', compression=zipfile.ZIP_DEFLATED) as f:\n f.write(file)\n\n return os.path.getsize(zipped_file)", "比較して、モデルがプルーニングによって 3 倍小さくなっていることを確認します。", "print(\"Size of gzipped baseline Keras model: %.2f bytes\" % (get_gzipped_model_size(keras_file)))\nprint(\"Size of gzipped pruned Keras model: %.2f bytes\" % (get_gzipped_model_size(pruned_keras_file)))\nprint(\"Size of gzipped pruned TFlite model: %.2f bytes\" % (get_gzipped_model_size(pruned_tflite_file)))", "プルーニングとポストトレーニング量子化を組み合わせて、10倍 小さなモデルを作成する\nさらにメリットを得るために、ポストトレーニング量子化をプルーニングされたモデルに適用できます。", "converter = tf.lite.TFLiteConverter.from_keras_model(model_for_export)\nconverter.optimizations = [tf.lite.Optimize.DEFAULT]\nquantized_and_pruned_tflite_model = converter.convert()\n\n_, quantized_and_pruned_tflite_file = 
tempfile.mkstemp('.tflite')\n\nwith open(quantized_and_pruned_tflite_file, 'wb') as f:\n f.write(quantized_and_pruned_tflite_model)\n\nprint('Saved quantized and pruned TFLite model to:', quantized_and_pruned_tflite_file)\n\nprint(\"Size of gzipped baseline Keras model: %.2f bytes\" % (get_gzipped_model_size(keras_file)))\nprint(\"Size of gzipped pruned and quantized TFlite model: %.2f bytes\" % (get_gzipped_model_size(quantized_and_pruned_tflite_file)))", "TF から TFLite への精度の永続性を確認する\nテストデータセットで TFLite モデルを評価するヘルパー関数を定義します。", "import numpy as np\n\ndef evaluate_model(interpreter):\n input_index = interpreter.get_input_details()[0][\"index\"]\n output_index = interpreter.get_output_details()[0][\"index\"]\n\n # Run predictions on ever y image in the \"test\" dataset.\n prediction_digits = []\n for i, test_image in enumerate(test_images):\n if i % 1000 == 0:\n print('Evaluated on {n} results so far.'.format(n=i))\n # Pre-processing: add batch dimension and convert to float32 to match with\n # the model's input data format.\n test_image = np.expand_dims(test_image, axis=0).astype(np.float32)\n interpreter.set_tensor(input_index, test_image)\n\n # Run inference.\n interpreter.invoke()\n\n # Post-processing: remove batch dimension and find the digit with highest\n # probability.\n output = interpreter.tensor(output_index)\n digit = np.argmax(output()[0])\n prediction_digits.append(digit)\n\n print('\\n')\n # Compare prediction results with ground truth labels to calculate accuracy.\n prediction_digits = np.array(prediction_digits)\n accuracy = (prediction_digits == test_labels).mean()\n return accuracy", "プルーニングされ、量子化されたモデルを評価し、TensorFlow の精度が TFLite バックエンドに持続されていることを確認します。", "interpreter = tf.lite.Interpreter(model_content=quantized_and_pruned_tflite_model)\ninterpreter.allocate_tensors()\n\ntest_accuracy = evaluate_model(interpreter)\n\nprint('Pruned and quantized TFLite test_accuracy:', test_accuracy)\nprint('Pruned TF test accuracy:', model_for_pruning_accuracy)", "まとめ\nこのチュートリアルでは、TensorFlow と TFLite の両方に TensorFlow Model Optimization Toolkit API を使用してスパースモデルを作成する方法を確認しました。また、プルーニングとポストトレーニング量子化を組み合わせて、さらにメリットを得ることを確認しました。\n精度の損失を最小限に抑えて、10 倍小さい MNIST のモデルを作成しました。\nこの新しい機能をぜひお試しください。リソースが制限される環境でのデプロイにおいて、特に重要となります。" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
AndreySheka/dl_ekb
hw5/Seminar5.ipynb
mit
[ "Week6\nIn this part, we'll load a pre-trained network and play with it.", "from __future__ import print_function\nfrom sys import version_info\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport scipy\nimport theano\nimport theano.tensor as T\nimport lasagne\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\n%matplotlib inline\n\nfrom scipy.misc import imread, imsave, imresize\nfrom lasagne.utils import floatX", "Model Zoo (4 pts)\nLasagne has a plethora of pre-training netrworks in the model zoo\n* Even more models within the community (neighbor repos, PRs, etc.)\nWe'll start by picking VGG16 and deploying it in our notebook.\nWarning! VGG16 network requires around 3GB of memory to predict event for single-image batch. If you don't have that luxury, try binder or azure notebooks.", "!wget https://s3.amazonaws.com/lasagne/recipes/pretrained/imagenet/vgg16.pkl -O weights.pkl\n\n# copyright: see http://www.robots.ox.ac.uk/~vgg/research/very_deep/\n\n\nfrom lasagne.layers import InputLayer\nfrom lasagne.layers import DenseLayer\nfrom lasagne.layers import NonlinearityLayer\nfrom lasagne.layers import DropoutLayer\nfrom lasagne.layers import Pool2DLayer as PoolLayer\nfrom lasagne.layers import Conv2DLayer as ConvLayer\nfrom lasagne.nonlinearities import softmax\n\n\ndef build_model():\n <paste network architecture here>\n return net\n\n#classes' names are stored here\nclasses = pickle.load(open('classes.pkl', 'rb'))\n#for example, 10th class is ostrich:\nprint(classes[9])", "You have to implement two functions in the cell below.\nPreprocess function should take the image with shape (w, h, 3) and transform it into a tensor with shape (1, 3, 224, 224). Without this transformation, our net won't be able to digest input image. \nAdditionally, your preprocessing function have to rearrange channels RGB -> BGR and subtract mean values from every channel.", "MEAN_VALUES = np.array([104, 117, 123])\nIMAGE_W = 224\n\ndef preprocess(img):\n img = <convert RGB to BGR>\n \n img = <substract mean>\n \n #convert from [w,h,3 to 1,3,w,h]\n img = np.transpose(img, (2, 0, 1))[None]\n return floatX(img)\n\ndef deprocess(img):\n img = img.reshape(img.shape[1:]).transpose((1, 2, 0))\n for i in range(3):\n img[:,:, i] += MEAN_VALUES[i]\n return img[:, :, :: -1].astype(np.uint8)\n\nimg = (np.random.rand(IMAGE_W, IMAGE_W, 3) * 256).astype(np.uint8)\n\nprint(np.linalg.norm(deprocess(preprocess(img)) - img))", "If your implementation is correct, the number above will be small, because deprocess function is the inverse of preprocess function\nDeploy the network", "net = build_model()\n\nwith open('weights.pkl', 'rb') as f:\n if version_info.major == 2:\n weights = pickle.load(f)\n elif version_info.major == 3:\n weights = pickle.load(f, encoding='latin1')\n \n<load weights into the network>\n\ninput_image = T.tensor4('input')\noutput = lasagne.layers.get_output(net[<which layer>], input_image)\n\nprob = theano.function([input_image], output) ", "Sanity check\nLet's make sure our network actually works. \nTo do so, we'll feed it with some example images.", "img = imread('sample_images/albatross.jpg')\nplt.imshow(img)\nplt.show()\n\np = prob(preprocess(img))\n\nlabels = p.ravel().argsort()[-1:-6:-1]\nprint('top-5 classes are:')\nfor l in labels:\n print('%3f\\t%s' % (p.ravel()[l], classes[l].split(',')[0]))", "Ouch!\nTry running network 2-3 times. 
If the output changes between runs, then we've probably done something wrong.\nFigure out what's the problem with the network.\nHint: there are two such 'problematic' layers in vgg16. They're both near the end.\nYou can make the network deterministic by giving it such a flag in the lasagne.layers.get_output function above.\nFun opportunity\nImageNet does not contain any human classes, so if you feed the network with some human photo, it will most likely hallucinate something which is closest to your image.\nTry feeding the network with something peculiar: your avatar, Donald Trump, Victor Lempitsky or anyone.\nGrand-quest: Dogs Vs Cats (6 pts)\n\noriginal competition\nhttps://www.kaggle.com/c/dogs-vs-cats\n25k JPEG images of various sizes, 2 classes (guess what)\n\nYour main objective\n\nIn this seminar your goal is to fine-tune a pre-trained model to distinguish between the two rivaling animals\nThe first step is to just reuse some network layer as features", "!wget https://www.dropbox.com/s/d61lupw909hc785/dogs_vs_cats.train.zip?dl=1 -O data.zip\n!unzip data.zip\n#you may need to adjust paths in the next section, depending on your OS", "for starters\n\nTrain an sklearn model, evaluate validation accuracy (should be >80%)", "#extract features from images\nfrom tqdm import tqdm\nfrom scipy.misc import imresize\n\nX = []\nY = []\n\n#this may be a tedious process. If so, store the results in some pickle and re-use them.\nfor fname in tqdm(os.listdir('train/')):\n y = fname.startswith(\"cat\")\n img = imread(\"train/\"+fname)\n img = preprocess(imresize(img,(IMAGE_W,IMAGE_W)))\n features = <preprocess the image into features>\n Y.append(y)\n X.append(features)\n\n\nX = np.concatenate(X) #stack all [1xfeature] matrices into one. \nassert X.ndim==2\n#WARNING! the concatenate works for [1xN] matrices. If you have another format, stack them yourself.\n\n#crop if we ended prematurely\nY = Y[:len(X)]\n\nfrom sklearn.model_selection import train_test_split\n\n<split data either here or by cross-validation>", "load our dakka", "from sklearn.ensemble import RandomForestClassifier,ExtraTreesClassifier,GradientBoostingClassifier,AdaBoostClassifier\nfrom sklearn.linear_model import LogisticRegression, RidgeClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier", "Main quest\n\nGet the score improved!\n\nNo methods are illegal: ensembling, data augmentation, NN hacks. 
\nJust don't let test data slip into training.\nThe main requirement is that you implement the NN fine-tuning recipe:\nSplit the raw image data\n\nplease do train/validation/test instead of just train/test\nreasonable but not optimal split is 20k/2.5k/2.5k or 15k/5k/5k\n\nChoose which vgg layers you are going to use\n\nAnything except prob is okay\nDo not forget that vgg16 uses dropout\n\nBuild a few layers on top of chosen \"neck\" layers.\n\na good idea is to just stack more layers inside the same network\nalternative: stack on top of get_output\n\nTrain the newly added layers for some iterations\n\nyou can selectively train some weights by only sending them to your optimizer\nlasagne.updates.mysupermegaoptimizer(loss, only_those_weights_i_wanna_train)\n\n\nselecting all weights from the head but not below the neck:\nall_params = lasagne.layers.get_all_params(new_output_layer_or_layers,trainable=True)\nold_params = lasagne.layers.get_all_params(neck_layers,trainable=True)\nnew_params = [w for w in all_params if w not in old_params]\n\n\nit's crucial to monitor the network performance at this and the following steps\n\nFine-tune the network body\n\nprobably a good idea to SAVE your new network weights now 'cuz it's easy to mess things up.\nMoreover, saving weights periodically is a no-nonsense idea\neven more crucial to monitor validation performance\nmain network body may need a separate, much lower learning rate\nsince updates are dictionaries, one can just compute their union\nupdates = {}\nupdates.update(lasagne.updates.how_i_optimize_old_weights())\nupdates.update(lasagne.updates.how_i_optimize_new_weights())\nmake sure they do not have overlapping keys. Otherwise, the earlier one will be forgotten.\nassert len(updates) == len(old_updates) + len(new_updates)\n\n\n\nPROFIT!!!\n\nEvaluate the final score\nSubmit to kaggle\ncompetition page https://www.kaggle.com/c/dogs-vs-cats\nget test data https://www.kaggle.com/c/dogs-vs-cats/data\n\n\n\nSome ways to get bonus points\n\nexplore other networks from the model zoo\nplay with architecture\n85%/90%/93%/95%/97% kaggle score (screen pls).\ndata augmentation, prediction-time data augmentation\nuse any more advanced fine-tuning technique you know/read anywhere\nml hacks that benefit the final score", "print(\"I can do it!\")" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ML4DS/ML4all
C5.Classification_NN/NeuralNetworks_professor.ipynb
mit
[ "<font color='teal'> Introduction to Neural Networks and Pytorch </font>\nNotebook version: 0.2. (Nov 5, 2021)\n\nAuthors: Jerónimo Arenas García (jarenas@ing.uc3m.es)\n Jesús Cid-Sueiro (jcid@tsc.uc3m.es)\n\nChanges: v.0.1. (Nov 14, 2020) - First version\n v.0.2. (Nov 5, 2021) - Structuring code, revisiting formulation\n\nPending changes:\n Use epochs instead of iters in first part of notebook\n Add an example with dropout\n Add theory about CNNs\n Define some functions to simplify code cells", "import numpy as np\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\n\nsize = 18\nparams = {'legend.fontsize': 'Large',\n 'axes.labelsize': size,\n 'axes.titlesize': size,\n 'xtick.labelsize': size*0.75,\n 'ytick.labelsize': size*0.75}\nplt.rcParams.update(params)", "<font color='teal'> 1. Introduction and purpose of this Notebook </font>\n<font color='teal'> 1.1. About Neural Networks </font>\n\n\nNeural Networks (NN) have become the state of the art for many machine learning problems\n\nNatural Language Processing\nComputer Vision\nImage Recognition\n\n\n\nThey are in widespread use for many applications, e.g.,\n\nLanguage translation (<a href=\"https://arxiv.org/pdf/1609.08144.pdf\">Google Neural Machine Translation System</a>) \nAutomatic speech recognition (<a href=\"https://machinelearning.apple.com/research/hey-siri\">Hey Siri!</a> DNN overview)\nAutonomous navigation (<a href=\"https://venturebeat.com/2020/04/13/facebooks-ai-teaches-robots-to-navigate-environments-using-less-data/\">Facebook Robot Autonomous 3D Navigation</a>)\nAutomatic plate recognition\n\n\n\n<center><img src=\"figures/ComputerVision.png\" /></center>\nFeed Forward Neural Networks have been around since 1960 but only recently (last 10-12 years) have they met their expectations, and improve other machine learning algorithms\n\nComputation resources are now available at large scale\nCloud Computing (AWS, Azure)\nFrom MultiLayer Perceptrons to Deep Learning\nBig Data sets\nThis has also made possible an intense research effort resulting in\nTopologies better suited to particular problems (CNNs, RNNs)\nNew training strategies providing better generalization\n\n\n\nIn parallel, Deep Learning Platforms have emerged that make design, implementation, training, and production of DNNs feasible for everyone\n<font color='teal'> 1.2. Scope</font>\n\nTo provide just an overview of most important NNs and DNNs concepts\nConnecting with already studied methods as starting point\nIntroduction to PyTorch\nProviding links to external sources for further study\n\n<font color='teal'> 1.3. Outline</font>\n\nIntroduction and purpose of this Notebook\nIntroduction to Neural Networks\nImplementing Deep Networks with PyTorch\n\n<font color='teal'> 1.4. 
Other resources </font>\n\nWe point here to external resources and tutorials that are excellent material for further study of the topic\nMost of them include examples and exercises using numpy and PyTorch\nThis notebook uses examples and other material from some of these sources\n\n|Tutorial|Description|\n|-----|---------------------|\n|<a href=\"https://www.simplilearn.com/tutorials/deep-learning-tutorial\"> <img src=\"figures/simplilearn.png\" width=\"100\"/> </a>|Very general tutorial including videos and an overview of top deep learning platforms|\n|<a href=\"http://d2l.ai/\"> <img src=\"figures/dl2ai.png\" width=\"100\"/> </a>|Very complete book with a lot of theory and examples for MxNET, PyTorch, and TensorFlow|\n|<a href=\"https://pytorch.org/tutorials/\"> <img src=\"figures/PyTorch.png\" width=\"100\"/> </a>|Official tutorials from the PyTorch project. Contains a 60 min overview, and a very practical learning PyTorch with examples tutorial|\n|<a href=\"https://www.kaggle.com/kanncaa1/deep-learning-tutorial-for-beginners\"> <img src=\"figures/kaggle.png\" width=\"100\"/> </a>|Kaggle tutorials covering an introduction to Neural Networks using Numpy, and a second one offering a PyTorch tutorial|\nIn addition to this, PyTorch MOOCs can be followed for free in main sites: edX, Coursera, Udacity\n<font color='teal'> 2. Introduction to Neural Networks </font>\nIn this section, we will implement neural networks from scratch using Numpy arrays\n\nNo need to learn any new Python libraries\nBut we need to deal with complexity of multilayer networks\nLow-level implementation will be useful to grasp the most important concepts concerning DNNs\nBack-propagation\nActivation functions\nLoss functions\nOptimization methods\nGeneralization\nSpecial layers and configurations\n\n\n\n<font color='teal'> 2.1. Data preparation </font>\nWe start by loading some data sets that will be used to carry out the exercises\n<font color='olive'>Sign language digits data set</font>\n\nDataset is taken from <a href=\"https://www.kaggle.com/ardamavi/sign-language-digits-dataset\"> Kaggle</a> and used in the above referred tutorial\n2062 digits in sign language. $64 \\times 64$ images\nProblem with 10 classes. 
One hot encoding for the label matrix\nInput data are images; we also create a flattened version", "digitsX = np.load('./data/Sign-language-digits-dataset/X.npy')\ndigitsY = np.load('./data/Sign-language-digits-dataset/Y.npy')\nK = digitsX.shape[0]\nimg_size = digitsX.shape[1]\ndigitsX_flatten = digitsX.reshape(K,img_size*img_size)\n\nprint('Size of Input Data Matrix:', digitsX.shape)\nprint('Size of Flattened Input Data Matrix:', digitsX_flatten.shape)\nprint('Size of Label Data Matrix:', digitsY.shape)\nselected = [260, 1400]\nplt.subplot(1, 2, 1), plt.imshow(digitsX[selected[0]].reshape(img_size, img_size)), plt.axis('off')\nplt.subplot(1, 2, 2), plt.imshow(digitsX[selected[1]].reshape(img_size, img_size)), plt.axis('off')\nplt.show()\nprint('Labels corresponding to figures:', digitsY[selected,])", "<font color='olive'>Dogs vs Cats data set</font>\n\nDataset is taken from <a href=\"https://www.kaggle.com/c/dogs-vs-cats\"> Kaggle</a>\n25000 pictures of dogs and cats\nBinary problem\nInput data are images; we also create a flattened version\nOriginal images are RGB and of arbitrary size\nPreprocessed images are $64 \\times 64$ and gray scale", "# Preprocessing of original Dogs and Cats Pictures\n# Adapted from https://medium.com/@mrgarg.rajat/kaggle-dogs-vs-cats-challenge-complete-step-by-step-guide-part-1-a347194e55b1\n# RGB channels are collapsed to GRAYSCALE\n# Images are resampled to 64x64\n\n\"\"\"\nimport os, cv2 # cv2 -- OpenCV\n\ntrain_dir = './data/DogsCats/train/'\nrows = 64\ncols = 64\ntrain_images = sorted([train_dir+i for i in os.listdir(train_dir)])\n\ndef read_image(file_path):\n image = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE)\n return cv2.resize(image, (rows, cols),interpolation=cv2.INTER_CUBIC)\n\ndef prep_data(images):\n m = len(images)\n X = np.ndarray((m, rows, cols), dtype=np.uint8)\n y = np.zeros((m,))\n print(\"X.shape is {}\".format(X.shape))\n \n for i,image_file in enumerate(images) :\n image = read_image(image_file)\n X[i,] = np.squeeze(image.reshape((rows, cols)))\n if 'dog' in image_file.split('/')[-1].lower():\n y[i] = 1\n elif 'cat' in image_file.split('/')[-1].lower():\n y[i] = 0\n \n if i%5000 == 0 :\n print(\"Processed {} of {}\".format(i, m))\n \n return X,y\n\nX_train, y_train = prep_data(train_images)\nnp.save('./data/DogsCats/X.npy', X_train)\nnp.save('./data/DogsCats/Y.npy', y_train)\n\"\"\"\n\nDogsCatsX = np.load('./data/DogsCats/X.npy')\nDogsCatsY = np.load('./data/DogsCats/Y.npy')\nK = DogsCatsX.shape[0]\nimg_size = DogsCatsX.shape[1]\nDogsCatsX_flatten = DogsCatsX.reshape(K,img_size*img_size)\n\nprint('Size of Input Data Matrix:', DogsCatsX.shape)\nprint('Size of Flattened Input Data Matrix:', DogsCatsX_flatten.shape)\nprint('Size of Label Data Matrix:', DogsCatsY.shape)\nselected = [260, 16000]\nplt.subplot(1, 2, 1), plt.imshow(DogsCatsX[selected[0]].reshape(img_size, img_size)), plt.axis('off')\nplt.subplot(1, 2, 2), plt.imshow(DogsCatsX[selected[1]].reshape(img_size, img_size)), plt.axis('off')\nplt.show()\nprint('Labels corresponding to figures:', DogsCatsY[selected,])", "<font color='teal'> 2.2. 
Logistic Regression as a Simple Neural Network </font>\n\nWe can consider logistic regression as an extremely simple (1 layer) neural network\n\n<center><img src=\"figures/LR_network.png\" width=\"600\"/></center>\n\n\nIn this context, $\\text{NLL}({\\bf w})$ is normally referred to as cross-entropy loss\n\n\nWe need to find parameters $\\bf w$ and $b$ to minimize the loss $\\rightarrow$ GD / SGD\n\nGradient computation can be simplified using the <font color='navy'>chain rule</font>\n\n<br>\n\\begin{align}\n\\frac{\\partial \\text{NLL}}{\\partial {\\bf w}} & = \\frac{\\partial \\text{NLL}}{\\partial {\\hat y}} \\cdot \\frac{\\partial \\hat y}{\\partial o} \\cdot \\frac{\\partial o}{\\partial {\\bf w}} \\\\\n& = \\sum_{k=0}^{K-1} \\left[\\frac{1 - y_k}{1 - \\hat y_k} - \\frac{y_k}{\\hat y_k}\\right]\\hat y_k (1-\\hat y_k) {\\bf x}_k \\\\\n& = \\sum_{k=0}^{K-1} \\left[\\hat y_k - y_k\\right] {\\bf x}_k \\\\\n\\frac{\\partial \\text{NLL}}{\\partial b} & = \\sum_{k=0}^{K-1} \\left[\\hat y_k - y_k \\right]\n\\end{align}\n\nGradient Descent Optimization\n\n<br>\n$${\\bf w}_{n+1} = {\\bf w}_n + \\rho_n \\sum_{k=0}^{K-1} (y_k - \\hat y_k){\\bf x}_k$$\n$$b_{n+1} = b_n + \\rho_n \\sum_{k=0}^{K-1} (y_k - \\hat y_k)$$", "from sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\n\ndef get_dataset(dataset_name, forze_binary=False):\n \"\"\"\n Loads the selected dataset, among two options: DogsCats or digits.\n \n If dataset_name == 'digits', you can take a dataset with two classes only,\n using forze_binary == True\n \"\"\"\n\n if dataset_name == 'DogsCats':\n X = DogsCatsX_flatten\n y = DogsCatsY\n elif dataset_name == 'digits':\n if forze_binary:\n #Zero and Ones are one hot encoded in columns 1 and 4\n X0 = digitsX_flatten[np.argmax(digitsY, axis=1)==1,]\n X1 = digitsX_flatten[np.argmax(digitsY, axis=1)==4,]\n X = np.vstack((X0, X1))\n y = np.zeros(X.shape[0])\n y[X0.shape[0]:] = 1\n else:\n X = digitsX_flatten\n y = digitsY\n else:\n print(\"-- ERROR: Unknown dataset\")\n return\n \n # Joint normalization of all data. 
For images [-.5, .5] scaling is frequent\n min_max_scaler = MinMaxScaler(feature_range=(-.5, .5))\n X = min_max_scaler.fit_transform(X)\n\n # Generate train and validation data, shuffle\n X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42, shuffle=True)\n \n return X_train, X_val, y_train, y_val\n\n# Define some useful functions\ndef logistic(t):\n \"\"\" \n Computes the logistic function\n \"\"\"\n return 1.0 / (1 + np.exp(-t))\n\ndef forward(w,b,x):\n \"\"\"\n Computes the network output\n \"\"\"\n # return logistic(x.dot(w) + b)\n return logistic(x @ w + b)\n\ndef backward(y, y_hat, x):\n \"\"\"\n Computes the gradient of the loss function for a single sample x with\n ouput y_hat, given label y.\n \"\"\"\n # w_grad = x.T.dot((1-y)*y_hat - y*(1-y_hat))/len(y)\n # b_grad = np.sum((1-y)*y_hat - y*(1-y_hat))/len(y)\n w_grad = x.T @ (y_hat - y) / len(y)\n b_grad = np.mean(y_hat - y)\n return w_grad, b_grad\n \ndef accuracy(y, y_hat):\n return np.mean(y == (y_hat >= 0.5))\n\ndef loss(y, y_hat):\n return - (y @ np.log(y_hat) + (1 - y) @ np.log(1 - y_hat)) / len(y)\n\nX_train, X_val, y_train, y_val = get_dataset('digits', forze_binary=True)\n\n#Neural Network Training\nepochs = 50\nrho = .05 # Use this setting for Sign Digits Dataset\n\n#Parameter initialization\nw = .1 * np.random.randn(X_train.shape[1])\nb = .1 * np.random.randn(1)\n\nloss_train = np.zeros(epochs)\nloss_val = np.zeros(epochs)\nacc_train = np.zeros(epochs)\nacc_val = np.zeros(epochs)\n\nfor epoch in np.arange(epochs):\n y_hat_train = forward(w, b, X_train)\n y_hat_val = forward(w, b, X_val)\n w_grad, b_grad = backward(y_train, y_hat_train, X_train)\n w = w - rho * w_grad\n b = b - rho * b_grad\n \n loss_train[epoch] = loss(y_train, y_hat_train)\n loss_val[epoch] = loss(y_val, y_hat_val)\n acc_train[epoch] = accuracy(y_train, y_hat_train)\n acc_val[epoch] = accuracy(y_val, y_hat_val)\n\nplt.figure(figsize=(14,5))\nplt.subplot(1, 2, 1), plt.plot(loss_train, 'b'), plt.plot(loss_val, 'r'), plt.legend(['train', 'val']), plt.title('Cross-entropy loss')\nplt.subplot(1, 2, 2), plt.plot(acc_train, 'b'), plt.plot(acc_val, 'r'), plt.legend(['train', 'val']), plt.title('Accuracy')\nplt.show()", "<font color='olive'>Exercise</font>\n\n\nStudy the behavior of the algorithm changing the number of epochs and the learning rate\n\n\nRepeat the analysis for the other dataset, trying to obtain as large an accuracy value as possible\n\n\nWhat do you believe are the reasons for the very different performance for both datasets?\n\n\nLinear logistic regression allowed us to review a few concepts that are key for Neural Networks:\n\nNetwork topology (In this case, a linear network with one layer)\nActivation functions\nParametric approach ($\\bf w$/$b$)\nParameter initialization\nObtaining the network prediction using forward computation\nLoss function\nParameter gradient calculus using backward computation\nOptimization method for parameters update (here, GD)\n\n<font color='teal'> 2.3. 
(Multiclass) SoftMax Regression </font>\n\nOne hot encoding output, e.g., $[0, 1, 0, 0]$, $[0, 0, 0, 1]$\nUsed to encode categorical variables without a predefined order\nSimilar to logistic regression, the network tries to predict class probabilities\n$$\\hat y_{k,j} = \\hat P(y_k=j|{\\bf x}_k)$$\n\nNetwork output should satisfy \"probability constraints\"\n$$\\hat y_{k,j} \\in [0,1]\\qquad \\text{and} \\qquad \\sum_j \\hat y_{k,j} = 1$$\n\n\nSoftmax regression network topology:\n<img src=\"figures/SR_network.png\" width=\"600\"/>\n\n\n<font color='olive'>Notation</font>\nIn this section, it is important to pay attention to subindexes:\n|Notation/ Variable Name|Definition|\n|-----------------------|---------------------------------|\n|$y_k \\in [0,\\dots,M-1]$|The label of pattern $k$|\n|${\\bf y}_k$|One hot encoding of the label of pattern $k$|\n|$y_{k,m}$|$m$-th component of vector ${\\bf y}_k$|\n|$y_{m}$|$m$-th component of generic vector ${\\bf y}$ (i.e., for an undefined pattern)|\n|$\\hat {\\bf y}_k$|Network output for pattern $k$|\n|$\\hat y_{k,m}$|$m$-th network output for pattern $k$|\n|$\\hat y_{m}$|$m$-th network output for an undefined pattern|\n|$k$|Index used for pattern enumeration|\n|$m$|Index used for network output enumeration|\n|$j$|Secondary index for selected network output|\n<font color='olive'>The softmax function</font>\n\nIt is to multiclass problems what the logistic function is to binary classification\nInvented in 1959 by the social scientist R. Duncan Luce\nTransforms a set of $M$ real numbers to satisfy \"probability\" constraints\n\n<br>\n$${\\bf \\hat y} = \\text{softmax}({\\bf o}) \\qquad \\text{where} \\qquad \\hat y_j = \\frac{\\exp(o_j)}{\\sum_m \\exp(o_m)} $$\n\nContinuous and <font color=\"navy\">differentiable</font> function\n\n<br>\n$$\\frac{\\partial \\hat y_j}{\\partial o_j} = \\hat y_j (1 - \\hat y_j) \\qquad \\text{and} \\qquad \\frac{\\partial \\hat y_j}{\\partial o_m} = - \\hat y_j \\hat y_m$$\n\nThe classifier is still linear, since\n\n<br>\n$$\\arg\\max \\hat {\\bf y} = \\arg\\max \\hat {\\bf o} = \\arg\\max {{\\bf W} {\\bf x} + {\\bf b}}$$\n<font color='olive'>Cross-entropy loss for multiclass problems</font>\n\nSimilarly to logistic regression, minimization of the log-likelihood can be stated to obtain ${\\bf W}$ and ${\\bf b}$\n\n<br>\n$$\\text{Binary}: \\text{NLL}({\\bf w}, b) = - \\sum_{k=0}^{K-1} \\log \\hat P(y_k|{\\bf x}_k)$$\n$$\\text{Multiclass}: \\text{NLL}({\\bf W}, {\\bf b}) = - \\sum_{k=0}^{K-1} \\log \\hat P(y_k|{\\bf x}_k)$$\n\nUsing one hot encoding for the label vector of each sample, e.g., $y_k = 2 \\rightarrow {\\bf y}_k = [0, 0, 1, 0]$\n\n$$\\text{NLL}({\\bf W}, {\\bf b}) = - \\sum_{k=0}^{K-1} \\sum_{m=0}^{M-1} y_{k,m} \\log \\hat P(m|{\\bf x}_k)= - \\sum_{k=0}^{K-1} \\sum_{m=0}^{M-1} y_{k,m} \\log \\hat y_{k,m} = \\sum_{k=0}^{K-1} l({\\bf y}_k, \\hat {\\bf y}_k)$$\n\n\nNote that for each pattern, only one element in the inner sum (the one indexed with $m$) is non-zero\n\n\nIn the context of Neural Networks, this cost is referred to as the cross-entropy loss\n\n\n<br>\n$$l({\\bf y}, \\hat {\\bf y}) = - \\sum_{m=0}^{M-1} y_{m} \\log \\hat y_{m}$$\n<font color='olive'>Network optimization</font>\n\nGradient Descent Optimization\n\n<br>\n$${\\bf W}_{n+1} = {\\bf W}_n - \\rho_n \\sum_{k=0}^{K-1} \\frac{\\partial l({\\bf y}_k,{\\hat {\\bf y}_k})}{\\partial {\\bf W}}$$\n$${\\bf b}_{n+1} = {\\bf b}_n - \\rho_n \\sum_{k=0}^{K-1} \\frac{\\partial l({\\bf y}_k,{\\hat {\\bf y}_k})}{\\partial {\\bf b}}$$\n\nWe compute derivatives using the chain rule 
twice\n\n<br>\n\\begin{align}\n\\frac{\\partial l({\\bf y}, \\hat{\\bf y})}{\\partial {\\bf W}}\n &= \\frac{\\partial l({\\bf y}, \\hat{\\bf y})}{\\partial {\\bf o}} \n \\cdot \\frac{\\partial {\\bf o}}{\\partial {\\bf W}} \\\\ \n &= \\sum_{i=0}^{M-1}\n \\frac{\\partial l({\\bf y}, \\hat{\\bf y})}{\\partial o_i} \n \\cdot \\frac{\\partial o_i}{\\partial {\\bf W}} \\\\ \n &= \\frac{\\partial l({\\bf y}, \\hat{\\bf y})}{\\partial {\\bf o}} \n \\cdot {\\bf x}^\\intercal \\\\ \n &= \\frac{\\partial \\hat{\\bf y}}{\\partial {\\bf o}} \n \\cdot \\frac{\\partial l({\\bf y}, \\hat{\\bf y})}{\\partial \\hat{\\bf y}}\n \\cdot {\\bf x}^\\intercal \\\\ \n & = \\left[\\begin{array}{ccccc}\n \\hat y_1 (1 - \\hat y_1) & -\\hat y_1 \\hat y_2 & \\dots & -\\hat y_1 \\hat y_{M-1} \\\\ \n -\\hat y_2 \\hat y_1 & \\hat y_2 (1 - \\hat y_2) & \\dots & -\\hat y_2 \\hat y_{M-1} \\\\\n \\vdots & \\vdots & \\ddots & \\vdots \\\\\n - \\hat y_{M-1} \\hat y_1 & -\\hat y_{M-1} \\hat y_2 & \\dots & \\hat y_{M-1} (1-\\hat y_{M-1})\n \\end{array}\\right] \n \\left[\\begin{array}{c} -y_1/\\hat y_1 \\\\ -y_2/\\hat y_2 \\\\ \\vdots \\\\ - y_{M-1}/\\hat y_{M-1} \\end{array}\\right] \n {\\bf x}^\\top \\\\\n & = (\\hat {\\bf y} - {\\bf y}){\\bf x}^\\top \\\\\n\\\\\n\\frac{\\partial l({\\bf y},{\\hat {\\bf y}})}{\\partial {\\bf b}} \n & = \\hat {\\bf y} - {\\bf y}\n\\end{align}", "dataset = 'digits'\nX_train, X_val, y_train, y_val = get_dataset('digits')\n\n# Define some useful functions\ndef softmax(t):\n \"\"\"Compute softmax values for each set of scores in t.\"\"\"\n e_t = np.exp(t)\n return e_t / e_t.sum(axis=1, keepdims=True)\n\ndef forward(w, b, x):\n # Compute the network output\n return softmax(x @ w.T + b.T)\n\ndef backward(y, y_hat, x):\n # Compute the gradients\n W_grad = (y_hat - y).T @ x / len(y)\n b_grad = (y_hat - y).T.mean(axis=1, keepdims=True)\n return W_grad, b_grad\n \ndef accuracy(y, y_hat):\n return np.mean(np.argmax(y, axis=1) == np.argmax(y_hat, axis=1))\n\ndef loss(y, y_hat):\n return - np.sum(y * np.log(y_hat)) / len(y)\n\n# Neural Network Training\n\nepochs = 300\nrho = .1\n\n#Parameter initialization\nW = .1 * np.random.randn(y_train.shape[1], X_train.shape[1])\nb = .1 * np.random.randn(y_train.shape[1], 1)\n\nloss_train = np.zeros(epochs)\nloss_val = np.zeros(epochs)\nacc_train = np.zeros(epochs)\nacc_val = np.zeros(epochs)\n\nfor epoch in np.arange(epochs):\n print(f\"Epoch {epoch} out of {epochs} \\r\", end=\"\")\n y_hat_train = forward(W, b, X_train)\n y_hat_val = forward(W, b, X_val)\n W_grad, b_grad = backward(y_train, y_hat_train, X_train)\n W = W - rho * W_grad\n b = b - rho * b_grad\n \n loss_train[epoch] = loss(y_train, y_hat_train)\n loss_val[epoch] = loss(y_val, y_hat_val)\n acc_train[epoch] = accuracy(y_train, y_hat_train)\n acc_val[epoch] = accuracy(y_val, y_hat_val)\n \n\nplt.figure(figsize=(14,5))\nplt.subplot(1, 2, 1), plt.plot(loss_train, 'b'), plt.plot(loss_val, 'r'), plt.legend(['train', 'val']), plt.title('Cross-entropy loss')\nplt.subplot(1, 2, 2), plt.plot(acc_train, 'b'), plt.plot(acc_val, 'r'), plt.legend(['train', 'val']), plt.title('Accuracy')\nplt.show()", "<font color='olive'>Exercise</font>\n\n\nStudy the behavior of the algorithm changing the number of iterations and the learning rate\n\n\nObtain the confusion matrix, and study which classes are more difficult to classify\n\n\nThink about the differences between using this 10-class network, vs training 10 binary classifiers, one for each class\n\n\nAs in linear logistic regression, note that we covered the following aspects of neural network 
design, implementation, and training:\n\nNetwork topology (In this case, a linear network with one layer and $M$ outputs)\nActivation functions (softmax activation)\nParameter initialization ($\\bf W$/$b$)\nObtaining the network prediction using forward computation\nLoss function\nParameter gradient calculus using backward computation\nOptimization method for parameters update (here, GD)\n\n<font color='teal'> 2.4. Multi Layer Networks (Deep Networks) </font>\nPrevious networks are constrained in the sense that they can only implement linear classifiers. In this section we analyze how we can extend them to implement non-linear classification:\n* Fixed non-linear transformations of inputs: ${\\bf z} = {\\bf{f}}({\\bf x})$\n\n\nParametrize the transformation using additional non-linear layers\n<center><img src=\"figures/LR_MLPnetwork.png\" width=\"600\"/></center>\n\n\nWhen counting layers, we normally ignore the input layer, since there is no computation involved\n\nIntermediate layers are normally referred to as \"hidden\" layers\nNon-linear activations result in an overall non-linear classifier\nWe can still use Gradient Descent Optimization as long as the network loss derivatives with respect to all parameters exist and are continuous\nThis is already deep learning. We can have two layers or more, each with different numbers of neurons. But as long as derivatives with respect to parameters can be calculated, the network can be optimized\nFinding an appropriate number of layers for a particular problem, as well as the number of neurons per layer, requires exploration\nThe more data we have for training the network, the more parameters we can afford, making feasible the use of more complex topologies\n\n<font color='olive'>Example: 2-layer network for binary classification</font>\n\n\nNetwork topology\n\nHidden layer with $n_h$ neurons\nHyperbolic tangent activation function for the hidden layer\n$${\\bf h} = \\text{tanh}({\\bf o}^{(1)})= \\text{tanh}\\left({\\bf W}^{(1)} {\\bf x} + {\\bf b}^{(1)}\\right)$$\nOutput layer is linear with logistic activation (as in logistic regression)\n$$\\hat y = \\text{logistic}(o) = \\text{logistic}\\left({{\\bf w}^{(2)}}^\\top {\\bf h} + b^{(2)}\\right)$$\n\n\n\nCross-entropy loss\n\n\n$$l(y,\\hat y) = -\\left[ y \\log(\\hat y) + (1 - y ) \\log(1 - \\hat y) \\right], \\qquad \\text{with } y\\in [0,1]$$\n\nUpdate of output layer weights as in logistic regression (use ${\\bf h}$ instead of ${\\bf x}$)\n\n$${\\bf w}_{n+1}^{(2)} = {\\bf w}_n^{(2)} + \\rho_n \\sum_{k=0}^{K-1} (y_k - \\hat y_k){\\bf h}_k$$\n$$b_{n+1}^{(2)} = b_n^{(2)} + \\rho_n \\sum_{k=0}^{K-1} (y_k - \\hat y_k)$$\n<center><img src=\"figures/forward_graph.png\" width=\"500\"/></center>\n\nFor updating the input layer parameters we need to use the chain rule (we ignore dimensions and rearrange at the end)\n\n\\begin{align}\n\\frac{\\partial l(y, \\hat y)}{\\partial {\\bf W}^{(1)}} \n & = \\frac{\\partial l(y, \\hat y)}{\\partial o} \n \\cdot \\frac{\\partial o}{\\partial {\\bf h}} \n \\cdot \\frac{\\partial {\\bf h}}{\\partial {\\bf o}^{(1)}} \n \\cdot \\frac{\\partial {\\bf o}^{(1)}}{\\partial {\\bf W}^{(1)}} \\\\\n & = (\\hat y - y) [{\\bf w}^{(2)} \\odot ({\\bf 1}-{\\bf h}^2)] {\\bf x}^{\\top}\n\\end{align}\n(note that $\\dfrac{\\partial {\\bf o}^{(1)}}{\\partial {\\bf W}^{(1)}}$ is actually a three dimensional matrix (i.e. a tensor). 
To apply the chain rule properly, the multiplications in the above equation must represent the adequate tensor products)\n\\begin{align}\n\\frac{\\partial l(y, \\hat y)}{\\partial {\\bf b}^{(1)}} \n & = \\frac{\\partial l(y, \\hat y)}{\\partial o} \n \\cdot \\frac{\\partial o}{\\partial {\\bf h}} \n \\cdot \\frac{\\partial {\\bf h}}{\\partial {\\bf o}^{(1)}} \n \\cdot \\frac{\\partial {\\bf o}^{(1)}}{\\partial {\\bf b}^{(1)}} \\\\\n& = (\\hat y - y) [{\\bf w}^{(2)} \\odot ({\\bf 1}-{\\bf h}^2)]\n\\end{align}\nwhere $\\odot$ denotes component-wise multiplication and the square of ${\\bf h}$ should be computed component-wise (recall that the derivative of $\\text{tanh}(o)$ is $1 - \\text{tanh}^2(o)$)\n\nGD update rules become\n$${\\bf W}_{n+1}^{(1)} = {\\bf W}_n^{(1)} + \\rho_n \\sum_{k=0}^{K-1} (y_k - \\hat y_k)[{\\bf w}^{(2)} \\odot ({\\bf 1}-{\\bf h}_k^2)] {\\bf x}_k^{\\top}$$\n$${\\bf b}_{n+1}^{(1)} = {\\bf b}_n^{(1)} + \\rho_n \\sum_{k=0}^{K-1} (y_k - \\hat y_k)[{\\bf w}^{(2)} \\odot ({\\bf 1}-{\\bf h}_k^2)]$$\n\n<center><img src=\"figures/forward_graph.png\" width=\"500\"/></center>\n\n\nThe process can be implemented as long as the derivatives of the network overall loss with respect to parameters can be computed\n\n\nForward computation graphs represent how the network output can be computed\n\n\nWe can then reverse the graph to compute derivatives with respect to parameters\n\n\nDeep Learning libraries implement automatic gradient computation\n\nWe just define network topology\nComputation of gradients is carried out automatically", "# Define some useful functions\ndef logistic(t):\n return 1.0 / (1 + np.exp(-t))\n\ndef forward(W1, b1, w2, b2, x):\n #Compute the network output (tanh activation in the hidden layer)\n h = np.tanh(x.dot(W1.T) + b1)\n y_hat = logistic(h.dot(w2) + b2)\n #Provide also hidden units value for backward gradient step\n return h, y_hat\n\ndef backward(y, y_hat, h, x, w2):\n #Compute the gradients (the derivative of tanh is 1 - h**2)\n w2_grad = h.T.dot(y_hat - y) / len(y)\n b2_grad = np.sum(y_hat - y) / len(y)\n W1_grad = ((w2[np.newaxis,] * (1 - h**2) * (y_hat - y)[:,np.newaxis]).T.dot(x)) / len(y)\n b1_grad = ((w2[np.newaxis,] * (1 - h**2) * (y_hat - y)[:,np.newaxis]).sum(axis=0)) / len(y)\n return w2_grad, b2_grad, W1_grad, b1_grad\n \ndef accuracy(y, y_hat):\n return np.mean(y == (y_hat >= 0.5))\n\ndef loss(y, y_hat):\n return - np.sum(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat)) / len(y)\n\ndef evaluate_model(\n X_train, X_val, y_train, y_val, n_h=5, epochs=1000, rho=.005):\n \n W1 = .01 * np.random.randn(n_h, X_train.shape[1])\n b1 = .01 * np.random.randn(n_h)\n w2 = .01 * np.random.randn(n_h)\n b2 = .01 * np.random.randn(1)\n\n loss_train = np.zeros(epochs)\n loss_val = np.zeros(epochs)\n acc_train = np.zeros(epochs)\n acc_val = np.zeros(epochs)\n\n for epoch in np.arange(epochs):\n print(f'Current epoch: {epoch + 1} \\r', end=\"\") \n\n h, y_hat_train = forward(W1, b1, w2, b2, X_train)\n dum, y_hat_val = forward(W1, b1, w2, b2, X_val)\n w2_grad, b2_grad, W1_grad, b1_grad = backward(y_train, y_hat_train, h, X_train, w2)\n W1 = W1 - rho/10 * W1_grad\n b1 = b1 - rho/10 * b1_grad\n w2 = w2 - rho * w2_grad\n b2 = b2 - rho * b2_grad\n\n loss_train[epoch] = loss(y_train, y_hat_train)\n loss_val[epoch] = loss(y_val, y_hat_val)\n acc_train[epoch] = accuracy(y_train, y_hat_train)\n acc_val[epoch] = accuracy(y_val, y_hat_val)\n\n return loss_train, loss_val, acc_train, acc_val\n", "<font color='olive'>Results in Dogs vs Cats dataset ($epochs = 1000$ and $\\rho = 0.05$)</font>", "dataset = 'DogsCats'\n\nX_train, X_val, y_train, y_val = get_dataset(dataset)\nloss_train, loss_val, acc_train, acc_val = 
evaluate_model(\n    X_train, X_val, y_train, y_val, n_h=5, epochs=1000, rho=0.05)\n\nplt.figure(figsize=(14,5))\nplt.subplot(1, 2, 1), plt.plot(loss_train, 'b'), plt.plot(loss_val, 'r'), plt.legend(['train', 'val']), plt.title('Cross-entropy loss')\nplt.subplot(1, 2, 2), plt.plot(acc_train, 'b'), plt.plot(acc_val, 'r'), plt.legend(['train', 'val']), plt.title('Accuracy')\nplt.show()", "<font color='olive'>Results in Binary Sign Digits Dataset ($epochs = 10000$ and $\\rho = 0.001$)</font>", "dataset = 'digits'\nX_train, X_val, y_train, y_val = get_dataset(dataset, forze_binary=True)\nloss_train, loss_val, acc_train, acc_val = evaluate_model(\n    X_train, X_val, y_train, y_val, n_h=5, epochs=10000, rho=0.001)\n\nplt.figure(figsize=(14,5))\nplt.subplot(1, 2, 1), plt.plot(loss_train, 'b'), plt.plot(loss_val, 'r'), plt.legend(['train', 'val']), plt.title('Cross-entropy loss')\nplt.subplot(1, 2, 2), plt.plot(acc_train, 'b'), plt.plot(acc_val, 'r'), plt.legend(['train', 'val']), plt.title('Accuracy')\nplt.show()", "<font color='olive'>Exercises</font>\n\n\nTrain the network using other settings for:\n\nThe number of iterations\nThe learning step\nThe number of neurons in the hidden layer\n\n\n\nYou may find divergence issues for some settings\n\nRelated to the use of the hyperbolic tangent function in the hidden layer (numerical issues)\nThis is also why a smaller learning step was selected for the hidden layer\nOptimized libraries rely on certain modifications to obtain more robust implementations\n\n\n\nTry to solve both problems using the <a href=\"https://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPClassifier.html\">scikit-learn implementation</a>\n\nYou can also explore other activation functions\nYou can also explore other solvers to speed up convergence\nYou can also adjust the size of minibatches\nTake a look at the early_stopping parameter\n\n\n\n<font color='teal'> 2.5. Multi Layer Networks for Regression </font>\n\n\nDeep Learning networks can be used to solve regression problems with the following common adjustments\n\n\nLinear activation for the output unit\n\n\nSquare loss: \n$$l(y, \\hat y) = (y - \\hat y)^2, \\qquad \\text{where} \\qquad y, \\hat y \\in \\Re$$\n\n\n\n\n<font color='teal'> 2.6. Activation Functions</font>\nYou can refer to the <a href=\"http://d2l.ai/chapter_multilayer-perceptrons/mlp.html#activation-functions\">Dive into Deep Learning book</a> for a more detailed discussion on common activation functions for the hidden units. \nWe extract some information about the very important ReLU function\n\nThe most popular choice, due to both simplicity of implementation and its good performance on a variety of predictive tasks, is the rectified linear unit (ReLU). ReLU provides a very simple nonlinear transformation. Given an element $x$, the function is defined as the maximum of that element and 0.\nWhen the input is negative, the derivative of the ReLU function is 0, and when the input is positive, the derivative of the ReLU function is 1. When the input takes value precisely equal to 0, we say that the derivative is 0 when the input is 0.\nThe reason for using ReLU is that its derivatives are particularly well behaved: either they vanish or they just let the argument through. 
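\nIn symbols, this is just a restatement of the definition and derivative described above:\n$$\\text{ReLU}(x) = \\max(x, 0), \\qquad \\frac{d\\,\\text{ReLU}(x)}{dx} = \\begin{cases} 1, & x > 0 \\\\ 0, & x \\le 0 \\end{cases}$$\n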
This makes optimization better behaved and it mitigated the well-documented problem of vanishing gradients that plagued previous versions of neural networks.", "x_array = np.linspace(-6,6,100)\ny_array = np.clip(x_array, 0, a_max=None)\nplt.plot(x_array, y_array)\nplt.title('ReLU activation function')\nplt.show()", "<font color='teal'> 3. Implementing Deep Networks with PyTorch </font>\n\n\nPyTorch is a Python library that provides different levels of abstraction for implementing deep neural networks\n\n\nThe main features of PyTorch are:\n\nDefinition of numpy-like n-dimensional tensors. They can be stored in / moved to GPU for parallel execution of operations\nAutomatic calculation of gradients, making backward gradient calculation transparent to the user\nDefinition of common loss functions, NN layers of different types, optimization methods, data loaders, etc., simplifying NN implementation and training\nProvides different levels of abstraction, thus offering a good balance between flexibility and simplicity\n\n\n\nThis notebook provides just a basic review of the main concepts necessary to train NNs with PyTorch, taking materials from:\n\n<a href=\"https://pytorch.org/tutorials/beginner/pytorch_with_examples.html\">Learning PyTorch with Examples</a>, by Justin Johnson\n<a href=\"https://pytorch.org/tutorials/beginner/nn_tutorial.html\">What is torch.nn really?</a>, by Jeremy Howard\n<a href=\"https://www.kaggle.com/kanncaa1/pytorch-tutorial-for-deep-learning-lovers\">Pytorch Tutorial for Deep Learning Lovers</a>, by Kaggle user kanncaa1\n\n\n\n<font color='teal'> 3.1. Installation and PyTorch introduction</font>\n\n\nPyTorch can be installed with or without GPU support\n\nIf you have an Anaconda installation, you can install from the command line, using the <a href=\"https://pytorch.org/\">instructions of the project website</a>\n\n\n\nPyTorch is also preinstalled in Google Colab with free GPU access\n\nFollow Runtime -> Change runtime type, and select GPU for HW acceleration\n\n\n\nPlease refer to the PyTorch getting started tutorial for a quick introduction regarding tensor definition, GPU vs CPU storage of tensors, operations, and the bridge to Numpy\n\n\n<font color='teal'> 3.2. 
Torch tensors (very) general overview</font>\n\nWe can create tensors with different construction methods provided by the library, either to create new tensors from scratch or from a Numpy array", "import torch\n\nx = torch.rand((100,200))\ndigitsX_flatten_tensor = torch.from_numpy(digitsX_flatten)\n\nprint(x.type())\nprint(digitsX_flatten_tensor.size())", "Tensors can be converted back to numpy arrays\n\n\nNote that in this case, a tensor and its corresponding numpy array will share memory\n\n\nOperations and slicing use a syntax similar to numpy", "print('Size of tensor x:', x.size())\nprint('Transpose of x has size', x.t().size()) #Transpose and compute size\nprint('Extracting upper left matrix of size 3 x 3:', x[:3,:3])\nprint(x.mm(x.t()).size()) #mm for matrix multiplications\nxpx = x.add(x)\nxpx2 = torch.add(x,x)\nprint((xpx!=xpx2).sum()) #Since all are equal, count of different terms is zero", "Adding an underscore performs operations \"in place\", e.g., x.add_(y)\n\n\nIf a GPU is available, tensors can be moved to and from the GPU device\n\n\nOperations on tensors stored in a GPU will be carried out using GPU resources and will typically be highly parallelized", "if torch.cuda.is_available():\n device = torch.device('cuda')\n x = x.to(device)\n y = x.add(x)\n y = y.to('cpu')\nelse:\n print('No GPU card is available')", "<font color='teal'> 3.3. Automatic gradient calculation </font>\n\n\nPyTorch tensors have a property requires_grad. When true, PyTorch automatic gradient calculation will be activated for that variable\n\n\nIn order to compute these derivatives numerically, PyTorch keeps track of all operations carried out on these variables, organizing them in a forward computation graph.\n\n\nWhen executing the backward() method, derivatives will be calculated\n\n\nHowever, this should only be activated when necessary, to save computation", "x.requires_grad = True\ny = (3 * torch.log(x)).sum()\ny.backward()\nprint(x.grad[:2,:2])\nprint(3/x[:2,:2])\n\nx.requires_grad = False\nx.grad.zero_()\nprint('Automatic gradient calculation is deactivated, and gradients set to zero')", "<font color='olive'>Exercise</font>\n\nInitialize a tensor x with the upper right $5 \\times 10$ submatrix of flattened digits\nCompute output vector y applying a function of your choice to x\nCompute scalar value z as the sum of all elements in y squared\nCheck that the x.grad calculation is correct using the backward method\nTry to run your cell multiple times to see if the calculation is still correct. If not, implement the necessary modifications so that you can run the cell multiple times, but the gradient does not change from run to run\n\nNote: The backward method can only be run on scalar variables\n<font color='teal'> 3.4. Feed Forward Network using PyTorch </font>\n\n\nIn this section we will change our code for a neural network to use tensors instead of numpy arrays. We will work with the sign digits dataset.\n\n\nWe will introduce all concepts using a single layer perceptron (softmax regression), and then implement networks with additional hidden layers\n\n\n<font color='olive'> 3.4.1. Using Automatic differentiation </font>\n\n\nWe start by loading the data, and converting to tensors.\n\n\nAs a first step, we refactor our code to use tensor operations\n\n\nWe do not need to pay too much attention to particular details regarding tensor operations, since these will not be necessary when moving to higher PyTorch abstraction levels\n\n\nWe do not need to implement gradient calculation. 
PyTorch will take care of that", "from sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\n\ndataset = 'digits'\n\n#Joint normalization of all data. For images [-.5, .5] scaling is frequent\nmin_max_scaler = MinMaxScaler(feature_range=(-.5, .5))\nX = min_max_scaler.fit_transform(digitsX_flatten)\n\n#Generate train and validation data, shuffle\nX_train, X_val, y_train, y_val = train_test_split(X, digitsY, test_size=0.2, random_state=42, shuffle=True)\n\n#Convert to Torch tensors\nX_train_torch = torch.from_numpy(X_train)\nX_val_torch = torch.from_numpy(X_val)\ny_train_torch = torch.from_numpy(y_train)\ny_val_torch = torch.from_numpy(y_val)\n\n# Define some useful functions\ndef softmax(t):\n \"\"\"Compute softmax values for each set of scores in t\"\"\"\n return t.exp() / t.exp().sum(-1).unsqueeze(-1)\n\ndef model(w,b,x):\n #Compute the network output\n return softmax(x.mm(w) + b)\n \ndef accuracy(y, y_hat):\n return (y.argmax(axis=-1) == y_hat.argmax(axis=-1)).float().mean()\n\ndef nll(y, y_hat):\n return -(y * y_hat.log()).mean()", "The syntax is a bit different because input variables are tensors, not arrays\n\n\nThis time we did not need to implement the backward function", "# Parameter initialization\nW = .1 * torch.randn(X_train_torch.size()[1], y_train_torch.size()[1])\nW.requires_grad_()\nb = torch.zeros(y_train_torch.size()[1], requires_grad=True)\n\nepochs = 500\nrho = .5\n\nloss_train = np.zeros(epochs)\nloss_val = np.zeros(epochs)\nacc_train = np.zeros(epochs)\nacc_val = np.zeros(epochs)\n\n# Network training\nfor epoch in range(epochs):\n \n print(f'Current epoch: {epoch + 1} \\r', end=\"\")\n \n #Compute network output and cross-entropy loss\n pred = model(W,b,X_train_torch)\n loss = nll(y_train_torch, pred)\n \n #Compute gradients\n loss.backward()\n \n #Deactivate gradient automatic updates\n with torch.no_grad():\n #Computing network performance after iteration\n loss_train[epoch] = loss.item()\n acc_train[epoch] = accuracy(y_train_torch, pred).item()\n pred_val = model(W, b, X_val_torch)\n loss_val[epoch] = nll(y_val_torch, pred_val).item()\n acc_val[epoch] = accuracy(y_val_torch, pred_val).item()\n\n #Weight update\n W -= rho * W.grad\n b -= rho * b.grad\n #Reset gradients\n W.grad.zero_()\n b.grad.zero_()", "It is important to deactivate gradient updates after the network has been evaluated on training data, and gradients of the loss function have been computed", "plt.figure(figsize=(14,5))\nplt.subplot(1, 2, 1), plt.plot(loss_train, 'b'), plt.plot(loss_val, 'r'), plt.legend(['train', 'val']), plt.title('Cross-entropy loss')\nplt.subplot(1, 2, 2), plt.plot(acc_train, 'b'), plt.plot(acc_val, 'r'), plt.legend(['train', 'val']), plt.title('Accuracy')\nplt.show()", "<font color='olive'> 3.4.2. Using torch nn module </font>\n\n\nPyTorch nn module provides many attributes and methods that make the implementation and training of Neural Networks simpler\n\n\nnn.Module and nn.Parameter allow us to implement a more concise training loop\n\n\nnn.Module is a PyTorch class that will be used to encapsulate and design a specific neural network, thus, it is central to the implementation of deep neural nets using PyTorch\n\n\nnn.Parameter allows the definition of trainable network parameters. 
In this way, we will simplify the implementation of the training loop.\n\n\nAll parameters defined with nn.Parameter will have requires_grad = True", "from torch import nn\n\nclass my_multiclass_net(nn.Module):\n def __init__(self, nin, nout):\n \"\"\"This method initializes the network parameters\n Parameters nin and nout stand for the number of input parameters (features in X)\n and output parameters (number of classes)\"\"\"\n super().__init__()\n self.W = nn.Parameter(.1 * torch.randn(nin, nout))\n self.b = nn.Parameter(torch.zeros(nout))\n \n def forward(self, x):\n return softmax(x.mm(self.W) + self.b)\n \n def softmax(t):\n \"\"\"Compute softmax values for each sets of scores in t\"\"\"\n return t.exp() / t.exp().sum(-1).unsqueeze(-1)\n\nmy_net = my_multiclass_net(X_train_torch.size()[1], y_train_torch.size()[1])\n\nepochs = 500\nrho = .5\n\nloss_train = np.zeros(epochs)\nloss_val = np.zeros(epochs)\nacc_train = np.zeros(epochs)\nacc_val = np.zeros(epochs)\n\nfor epoch in range(epochs):\n \n print(f'Current epoch: {epoch + 1} \\r', end=\"\")\n \n #Compute network output and cross-entropy loss\n pred = my_net(X_train_torch)\n loss = nll(y_train_torch, pred)\n \n #Compute gradients\n loss.backward()\n \n #Deactivate gradient automatic updates\n with torch.no_grad():\n #Computing network performance after iteration\n loss_train[epoch] = loss.item()\n acc_train[epoch] = accuracy(y_train_torch, pred).item()\n pred_val = my_net(X_val_torch)\n loss_val[epoch] = nll(y_val_torch, pred_val).item()\n acc_val[epoch] = accuracy(y_val_torch, pred_val).item()\n\n #Weight update\n for p in my_net.parameters():\n p -= p.grad * rho\n #Reset gradients\n my_net.zero_grad()\n\nplt.figure(figsize=(14,5))\nplt.subplot(1, 2, 1), plt.plot(loss_train, 'b'), plt.plot(loss_val, 'r'), plt.legend(['train', 'val']), plt.title('Cross-entropy loss')\nplt.subplot(1, 2, 2), plt.plot(acc_train, 'b'), plt.plot(acc_val, 'r'), plt.legend(['train', 'val']), plt.title('Accuracy')\nplt.show()", "nn.Module comes with several kinds of pre-defined layers, thus making it even simpler to implement neural networks\n\n\nWe can also import the Cross Entropy Loss from nn.Module. 
When doing so:\n\nWe do not have to compute the softmax, since the nn.CrossEntropyLoss already does so\nnn.CrossEntropyLoss receives two input arguments, the first is the output of the network, and the second is the true label as a 1-D tensor (i.e., an array of integers, one-hot encoding should not be used)", "from torch import nn\n\nclass my_multiclass_net(nn.Module):\n def __init__(self, nin, nout):\n \"\"\"Note that now, we do not even need to initialize network parameters ourselves\"\"\"\n super().__init__()\n self.lin = nn.Linear(nin, nout)\n \n def forward(self, x):\n return self.lin(x)\n \nloss_func = nn.CrossEntropyLoss()\n\nmy_net = my_multiclass_net(X_train_torch.size()[1], y_train_torch.size()[1])\n\nepochs = 500\nrho = .1\n\nloss_train = np.zeros(epochs)\nloss_val = np.zeros(epochs)\nacc_train = np.zeros(epochs)\nacc_val = np.zeros(epochs)\n\nfor epoch in range(epochs):\n \n print(f'Current epoch: {epoch + 1} \\r', end=\"\")\n \n #Compute network output and cross-entropy loss\n pred = my_net(X_train_torch)\n loss = loss_func(pred, y_train_torch.argmax(axis=-1))\n \n #Compute gradients\n loss.backward()\n \n #Deactivate gradient automatic updates\n with torch.no_grad():\n #Computing network performance after iteration\n loss_train[epoch] = loss.item()\n acc_train[epoch] = accuracy(y_train_torch, pred).item()\n pred_val = my_net(X_val_torch)\n loss_val[epoch] = loss_func(pred_val, y_val_torch.argmax(axis=-1)).item()\n acc_val[epoch] = accuracy(y_val_torch, pred_val).item()\n\n #Weight update\n for p in my_net.parameters():\n p -= p.grad * rho\n #Reset gradients\n my_net.zero_grad()\n\nplt.figure(figsize=(14,5))\nplt.subplot(1, 2, 1), plt.plot(loss_train, 'b'), plt.plot(loss_val, 'r'), plt.legend(['train', 'val']), plt.title('Cross-entropy loss')\nplt.subplot(1, 2, 2), plt.plot(acc_train, 'b'), plt.plot(acc_val, 'r'), plt.legend(['train', 'val']), plt.title('Accuracy')\nplt.show()", "Note faster convergence is observed in this case. It is actually due to a more convenient initialization of the hidden layer\n<font color='olive'> 3.4.3. 
Network Optimization </font>\n\n\nIn this subsection we cover two different aspects of network training using PyTorch:\n\n\nUsing torch.optim allows an easier and more interpretable encoding of neural network training, and opens the door to more sophisticated training algorithms\n\n\nUsing minibatches can speed up network convergence\n\n\n\n\ntorch.optim provides two convenient methods for neural network training:\n\nopt.step() updates all network parameters using the current gradients\nopt.zero_grad() resets the gradients of all network parameters", "from torch import optim\n\nmy_net = my_multiclass_net(X_train_torch.size()[1], y_train_torch.size()[1])\nopt = optim.SGD(my_net.parameters(), lr=0.1)\n\nepochs = 500\n\nloss_train = np.zeros(epochs)\nloss_val = np.zeros(epochs)\nacc_train = np.zeros(epochs)\nacc_val = np.zeros(epochs)\n\nfor epoch in range(epochs):\n \n print(f'Current epoch: {epoch + 1} \\r', end=\"\")\n \n #Compute network output and cross-entropy loss\n pred = my_net(X_train_torch)\n loss = loss_func(pred, y_train_torch.argmax(axis=-1))\n \n #Compute gradients\n loss.backward()\n \n #Deactivate gradient automatic updates\n with torch.no_grad():\n #Computing network performance after iteration\n loss_train[epoch] = loss.item()\n acc_train[epoch] = accuracy(y_train_torch, pred).item()\n pred_val = my_net(X_val_torch)\n loss_val[epoch] = loss_func(pred_val, y_val_torch.argmax(axis=-1)).item()\n acc_val[epoch] = accuracy(y_val_torch, pred_val).item()\n\n opt.step()\n opt.zero_grad()", "Note that network optimization is carried out outside torch.no_grad() but network evaluation (other than forward output calculation for the training patterns) still needs to deactivate gradient updates", "plt.figure(figsize=(14,5))\nplt.subplot(1, 2, 1), plt.plot(loss_train, 'b'), plt.plot(loss_val, 'r'), plt.legend(['train', 'val']), plt.title('Cross-entropy loss')\nplt.subplot(1, 2, 2), plt.plot(acc_train, 'b'), plt.plot(acc_val, 'r'), plt.legend(['train', 'val']), plt.title('Accuracy')\nplt.show()", "<font color='olive'> Exercise </font>\nImplement network training with other optimization methods. You can refer to the <a href=\"https://pytorch.org/docs/stable/optim.html\">official documentation</a> and select a couple of methods. 
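\nFor instance, a minimal sketch of one such swap (only the optimizer line of the training loop above changes; the choice of Adam and of this particular learning rate is just an illustration, not a recommended setting):\nopt = optim.Adam(my_net.parameters(), lr=0.001)\nThe rest of the loop (loss.backward(), opt.step(), opt.zero_grad()) stays exactly as above.\n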
You can also try to implement adaptive learning rates using torch.optim.lr_scheduler\n\n\nEach epoch of the previous implementation of network training was actually implementing Gradient Descent\n\n\nIn SGD only a minibatch of training patterns are used at every iteration\n\n\nIn each epoch we iterate over all training patterns sequentially selecting non-overlapping minibatches\n\n\nOverall, convergence is usually faster than when using Gradient Descent\n\n\nTorch provides methods that simplify the implementation of this strategy", "from torch.utils.data import TensorDataset, DataLoader\n\ntrain_ds = TensorDataset(X_train_torch, y_train_torch)\ntrain_dl = DataLoader(train_ds, batch_size=64)\n\nfrom torch import optim\n\nmy_net = my_multiclass_net(X_train_torch.size()[1], y_train_torch.size()[1])\nopt = optim.SGD(my_net.parameters(), lr=0.1)\n\nepochs = 200\n\nloss_train = np.zeros(epochs)\nloss_val = np.zeros(epochs)\nacc_train = np.zeros(epochs)\nacc_val = np.zeros(epochs)\n\nfor epoch in range(epochs):\n \n print(f'Current epoch: {epoch + 1} \\r', end=\"\")\n \n for xb, yb in train_dl:\n \n #Compute network output and cross-entropy loss for current minibatch\n pred = my_net(xb)\n loss = loss_func(pred, yb.argmax(axis=-1))\n \n #Compute gradients and optimize parameters\n loss.backward()\n opt.step()\n opt.zero_grad()\n \n #At the end of each epoch, evaluate overall network performance\n with torch.no_grad():\n #Computing network performance after iteration\n pred = my_net(X_train_torch)\n loss_train[epoch] = loss_func(pred, y_train_torch.argmax(axis=-1)).item()\n acc_train[epoch] = accuracy(y_train_torch, pred).item()\n pred_val = my_net(X_val_torch)\n loss_val[epoch] = loss_func(pred_val, y_val_torch.argmax(axis=-1)).item()\n acc_val[epoch] = accuracy(y_val_torch, pred_val).item()\n\nplt.figure(figsize=(14,5))\nplt.subplot(1, 2, 1), plt.plot(loss_train, 'b'), plt.plot(loss_val, 'r'), plt.legend(['train', 'val']), plt.title('Cross-entropy loss')\nplt.subplot(1, 2, 2), plt.plot(acc_train, 'b'), plt.plot(acc_val, 'r'), plt.legend(['train', 'val']), plt.title('Accuracy')\nplt.show()", "<font color='olive'> 3.4.4. 
Multi Layer networks using nn.Sequential </font>\n\n\nPyTorch simplifies considerably the implementation of neural network training, since we do not need to implement derivatives ourselves\n\n\nWe can also make a simpler implementation of multilayer networks using nn.Sequential function\n\n\nIt returns directly a network with the requested topology, including parameters and forward evaluation method", "my_net = nn.Sequential(\n nn.Linear(X_train_torch.size()[1], 200),\n nn.ReLU(),\n nn.Linear(200,50),\n nn.ReLU(),\n nn.Linear(50,20),\n nn.ReLU(),\n nn.Linear(20,y_train_torch.size()[1])\n)\n\nopt = optim.SGD(my_net.parameters(), lr=0.1)\n\nepochs = 200\n\nloss_train = np.zeros(epochs)\nloss_val = np.zeros(epochs)\nacc_train = np.zeros(epochs)\nacc_val = np.zeros(epochs)\n\nfor epoch in range(epochs):\n \n print(f'Current epoch: {epoch + 1} \\r', end=\"\")\n \n for xb, yb in train_dl:\n \n #Compute network output and cross-entropy loss for current minibatch\n pred = my_net(xb)\n loss = loss_func(pred, yb.argmax(axis=-1))\n \n #Compute gradients and optimize parameters\n loss.backward()\n opt.step()\n opt.zero_grad()\n \n #At the end of each epoch, evaluate overall network performance\n with torch.no_grad():\n #Computing network performance after iteration\n pred = my_net(X_train_torch)\n loss_train[epoch] = loss_func(pred, y_train_torch.argmax(axis=-1)).item()\n acc_train[epoch] = accuracy(y_train_torch, pred).item()\n pred_val = my_net(X_val_torch)\n loss_val[epoch] = loss_func(pred_val, y_val_torch.argmax(axis=-1)).item()\n acc_val[epoch] = accuracy(y_val_torch, pred_val).item()\n\nplt.figure(figsize=(14,5))\nplt.subplot(1, 2, 1), plt.plot(loss_train, 'b'), plt.plot(loss_val, 'r'), plt.legend(['train', 'val']), plt.title('Cross-entropy loss')\nplt.subplot(1, 2, 2), plt.plot(acc_train, 'b'), plt.plot(acc_val, 'r'), plt.legend(['train', 'val']), plt.title('Accuracy')\nplt.show()\n\nprint('Validation accuracy with this net:', acc_val[-1])", "<font color='teal'> 3.5. Generalization</font>\n\n\nFor complex network topologies (i.e., many parameters), network training can incur in over-fitting issues\n\n\nSome common strategies to avoid this are:\n\nEarly stopping\nDropout regularization\n\n\n\n<center><a href=\"https://medium.com/analytics-vidhya/a-simple-introduction-to-dropout-regularization-with-code-5279489dda1e\"><img src=\"figures/dropout.png\" width=\"450\"/>Image Source</a></center>\n\nData augmentation can also be used to avoid overfitting, as well as to achieve improved accuracy by providing the network some a priori expert knowledge\nE.g., if image rotations and scalings do not affect the correct class, we could enlarge the dataset by creating artificial images with these transformations\n\n\n\n<font color='teal'> 3.6. 
Convolutional Networks for Image Processing </font>\n\n\nPyTorch implements other layers that are better suited for different applications\n\n\nIn image processing, we normally resort to Convolutional Neural Networks, since they are able to capture the true spatial information of the image\n\n\n<center><a href=\"https://pytorch.org/tutorials/beginner/blitz/neural_networks_tutorial.html\"><img src=\"figures/CNN.png\" width=\"800\"/>Image Source</a></center>", "dataset = 'digits'\n\n#Generate train and validation data, shuffle\nX_train, X_val, y_train, y_val = train_test_split(digitsX[:,np.newaxis,:,:], digitsY, test_size=0.2, random_state=42, shuffle=True)\n\n#Convert to Torch tensors\nX_train_torch = torch.from_numpy(X_train)\nX_val_torch = torch.from_numpy(X_val)\ny_train_torch = torch.from_numpy(y_train)\ny_val_torch = torch.from_numpy(y_val)\n\ntrain_ds = TensorDataset(X_train_torch, y_train_torch)\ntrain_dl = DataLoader(train_ds, batch_size=64)\n\nclass Lambda(nn.Module):\n def __init__(self, func):\n super().__init__()\n self.func = func\n\n def forward(self, x):\n return self.func(x)\n\nmy_net = nn.Sequential(\n nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1),\n nn.ReLU(),\n nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1),\n nn.ReLU(),\n nn.Conv2d(16, 10, kernel_size=3, stride=2, padding=1),\n nn.ReLU(),\n nn.AvgPool2d(4),\n Lambda(lambda x: x.view(x.size(0), -1)),\n)\n\nopt = optim.SGD(my_net.parameters(), lr=0.1)\n\nepochs = 2500\n\nloss_train = np.zeros(epochs)\nloss_val = np.zeros(epochs)\nacc_train = np.zeros(epochs)\nacc_val = np.zeros(epochs)\n\nfor epoch in range(epochs):\n \n print(f'Current epoch: {epoch + 1}\\r', end=\"\")\n \n for xb, yb in train_dl:\n \n #Compute network output and cross-entropy loss for current minibatch\n pred = my_net(xb)\n loss = loss_func(pred, yb.argmax(axis=-1))\n \n #Compute gradients and optimize parameters\n loss.backward()\n opt.step()\n opt.zero_grad()\n \n #At the end of each epoch, evaluate overall network performance\n with torch.no_grad():\n # Computing network performance after iteration\n pred = my_net(X_train_torch)\n loss_train[epoch] = loss_func(pred, y_train_torch.argmax(axis=-1)).item()\n acc_train[epoch] = accuracy(y_train_torch, pred).item()\n pred_val = my_net(X_val_torch)\n loss_val[epoch] = loss_func(pred_val, y_val_torch.argmax(axis=-1)).item()\n acc_val[epoch] = accuracy(y_val_torch, pred_val).item()\n\nplt.figure(figsize=(14,5))\nplt.subplot(1, 2, 1), plt.plot(loss_train, 'b'), plt.plot(loss_val, 'r'), plt.legend(['train', 'val']), plt.title('Cross-entropy loss')\nplt.subplot(1, 2, 2), plt.plot(acc_train, 'b'), plt.plot(acc_val, 'r'), plt.legend(['train', 'val']), plt.title('Accuracy')\nplt.show()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
Kaggle/learntools
notebooks/python/raw/tut_7.ipynb
apache-2.0
[ "In this tutorial, you will learn about imports in Python, get some tips for working with unfamiliar libraries (and the objects they return), and dig into operator overloading.\nImports\nSo far we've talked about types and functions which are built-in to the language. \nBut one of the best things about Python (especially if you're a data scientist) is the vast number of high-quality custom libraries that have been written for it. \nSome of these libraries are in the \"standard library\", meaning you can find them anywhere you run Python. Other libraries can be easily added, even if they aren't always shipped with Python.\nEither way, we'll access this code with imports.\nWe'll start our example by importing math from the standard library.", "import math\n\nprint(\"It's math! It has type {}\".format(type(math)))", "math is a module. A module is just a collection of variables (a namespace, if you like) defined by someone else. We can see all the names in math using the built-in function dir().", "print(dir(math))", "We can access these variables using dot syntax. Some of them refer to simple values, like math.pi:", "print(\"pi to 4 significant digits = {:.4}\".format(math.pi))", "But most of what we'll find in the module are functions, like math.log:", "math.log(32, 2)", "Of course, if we don't know what math.log does, we can call help() on it:", "help(math.log)", "We can also call help() on the module itself. This will give us the combined documentation for all the functions and values in the module (as well as a high-level description of the module). Click the \"output\" button to see the whole math help page.", "help(math)", "Other import syntax\nIf we know we'll be using functions in math frequently we can import it under a shorter alias to save some typing (though in this case \"math\" is already pretty short).", "import math as mt\nmt.pi", "You may have seen code that does this with certain popular libraries like Pandas, Numpy, Tensorflow, or Matplotlib. For example, it's a common convention to import numpy as np and import pandas as pd.\n\nThe as simply renames the imported module. It's equivalent to doing something like:", "import math\nmt = math", "Wouldn't it be great if we could refer to all the variables in the math module by themselves? i.e. if we could just refer to pi instead of math.pi or mt.pi? Good news: we can do that.", "from math import *\nprint(pi, log(32, 2))", "import * makes all the module's variables directly accessible to you (without any dotted prefix).\nBad news: some purists might grumble at you for doing this.\nWorse: they kind of have a point.", "from math import *\nfrom numpy import *\nprint(pi, log(32, 2))", "What has happened? It worked before!\nThese kinds of \"star imports\" can occasionally lead to weird, difficult-to-debug situations.\nThe problem in this case is that the math and numpy modules both have functions called log, but they have different semantics. Because we import from numpy second, its log overwrites (or \"shadows\") the log variable we imported from math.\nA good compromise is to import only the specific things we'll need from each module:", "from math import log, pi\nfrom numpy import asarray", "Submodules\nWe've seen that modules contain variables which can refer to functions or values. 
Something to be aware of is that they can also have variables referring to other modules.", "import numpy\nprint(\"numpy.random is a\", type(numpy.random))\nprint(\"it contains names such as...\",\n dir(numpy.random)[-15:]\n )", "So if we import numpy as above, then calling a function in the random \"submodule\" will require two dots.", "# Roll 10 dice\nrolls = numpy.random.randint(low=1, high=6, size=10)\nrolls", "Oh the places you'll go, oh the objects you'll see\nSo after 6 lessons, you're a pro with ints, floats, bools, lists, strings, and dicts (right?). \nEven if that were true, it doesn't end there. As you work with various libraries for specialized tasks, you'll find that they define their own types which you'll have to learn to work with. For example, if you work with the graphing library matplotlib, you'll be coming into contact with objects it defines which represent Subplots, Figures, TickMarks, and Annotations. pandas functions will give you DataFrames and Series. \nIn this section, I want to share with you a quick survival guide for working with strange types.\nThree tools for understanding strange objects\nIn the cell above, we saw that calling a numpy function gave us an \"array\". We've never seen anything like this before (not in this course anyways). But don't panic: we have three familiar builtin functions to help us here.\n1: type() (what is this thing?)", "type(rolls)", "2: dir() (what can I do with it?)", "print(dir(rolls))\n\n# If I want the average roll, the \"mean\" method looks promising...\nrolls.mean()\n\n# Or maybe I just want to turn the array into a list, in which case I can use \"tolist\"\nrolls.tolist()", "3: help() (tell me more)", "# That \"ravel\" attribute sounds interesting. I'm a big classical music fan.\nhelp(rolls.ravel)\n\n# Okay, just tell me everything there is to know about numpy.ndarray\n# (Click the \"output\" button to see the novel-length output)\nhelp(rolls)", "(Of course, you might also prefer to check out the online docs.)\nOperator overloading\nWhat's the value of the below expression?", "[3, 4, 1, 2, 2, 1] + 10", "What a silly question. Of course it's an error. \nBut what about...", "rolls + 10", "We might think that Python strictly polices how pieces of its core syntax behave such as +, &lt;, in, ==, or square brackets for indexing and slicing. But in fact, it takes a very hands-off approach. When you define a new type, you can choose how addition works for it, or what it means for an object of that type to be equal to something else.\nThe designers of lists decided that adding them to numbers wasn't allowed. The designers of numpy arrays went a different way (adding the number to each element of the array).\nHere are a few more examples of how numpy arrays interact unexpectedly with Python operators (or at least differently from lists).", "# At which indices are the dice less than or equal to 3?\nrolls <= 3\n\nxlist = [[1,2,3],[2,4,6],]\n# Create a 2-dimensional array\nx = numpy.asarray(xlist)\nprint(\"xlist = {}\\nx =\\n{}\".format(xlist, x))\n\n# Get the last element of the second row of our numpy array\nx[1,-1]\n\n# Get the last element of the second sublist of our nested list?\nxlist[1,-1]", "numpy's ndarray type is specialized for working with multi-dimensional data, so it defines its own logic for indexing, allowing us to index by a tuple to specify the index at each dimension.\nWhen does 1 + 1 not equal 2?\nThings can get weirder than this. 
You may have heard of (or even used) tensorflow, a Python library popularly used for deep learning. It makes extensive use of operator overloading.", "import tensorflow as tf\n# Create two constants, each with value 1\na = tf.constant(1)\nb = tf.constant(1)\n# Add them together to get...\na + b", "a + b isn't 2, it is (to quote tensorflow's documentation)...\n\na symbolic handle to one of the outputs of an Operation. It does not hold the values of that operation's output, but instead provides a means of computing those values in a TensorFlow tf.Session.\n\nIt's important just to be aware of the fact that this sort of thing is possible and that libraries will often use operator overloading in non-obvious or magical-seeming ways.\nUnderstanding how Python's operators work when applied to ints, strings, and lists is no guarantee that you'll be able to immediately understand what they do when applied to a tensorflow Tensor, or a numpy ndarray, or a pandas DataFrame.\nOnce you've had a little taste of DataFrames, for example, an expression like the one below starts to look appealingly intuitive:\n```python\nGet the rows with population over 1m in South America\ndf[(df['population'] > 10**6) & (df['continent'] == 'South America')]\n```\nBut why does it work? The example above features something like 5 different overloaded operators. What's each of those operations doing? It can help to know the answer when things start going wrong.\nCurious how it all works?\nHave you ever called help() or dir() on an object and wondered what the heck all those names with the double-underscores were?", "print(dir(list))", "This turns out to be directly related to operator overloading.\nWhen Python programmers want to define how operators behave on their types, they do so by implementing methods with special names beginning and ending with 2 underscores such as __lt__, __setattr__, or __contains__. Generally, names that follow this double-underscore format have a special meaning to Python.\nSo, for example, the expression x in [1, 2, 3] is actually calling the list method __contains__ behind-the-scenes. It's equivalent to (the much uglier) [1, 2, 3].__contains__(x). \nIf you're curious to learn more, you can check out Python's official documentation, which describes many, many more of these special \"underscores\" methods.\nWe won't be defining our own types in these lessons (if only there was time!), but I hope you'll get to experience the joys of defining your own wonderful, weird types later down the road.\nYour turn!\nHead over to the final coding exercise for one more round of coding questions involving imports, working with unfamiliar objects, and, of course, more gambling." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
jtwhite79/pyemu
examples/modflow_to_pest_like_a_boss.ipynb
bsd-3-clause
[ "Freyberg Model PEST setup example\nHerein, we will show users how to use pyEMU to setup a groundwater model for use in pest. We will cover the following topics:\n- setup pilot points as parameters, including 1st-order tikhonov regularization\n- setup other model inputs as parameters\n- setup simulated water levels as observations\n- setup simulated water budget components as observations (or forecasts)\n- create a pest control file and adjust observation weights to balance the objective function\nNote that, in addition to pyemu, this notebook relies on flopy. flopy can be obtained (along with installation instructions) at https://github.com/modflowpy/flopy.", "%matplotlib inline\nimport os\nimport shutil\nimport platform\nimport numpy as np\nimport pandas as pd\nfrom matplotlib.patches import Rectangle as rect\nimport matplotlib.pyplot as plt\nimport warnings\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport matplotlib as mpl\nnewparams = {'legend.fontsize':10, 'axes.labelsize':10,\n 'xtick.labelsize':10, 'ytick.labelsize':10,\n 'font.family':'Univers 57 Condensed', \n 'pdf.fonttype':42}\nplt.rcParams.update(newparams)\nimport pyemu", "Model background\nThis example is based on the synthetic classroom model of Freyberg(1988). The model is a 2-dimensional MODFLOW model with 1 layer, 40 rows, and 20 columns. The model has 2 stress periods: an initial steady-state stress period used for calibration, and a 5-year transient stress period. The calibration period uses the recharge and well flux of Freyberg(1988); the last stress period use 25% less recharge and 25% more pumping to represent future conditions for a forecast period.\nFreyberg, David L. \"AN EXERCISE IN GROUND‐WATER MODEL CALIBRATION AND PREDICTION.\" Groundwater 26.3 (1988): 350-360.", "#load the existing model and save it in a new dir and make sure it runs\nimport flopy\nmodel_ws = os.path.join(\"Freyberg_transient\")\nml = flopy.modflow.Modflow.load(\"freyberg.nam\",model_ws=model_ws,verbose=False)\nml.exe_name = \"mfnwt\"\nml.model_ws = \"temp\"\nEXE_DIR = os.path.join(\"..\",\"bin\")\nif \"window\" in platform.platform().lower():\n EXE_DIR = os.path.join(EXE_DIR,\"win\")\nelif \"darwin\" in platform.platform().lower() or \"macos\" in platform.platform().lower():\n EXE_DIR = os.path.join(EXE_DIR,\"mac\")\nelse:\n EXE_DIR = os.path.join(EXE_DIR,\"linux\")\n\n[shutil.copy2(os.path.join(EXE_DIR,f),os.path.join(\"temp\",f)) for f in os.listdir(EXE_DIR)]\n\nml.write_input()\nml.run_model()", "Observations\nHOB observations\nHere we are going to setup an hob package to handle getting the observations from modflow. 
Normally, you would already have this file made, but here we are just making one for fun", "rc_df = pd.read_csv(os.path.join(\"Freyberg\",\"misc\",\"obs_rowcol.dat\"),delim_whitespace=True)\nhds = flopy.utils.HeadFile(os.path.join(ml.model_ws,\"freyberg.hds\"))\ndata = hds.get_alldata()\nobs = []\nroff = 0.0#ml.dis.delc.array[0] / 2.0\ncoff = 0.0#ml.dis.delr.array[0] / 2.0\nfor n,r,c in zip(rc_df.name,rc_df.row,rc_df.col):\n name = \"i{1:02d}j{2:02d}\".format(n,r-1,c-1)\n d = np.zeros((data.shape[0]-1,2))\n d[:,0] = hds.times[1:]\n d[:,1] = data[1:,0,r-1,c-1] + np.random.randn(d.shape[0]) #add some random noise to the observations\n obs.append(flopy.modflow.HeadObservation(ml,obsname=name,layer=0,row=r-1,\n column=c-1,roff=roff,coff=coff,\n time_series_data=d))\nflopy.modflow.ModflowHob(ml,obs_data=obs,iuhobsv=600)\next_path = os.path.join(ml.model_ws,\"ref\")\nif os.path.exists(ext_path):\n shutil.rmtree(ext_path)\nprint(ext_path)\nos.mkdir(ext_path)\nml.external_path = os.path.split(ext_path)[-1]\nml.upw.hk.fmtin = \"(FREE)\"\nml.upw.sy.fmtin = \"(FREE)\"\nml.rch.rech.fmtin = \"(FREE)\"\nml.write_input()\nml.run_model()", "pyemu has a helper function to setup this instruction file for you and also load observations into dataframe", "hob_df = pyemu.gw_utils.modflow_hob_to_instruction_file(os.path.join(ml.model_ws,ml.name+\".hob.out\"))", "The dataframe returned has a lot of useful info that we will use later...", "hob_df.head()", "list file budget components as observations (or forecasts)\nHere we will use flopy and pyemu to load each of the flux and volume budget components from the modflow list file to use as observations (or forecasts). These are valuable pieces of information and since observations are free, why not include them? This helper function writes two instruction files: &lt;flx_filename&gt;.ins and &lt;vol_filename&gt;.ins", "# the flux budget output filename that will be written during each forward run\nflx_filename=os.path.join(ml.model_ws,\"flx.out\")\n\n# the volumne budget output filename that will be written during each forward run\nvol_filename = os.path.join(ml.model_ws,\"vol.out\")\ndf_wb = pyemu.gw_utils.setup_mflist_budget_obs(os.path.join(ml.model_ws,ml.name+\".list\"))\n\ndf_wb.head()", "Parameters\npilot points\nHere we will setup pilot points for several array-based modflow inputs using pyemu\nsetup pilot point locations\nfirst specify what pilot point names we want to use for each model layer (counting from 0). Here we will setup pilot points for hk, sy and rech. The rech pilot points will be used as a single multiplier array for all stress periods to account for potential spatial bias in recharge.", "prefix_dict= {0:[\"hk1\",\"sy1\",\"rech1\"]}\n", "This helper function is doing a lot of things: writing templates, pilot point files, and creating a shapefile of pilot points. The every_n_cell arg is key: it decides how many cells to skip between pilot point locations - since we passed the model, only active model cells get pilot points (using bas6.ibound). 
Like many things with flopy, the SpatialReference is used to define pilot point x and y coordinates", "pp_cells = 3\npp_df = pyemu.pp_utils.setup_pilotpoints_grid(ml,prefix_dict=prefix_dict,every_n_cell=pp_cells,pp_dir=ml.model_ws,\n tpl_dir=ml.model_ws,shapename=os.path.join(ml.model_ws,\"pp.shp\"))", "The dataframe return has the same info as the shapefile that was written - useful info, right?", "pp_df.index = pp_df.parnme\npp_df", "geostats and kriging\nnow that we have pilot points setup, we need to solve the kriging equations for each model cell using pilot point locations. Since we only have a single set of pilot points that we are reusing for several array-based modflow inputs, we only need to get the kriging factors once", "hk_pp = pyemu.pp_utils.pp_file_to_dataframe(os.path.join(ml.model_ws,\"hk1pp.dat\"))\n\nhk_pp.head()", "Let's setup a geostatistical structure. The contribution doesn't matter for pilot point interpolation, but it does matter when we want to form a prior parameter covariance matrix - we will get to that later. A good rule of thumb is to use an a value that is three times the pilot point spacing. Also, since the all of these pilot points will be log transformed, we need to use a log-based geostatistical structure", "a = pp_cells * ml.dis.delr.array[0] * 3.0\nv = pyemu.geostats.ExpVario(contribution=1.0,a=a)\ngs = pyemu.geostats.GeoStruct(variograms=v,transform=\"log\")\ngs.plot()", "This is where things get fun. First we create an OrdinaryKrige object", "ok = pyemu.geostats.OrdinaryKrige(geostruct=gs,point_data=hk_pp)", "Now we use a helper function to solve the kriging factors for each active model cell: OrdinaryKrige.calc_factors_grid() includes all the standard kriging arguments, such as search radius, min and max interpolation points,zone_array, as well as the option to save the kriging variance array \nNote: we need to pass out model's spatial reference information. For flopy this used to be contained in model.sr\n However this has been superseded by model.modelgrid. To avoid reliance on a changing (and not always backward\n compatible) code base the sr method has been abstracted into pyemu.", "sr = pyemu.helpers.SpatialReference.from_namfile(os.path.join(ml.model_ws, ml.namefile),\n delr=ml.dis.delr, delc=ml.dis.delc)\nok.calc_factors_grid(sr, zone_array=ml.bas6.ibound[0].array,var_filename=os.path.join(ml.model_ws,\"layer1_var.dat\"))", "Ok, we know that this function is slow for bigly models, but it is super convienent and allows a lot of flexibility. So, once we have calculated the kriging factors for each active model cell, we need to write this to a factors file", "ok.to_grid_factors_file(os.path.join(ml.model_ws,\"pp.fac\"))", "Let's check out that kriging variance array....", "var_arr = np.ma.masked_invalid(np.loadtxt(os.path.join(ml.model_ws,\"layer1_var.dat\")))\nfig = plt.figure(figsize=(20,20))\nax = plt.subplot(111,aspect=\"equal\")\nax.pcolormesh(sr.xcentergrid,sr.ycentergrid,var_arr,alpha=0.5)\nax.scatter(hk_pp.x, hk_pp.y,marker='.',s=10)\n\nsr.xcentergrid[0,0], sr.ycentergrid[0,0]\n\nhk_pp.iloc[0,:].values", "other inputs as parameters\nSince we rarely know any model inputs perfectly, it is advisable to subject them to adjustment...not to get a good fit, but so we can account for there contribution to uncertainty...How about the conductance between the surface water and groundwater systems. In this model, we are using drain type boundaries. So, let's setup a multiplier parameter for each drain cell's conductance. 
\nSince we told flopy to write external files, all of the list-type modflow inputs are also external, which makes this so much easier! The first thing to do is copy the orginal drain list files (and all other files in the external directory) to a safe place:", "ext_path = os.path.join(ml.model_ws,\"ref\")\next_files = [f for f in os.listdir(ext_path)]\ndrain_files = [f for f in ext_files if \"drn\" in f.lower()]\n#print(drain_files)\nassert len(drain_files) == ml.nper,\"{0},{1}\".format(len(drain_files),ml.nper)\nbak_path = os.path.join(ml.model_ws,\"bak\")\nif os.path.exists(bak_path):\n shutil.rmtree(bak_path)\nos.mkdir(bak_path)\nfor f in ext_files:\n shutil.copy2(os.path.join(ext_path,f),os.path.join(bak_path,f))\n#assert len(os.listdir(bak_path)) == ml.nper", "Now all we need to do is write a template file. We will also write a generic cooresponding input file that will make testing easier later", "drn_df = pd.read_csv(os.path.join(bak_path,drain_files[0]),\n header=None,names=[\"l\",\"r\",\"c\",\"stage\",\"cond\"],\n delim_whitespace=True)\nf_tpl = open(os.path.join(ml.model_ws,\"drain_mlt.dat.tpl\"),'w')\nf_in = open(os.path.join(ml.model_ws,\"drain_mlt.dat\"),'w')\nf_tpl.write(\"ptf ~\\n\")\n#build parameter names from model cell info\ndrn_df.loc[:,\"parnme\"] = drn_df.apply(lambda x: \"drn_i{1:02.0f}j{2:02.0f}\".format(x.l-1,x.r-1,x.c-1),axis=1)\nfor parnme in drn_df.parnme:\n f_tpl.write(\"{0} ~ {0} ~\\n\".format(parnme))\n f_in.write(\"{0} 1.0\\n\".format(parnme))\nf_tpl.close()\nf_in.close()", "Building the pest control file...Finally!\nHere we will use the template and instruction files to construct a control file. Then we will use some pandas magic to set the appropriate parameter and observation info", "tpl_files = [os.path.join(ml.model_ws,f) for f in os.listdir(ml.model_ws) if f.endswith(\".tpl\")]\ninput_files = [f.replace(\".tpl\",'') for f in tpl_files]\ntpl_files", "See why it is important to use a consistent naming structure for the templates-input file pairs? Its the same for the instruction files", "ins_files = [os.path.join(ml.model_ws,f) for f in os.listdir(ml.model_ws) if f.endswith(\".ins\")]\noutput_files = [f.replace(\".ins\",'') for f in ins_files]\nins_files", "Now use these files to get a pyemu.Pst instance. This object has lots of cool functionality...", "pst = pyemu.Pst.from_io_files(tpl_files,input_files,ins_files,output_files)", "Let's look at some of the important parts of the Pst class. First, all attributes coorespond to the names in list in the pest manual. For instance, the * parameter data section of the control file is a pandas.DataFrame attribute named parameter_data:", "pst.parameter_data.head()", "We see that the columns of the DataFrame follow the pest naming conventions. Its the same for * observation data:", "pst.observation_data.head()", "What pyemu has set as the obsval is the simulated equivalent, if it is available - in the pst_from_io_files() helper, pyemu tries to call inschek, and, if successful, loads the output files from inschek. This can be very handy for error checking in the forward-run process. However, we still need to get the actual observed data into obsval...remember that dataframe from hob processing?", "hob_df.head()", "Notice the obsval column? Let's just set the index of this dataframe to obsnme, then pandas does the hard work for us:", "hob_df.index = hob_df.obsnme\nhob_df.head()\n\npst.observation_data.loc[hob_df.index,\"obsval\"] = hob_df.obsval\npst.observation_data.loc[hob_df.index,:].head()", "BOOM! 
that was easy...trying doing that without pandas....not fun!\nWe still have a few more items to set to specific values. The biggest one is initial values for parameters - they are given default values of 1.0:", "pst.parameter_data.head()", "Luckily, pandas makes this very easy. For example, let's set the DRN conductance parameters to have initial values of mean of the values in the model currently:", "avg = ml.drn.stress_period_data[0][\"cond\"].mean()\npar = pst.parameter_data #just a pointer to the full, long-named attribute\ndrn_pars = par.loc[par.parnme.apply(lambda x: x.startswith(\"drn\")),\"parnme\"].values\npar.loc[drn_pars,\"parval1\"] = avg\n#set the par group to mean something\npar.loc[drn_pars,\"pargp\"] = \"drn_cond\"\npar.loc[drn_pars,\"parubnd\"] = avg * 10.0\npar.loc[drn_pars,\"parlbnd\"] = avg * 0.1", "Let's set the pargp for the remaining parameters using that cool pilot point dataframe from eariler...", "par.loc[pp_df.parnme,\"pargp\"] = pp_df.pargp", "We need to reset the model run command:", "pst.model_command", "that is just a generic command. I prefer to use python scripts for this:", "pst.model_command = [\"python forward_run.py\"]", "Let's save this version of the control file", "pst.write(os.path.join(ml.model_ws,\"pest.pst\"))", "But this means we need to write forward_run.py and it needs to perform several actions:\n- apply kriging factors (using pyemu.gw_utils.fac2real())\n- apply the drain multipliers\n- call MODFLOW\n- process the MODFLOW list file\nLucky for you, I already made this file....", "shutil.copy2(os.path.join(\"Freyberg_transient\",\"forward_run.py\"),os.path.join(ml.model_ws,\"forward_run.py\"))", "adding prior information\npyemu supports both zero-order (preferred value) and first-order (preferred difference) Tikhonov regularization. Let's set preferred value for the conductance parameters:", "pyemu.utils.helpers.zero_order_tikhonov(pst,par_groups=[\"drn_cond\"])\npst.prior_information.head()", "Now, let's set preferred difference equations for pilot point groups. We will use the Pearson coef as the weight...", "pp_groups = pp_df.groupby(\"pargp\").groups\nfor pargp,par_names in pp_groups.items():\n this_pp_df = pp_df.loc[par_names,:]\n cov = gs.covariance_matrix(this_pp_df.x,this_pp_df.y,this_pp_df.parnme)\n pyemu.helpers.first_order_pearson_tikhonov(pst,cov,reset=False,abs_drop_tol=0.2)\n\npst.prior_information\n\npst.control_data.pestmode = \"regularization\"", "setting PEST++ options\nSome things I like to add:", "pst.pestpp_options[\"svd_pack\"] = \"redsvd\"\n#pst.pestpp_options[\"forecasts\"] = ", "saving the new control file", "pst.write(\"freyberg_reg.pst\")" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
bmidgley/pi-learner
learner2.ipynb
apache-2.0
[ "Experiment\nDon't worry if you make a mistake in coding. The output below the code will be a helpful message trying to get you back on track. We'll have lots of opportunity to improve or occasionally break the code. For example, can you find and fix the problem in the code below that prevents it from running? Python tries to tell us as concisely as it can, but it sometimes comes off a little terse. Fix the error and hit shift and enter to run it again.\nIf you aren't familiar with python syntax, compare the if statement below with the for and proc statements in the code above to see what is missing.", "import datetime;\n\nif((datetime.datetime.today().hour) > 12)\n print(\"good afternoon\")\nelse:\n print(\"good morning\")", "Mousing around\nWhat more can we do with the environment? How about making buttons to turn the LED on or off:", "import RPi.GPIO as GPIO\nfrom ipywidgets import widgets\nfrom IPython.display import display\n\nprint(\"using Raspberry Pi board pin numbers\")\nGPIO.setmode(GPIO.BOARD)\nled_pin = 11\n\nprint(\"set up GPIO output channel\")\nGPIO.setwarnings(False)\nGPIO.setup(led_pin, GPIO.OUT)\n\ndef led_on(btn):\n print('on')\n GPIO.output(led_pin, GPIO.HIGH)\n \ndef led_off(btn):\n print('off')\n GPIO.output(led_pin, GPIO.LOW)\n \non_btn = widgets.Button(description=\"On!\")\non_btn.on_click(led_on)\noff_btn = widgets.Button(description=\"Off!\")\noff_btn.on_click(led_off)\ndisplay(on_btn)\ndisplay(off_btn)\n", "Watch the messages that appear under the code as you hit the buttons. You can change the messages if you want to. Edit the code above and hit shift and enter together to see how it works.\nGo on to Learner lesson 3" ]
[ "markdown", "code", "markdown", "code", "markdown" ]
andrescodas/casadi
docs/notebooks/demo.ipynb
lgpl-3.0
[ "CasADi demo\nWhat is CasADi?\n\nA tool for quick & efficient implementation of algorithms for dynamic optimization\nOpen source, LGPL-licensed, <a href=\"http://casadi.org\">casadi.org</a>\nC++ / C++11\nInterfaces to Python, Haskell, (Matlab?)\nNumerical backends: <a href=\"https://projects.coin-or.org/Ipopt\">IPOPT</a>, <a href=\"http://computation.llnl.gov/casc/sundials/main.html\">Sundials</a>, ...\nDevelopers in group of Moritz Diehl:\nJoel Andersson\nJoris Gillis\nGreg Horn\n\nOutline of demo\n\nScalar expression (SX) graphs\nFunctions of SX graphs\nMatrices of scalar expressions\nAutomatic differentiation (AD)\nIntegrators\nMatrix expression (MX) graphs\nFunctions of MX graphs\nSolving an optimal control problem\n\nScalar expression (SX) graphs", "from pylab import *\nfrom casadi import *\nfrom casadi.tools import * # for dotdraw\n%matplotlib inline\n\nx = SX.sym(\"x\") # scalar symbolic primitives\ny = SX.sym(\"y\")\n\nz = x*sin(x+y) # common mathematical operators\n\nprint z\n\ndotdraw(z,direction=\"BT\")\n\nJ = jacobian(z,x)\nprint J\n\ndotdraw(J,direction=\"BT\")", "Note 1: subexpressions are shared.\nGraph $\\leftrightarrow$ Tree\nDifferent from Maple, Matlab symbolic, sympy, ...\n\nA (very) little bit of Computer Algebra", "print x*y/x-y\n\nH = hessian(z,x)\nprint H", "Functions of SX graphs\nSort graph into algorithm", "f = Function(\"f\",[x,y],[z])\n\nprint f", "Note 2: re-use of tape variables: live-variables", "print f(1.2,3.4)\n\nprint f(1.2,x+y)\n\nf.generate(\"f.c\")\nprint file(\"f.c\").read()", "Matrices of scalar expressions", "A = SX.sym(\"A\",3,3)\nB = SX.sym(\"B\",3)\nprint A\n\nprint solve(A,B)\n\nprint trace(A) # Trace\n\nprint mtimes(A,B) # Matrix multiplication\n\nprint norm_fro(A) # Frobenius norm\n\nprint A[2,:] # Slicing", "Rule 1: Everything is a matrix", "print A.shape, z.shape\n\nI = SX.eye(3)\nprint I\n\nAk = kron(I,A)\nprint Ak", "Rule 1: Everything is a sparse matrix", "Ak.sparsity().spy()\n\nA.sparsity().spy()\n\nz.sparsity().spy()", "Automatic differentiation (AD)\nConsider an ode:\n\\begin{equation}\n\\dot{p} = (1 - q^2)p-q+u\n\\end{equation}\n\\begin{equation}\n\\dot{q} = p\n\\end{equation}\n\\begin{equation}\n\\dot{c} = p^2+q^2+u^2\n\\end{equation}", "t = SX.sym(\"t\") # time\nu = SX.sym(\"u\") # control\np = SX.sym(\"p\")\nq = SX.sym(\"q\")\nc = SX.sym(\"c\")\nx = vertcat(p,q,c) # state\n\node = vertcat((1 - q**2)*p - q + u, p, p**2+q**2+u**2)\nprint ode, ode.shape\n\nJ = jacobian(ode,x)\nprint J\n\nf = Function(\"f\",[t,u,x],[ode])\n\nffwd = f.forward(1)\n\nfadj = f.reverse(1)\n\n# side-by-side printing\nprint '{:*^24} || {:*^28} || {:*^28}'.format(\"f\",\"ffwd\",\"fadj\")\ndef short(f):\n import re\n return re.sub(r\", a\\.k\\.a\\. \\\"(\\w+)\\\"\",r\". 
\\1\",str(f).replace(\", No description available\",\"\").replace(\"Input \",\"\").replace(\"Output \",\"\"))\nfor l in zip(short(f).split(\"\\n\"),short(ffwd).split(\"\\n\"),short(fadj).split(\"\\n\")):\n print '{:<24} || {:<28} || {:<28}'.format(*l)", "Performing forward sweeps gives the columns of J", "print I\n\nfor i in range(3):\n print ffwd(t,u,x, ode, 0,0,I[:,i])\n\nprint J", "Performing adjoint sweeps gives the rows of J", "for i in range(3):\n print fadj(t,u,x, ode, I[:,i])[2]", "Often, you can do better than slicing with unit vectors\n\nNote 3: CasADi does graph coloring for efficient sparse jacobians\n\nIntegrators\n$\\dot{x}=f(x,u,t)$ with $x = [p,q,c]^T$", "f = {'x':x,'t':t,'p':u,'ode':ode}", "Construct an integrating block $x_{k+1} = \\Phi(f;\\Delta t;x_k,u_k)$", "tf = 10.0\nN = 20\ndt = tf/N\n\nPhi = integrator(\"Phi\",\"cvodes\",f,{\"tf\":dt})\n\nx0 = DM([0,1,0])\n\nprint Phi(x0=x0)\n\nx = x0\nxs = [x]\n\nfor i in range(N):\n x = Phi(x0=x)[\"xf\"]\n \n xs.append(x)\n\nplot(horzcat(*xs).T)\nlegend([\"p\",\"q\",\"c\"])", "Rule 2: Everything is a Function (see http://docs.casadi.org)\n\nMatrix expression (MX) graphs\n\nNote 4: this is what makes CasADi stand out among AD tools\n\nRecall", "n = 3\n\nA = SX.sym(\"A\",n,n)\nB = SX.sym(\"B\",n,n)\nC = mtimes(A,B)\nprint C\ndotdraw(C,direction='BT')", "What if you don't want to expand into scalar operations? ( avoid $O(n^3)$ storage)", "A = MX.sym(\"A\",n,n)\nB = MX.sym(\"B\",n,n)\nC = mtimes(A,B)\nprint C\ndotdraw(C,direction='BT')", "What if you cannot expand into matrix operations? ( numerical algorithm )", "C = solve(A,B)\nprint C\ndotdraw(C,direction='BT')\n\nX0 = MX.sym(\"x\",3)\n\nXF = Phi(x0=X0)[\"xf\"]\nprint XF\n\nexpr = sin(XF)+X0\ndotdraw(expr,direction='BT')", "Functions of MX graphs", "F = Function(\"F\",[X0],[ expr ])\nprint F\n\nprint F(x0)\n\nJ = F.jacobian()\n\nprint J(x0)", "This shows how an integrator-call can be embedded in matrix graph.\nMore possibilities: external compiled library, a call to Matlab/Scipy\nSolving an optimal control problem\n\\begin{equation}\n\\begin{array}{cl}\n\\underset{p(.),q(.),u(.)}{\\text{minimize}} & \\displaystyle \\int_{0}^{T}{ p(t)^2 + q(t)^2 + u(t)^2 dt} \\\\\n\\text{subject to}\n& \\dot{p} = (1 - q^2)p-q+u \\\\\n& \\dot{q} = p \\\\\n& p(0) = 0, q(0) = 1 \\\\\n&-1 \\le u(t) \\le 1\n\\end{array}\n\\end{equation}\nRemember, $\\dot{x}=f(x,u,t)$ with $x = [p,q,c]^T$\n\\begin{equation}\n\\begin{array}{cl}\n\\underset{x(.),u(.)}{\\text{minimize}} & c(T) \\\\\n\\text{subject to}\n& \\dot{x} = f(x,u) \\\\\n& p(0) = 0, q(0) = 1, c(0)= 0 \\\\\n&-1 \\le u(t) \\le 1\n\\end{array}\n\\end{equation}\nDiscretization with multiple shooting\n\\begin{equation}\n\\begin{array}{cl}\n\\underset{x_{\\bullet},u_{\\bullet}}{\\text{minimize}} & c_N \\\\\n\\text{subject to}\n& x_{k+1} - \\Phi(x_k,u_k) = 0 , \\quad \\quad k = 0,1,\\ldots, (N-1) \\\\\n& p_0 = 0, q_0 = 1, c_0 = 0 \\\\\n&-1 \\le u_k \\le 1 , \\quad \\quad k = 0,1,\\ldots, (N-1) \n\\end{array}\n\\end{equation}\nCast as NLP\n\\begin{equation}\n\\begin{array}{cl}\n\\underset{X}{\\text{minimize}} & F(X,P) \\\\\n\\text{subject to}\n& \\text{lbx} \\le X \\le \\text{ubx} \\\\\n& \\text{lbg} \\le G(X,P) \\le \\text{ubg} \\\\\n\\end{array}\n\\end{equation}", "X = struct_symMX([\n (\n entry(\"x\", repeat=N+1, struct=struct([\"p\",\"q\",\"c\"]) ),\n entry(\"u\", repeat=N)\n )\n ])", "X is a symbolic matrix primitive, but with fancier indexing", "print X.shape\nprint (N+1)*3+N", "Demo: $\\Phi(x_0,u_0)$", "Xf = Phi( x0=X[\"x\",0],p=X[\"u\",0] 
)[\"xf\"]\n\nprint Xf", "$ x_{k+1} - \\Phi(x_k,u_k) = 0 , \\quad \\quad k = 0,1,\\ldots, (N-1)$", "g = [] # List of constraint expressions\n\nfor k in range(N):\n Xf = Phi( x0=X[\"x\",k],p=X[\"u\",k] )[\"xf\"]\n g.append( X[\"x\",k+1]-Xf )\n\nobj = X[\"x\",N,\"c\"] # c_N\n\nnlp = dict(x=X, g=vertcat(*g),f=obj)\n\nprint nlp", "Block structure in the constraint Jacobian", "jacG = jacobian(nlp[\"g\"],nlp[\"x\"])\n\nS = jacG.sparsity()\n\nprint S.shape\n\nDM.ones(S)[:20,:20].sparsity().spy()", "Recall\n\\begin{equation}\n\\begin{array}{cl}\n\\underset{X}{\\text{minimize}} & F(X,P) \\\\\n\\text{subject to}\n& \\text{lbx} \\le X \\le \\text{ubx} \\\\\n& \\text{lbg} \\le G(X,P) \\le \\text{ubg} \\\\\n\\end{array}\n\\end{equation}", "solver = nlpsol(\"solver\",\"ipopt\",nlp)\n\nlbx = X(-inf)\nubx = X(inf)\n\nlbx[\"u\",:] = -1; ubx[\"u\",:] = 1 # -1 <= u(t) <= 1\n\nlbx[\"x\",0] = ubx[\"x\",0] = x0 # Initial condition\n\nsol_out = solver(\n lbg = 0, # Equality constraints for shooting constraints\n ubg = 0, # 0 <= g <= 0\n lbx = lbx,\n ubx = ubx)\n\nprint sol_out[\"x\"]\n\nsol = X(sol_out[\"x\"])\n\nplot(horzcat(*sol[\"x\",:]).T)\n\nstep(range(N),sol[\"u\",:])", "Wrapping up\nShowcase: kite-power optimization by Greg Horn, using CasADi backend", "from IPython.display import YouTubeVideo\nYouTubeVideo('tmjIBpb43j0')\n\n\n\nYouTubeVideo('SW6ZJzcMWAk')", "Distinction with other software:\n<table>\n <tr>\n <th>ACADOtoolkit</th><th>CasADi</th>\n </tr>\n <tr>\n <td><ul><li>Black-box solver</li><li>Standard-form OCP</li><li>Good at small-scale real-time NMPC</li><li>Easy to get started</li></ul></td>\n <td><ul><li>Write your own solver using a pool of building-blocks</li><li>No limitations on formulation</li><li>Good at large-scale OCP</li><li>Easy to extend</li></ul></td>\n </tr>\n</table>\n\n<table>\n <tr>\n <th>Other operator-overloading AD tools</th><th>CasADi</th>\n </tr>\n <tr>\n <td><ul><li>Scalar graphs only, checkpointing</li></ul></td>\n <td><ul><li>Scalar and matrix graphs</li><li>Batteries included: Ipopt, Sundials</li></ul></td>\n </tr>\n</table>\n\nClosest similarity: AMPL" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
Aroogz/Introduction-to-Data-Science-in-Python
Week 2/Assignment+2.ipynb
mit
[ "You are currently looking at version 1.2 of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the Jupyter Notebook FAQ course resource.\n\nAssignment 2 - Pandas Introduction\nAll questions are weighted the same in this assignment.\nPart 1\nThe following code loads the olympics dataset (olympics.csv), which was derrived from the Wikipedia entry on All Time Olympic Games Medals, and does some basic data cleaning. \nThe columns are organized as # of Summer games, Summer medals, # of Winter games, Winter medals, total # number of games, total # of medals. Use this dataset to answer the questions below.", "import pandas as pd\n\ndf = pd.read_csv('olympics.csv', index_col=0, skiprows=1)\n\nfor col in df.columns:\n if col[:2]=='01':\n df.rename(columns={col:'Gold'+col[4:]}, inplace=True)\n if col[:2]=='02':\n df.rename(columns={col:'Silver'+col[4:]}, inplace=True)\n if col[:2]=='03':\n df.rename(columns={col:'Bronze'+col[4:]}, inplace=True)\n if col[:1]=='№':\n df.rename(columns={col:'#'+col[1:]}, inplace=True)\n\nnames_ids = df.index.str.split('\\s\\(') # split the index by '('\n\ndf.index = names_ids.str[0] # the [0] element is the country name (new index) \ndf['ID'] = names_ids.str[1].str[:3] # the [1] element is the abbreviation or ID (take first 3 characters from that)\n\ndf = df.drop('Totals')\ndf.head()", "Question 0 (Example)\nWhat is the first country in df?\nThis function should return a Series.", "# You should write your whole answer within the function provided. The autograder will call\n# this function and compare the return value against the correct solution value\ndef answer_zero():\n # This function returns the row for Afghanistan, which is a Series object. The assignment\n # question description will tell you the general format the autograder is expecting\n return df.iloc[0]\n\n# You can examine what your function returns by calling it in the cell. If you have questions\n# about the assignment formats, check out the discussion forums for any FAQs\nanswer_zero() ", "Question 1\nWhich country has won the most gold medals in summer games?\nThis function should return a single string value.", "def answer_one():\n maxim = df['Gold'].max()\n most_gold = df.index[df['Gold'] == maxim]\n return most_gold[0]\nanswer_one()", "Question 2\nWhich country had the biggest difference between their summer and winter gold medal counts?\nThis function should return a single string value.", "def answer_two():\n gold_sum_win_diff = abs(df['Gold'] - df['Gold.1'])\n biggest_idx = gold_sum_win_diff.idxmax()\n \n return biggest_idx\n\nanswer_two()", "Question 3\nWhich country has the biggest difference between their summer gold medal counts and winter gold medal counts relative to their total gold medal count? \n$$\\frac{Summer~Gold - Winter~Gold}{Total~Gold}$$\nOnly include countries that have won at least 1 gold in both summer and winter.\nThis function should return a single string value.", "def answer_three():\n new_df = df[(df['Gold'] > 0) & (df['Gold.1'] > 0)]\n summer_gold = new_df['Gold']\n wint_gold = new_df['Gold.1']\n tot_gold = new_df['Gold.2']\n measure = (summer_gold - wint_gold)/tot_gold\n return measure.idxmax()\n\nanswer_three()\n\ndf.shape", "Question 4\nWrite a function that creates a Series called \"Points\" which is a weighted value where each gold medal (Gold.2) counts for 3 points, silver medals (Silver.2) for 2 points, and bronze medals (Bronze.2) for 1 point. 
The function should return only the column (a Series object) which you created.\nThis function should return a Series named Points of length 146", "def answer_four():\n gold_2_val = df['Gold.2']*3\n silver_2_val = df['Silver.2']*2\n bronze_2_val = df['Bronze.2']*1\n \n Points = pd.Series(gold_2_val+silver_2_val+bronze_2_val, name='Points')\n return Points\n\n#answer_four()", "Part 2\nFor the next set of questions, we will be using census data from the United States Census Bureau. Counties are political and geographic subdivisions of states in the United States. This dataset contains population data for counties and states in the US from 2010 to 2015. See this document for a description of the variable names.\nThe census dataset (census.csv) should be loaded as census_df. Answer questions using this as appropriate.\nQuestion 5\nWhich state has the most counties in it? (hint: consider the sumlevel key carefully! You'll need this for future questions too...)\nThis function should return a single string value.", "census_df = pd.read_csv('census.csv', encoding='iso-8859-1')\ncensus_df.head()\n\ndf_level = census_df[census_df['SUMLEV'] == 50]\ngroup = df_level.groupby(['STNAME']).size()\n#group\n\n\ndef answer_five():\n df_level = census_df[census_df['SUMLEV'] == 50]\n group = df_level.groupby(['STNAME']).size().reset_index(name='freq')\n \n max_count_state = group['freq'].idxmax()\n return group['STNAME'][max_count_state]\n\nanswer_five()", "Question 6\nOnly looking at the three most populous counties for each state, what are the three most populous states (in order of highest population to lowest population)? Use CENSUS2010POP.\nThis function should return a list of string values.", "def answer_six():\n df_level = census_df[census_df['SUMLEV'] == 50]\n top_count = df_level.sort_values(by=['STNAME', 'CENSUS2010POP'], ascending=False)\n top_count = top_count.groupby('STNAME').head(3) #based on first 3 for each state\n\n maxim = top_count.groupby('STNAME').sum()\n maxim = maxim.sort_values(by='CENSUS2010POP', ascending=False).head(3)\n\n return maxim.index.tolist()\nanswer_six()\n\ndf_county = census_df[census_df['SUMLEV'] == 50]\ndf_county['max'] = df_county[['STNAME','CTYNAME', 'POPESTIMATE2015',\n 'POPESTIMATE2014', 'POPESTIMATE2013',\n 'POPESTIMATE2012', 'POPESTIMATE2011',\n 'POPESTIMATE2010']].max(axis=1)\n\ndf_county['min'] = df_county[['STNAME','CTYNAME', 'POPESTIMATE2015',\n 'POPESTIMATE2014', 'POPESTIMATE2013',\n 'POPESTIMATE2012', 'POPESTIMATE2011',\n 'POPESTIMATE2010']].min(axis=1)\n\ndf_county['max_diff'] = abs(df_county['max'] - df_county['min'])\ndf_county.sort_values(by='max_diff', ascending=False)\n\ndf_county.head()", "Question 7\nWhich county has had the largest absolute change in population within the period 2010-2015? (Hint: population values are stored in columns POPESTIMATE2010 through POPESTIMATE2015, you need to consider all six columns.)\ne.g. 
If County Population in the 5 year period is 100, 120, 80, 105, 100, 130, then its largest change in the period would be |130-80| = 50.\nThis function should return a single string value.", "def answer_seven():\n df_county = census_df[census_df['SUMLEV'] == 50]\n df_county['max'] = df_county[['STNAME','CTYNAME', 'POPESTIMATE2015',\n 'POPESTIMATE2014', 'POPESTIMATE2013',\n 'POPESTIMATE2012', 'POPESTIMATE2011',\n 'POPESTIMATE2010']].max(axis=1)\n\n df_county['min'] = df_county[['STNAME','CTYNAME', 'POPESTIMATE2015',\n 'POPESTIMATE2014', 'POPESTIMATE2013',\n 'POPESTIMATE2012', 'POPESTIMATE2011',\n 'POPESTIMATE2010']].min(axis=1)\n\n df_county['max_diff'] = abs(df_county['max'] - df_county['min'])\n df_county.sort_values(by='max_diff', ascending=False)\n\n \n \n return df_county['CTYNAME'].iloc[0]\n\nanswer_seven()", "Question 8\nIn this datafile, the United States is broken up into four regions using the \"REGION\" column. \nCreate a query that finds the counties that belong to regions 1 or 2, whose name starts with 'Washington', and whose POPESTIMATE2015 was greater than their POPESTIMATE 2014.\nThis function should return a 5x2 DataFrame with the columns = ['STNAME', 'CTYNAME'] and the same index ID as the census_df (sorted ascending by index).", "def answer_eight():\n\n counties_df = census_df[census_df['SUMLEV'] == 50]\n ans = counties_df[((counties_df['REGION']==1)|(counties_df['REGION']==2))&(counties_df['CTYNAME']=='Washington County')&(counties_df['POPESTIMATE2015']>counties_df['POPESTIMATE2014'])][['STNAME','CTYNAME']]\n return ans\n\nanswer_eight()\n\n\ndef answer_seven():\n cty = census_df[census_df['SUMLEV'] == 50]\n cty['pop_change'] = abs(cty['POPESTIMATE2015'] - cty['POPESTIMATE2014'])+abs(cty['POPESTIMATE2014'] - cty['POPESTIMATE2013'])+abs(cty['POPESTIMATE2013'] - cty['POPESTIMATE2012'])+abs(cty['POPESTIMATE2012'] - cty['POPESTIMATE2011'])+abs(cty['POPESTIMATE2011'] - cty['POPESTIMATE2010'])\n maxim = max(cty['pop_change'])\n ans = cty['CTYNAME'][cty['pop_change']==maxim].tolist()\n return ans[0]\n\n\nanswer_seven()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
TaylorOshan/spint
tools/gitcount.ipynb
bsd-3-clause
[ "PySAL Change Log Statistics\nThis notebook generates the summary statistics for a package. \nIt assumes you are running this under the tools directory at the toplevel of the package\nChange the values only in the next cell", "package_name = 'spint'\nrelease_date = '2020-09-08'\nstart_date = '2019-07-22'", "This notebook will generate a file in the current directory with the name \"changelog_VERSION.md\". You can edit and append this on front of the CHANGELOG file for the package release.", "from __future__ import print_function\nimport os\nimport json\nimport re\nimport sys\nimport pandas\n\nfrom datetime import datetime, timedelta\nfrom time import sleep\nfrom subprocess import check_output\ntry:\n from urllib import urlopen\nexcept:\n from urllib.request import urlopen\n\nimport ssl\nimport yaml\n\ncontext = ssl._create_unverified_context()\n\n\nCWD = os.path.abspath(os.path.curdir)\n\nCWD\n\nsince_date = '--since=\"{start}\"'.format(start=start_date)\nsince_date\nsince = datetime.strptime(start_date+\" 0:0:0\", \"%Y-%m-%d %H:%M:%S\")\nsince\n\n# get __version__\nf = \"../{package}/__init__.py\".format(package=package_name)\n\nwith open(f, 'r') as initfile:\n exec(initfile.readline())\n ", "Total commits by subpackage", "cmd = ['git', 'log', '--oneline', since_date]\nncommits = len(check_output(cmd).splitlines())\n\nncommits", "List Contributors\nSome of our contributors have many aliases for the same identity. So, we've added a mapping to make sure that individuals are listed once (and only once).", "identities = {'Levi John Wolf': ('ljwolf', 'Levi John Wolf'),\n 'Serge Rey': ('Serge Rey', 'Sergio Rey', 'sjsrey', 'serge'),\n 'Wei Kang': ('Wei Kang', 'weikang9009'),\n 'Dani Arribas-Bel': ('Dani Arribas-Bel', 'darribas')\n}\n\ndef regularize_identity(string):\n string = string.decode()\n for name, aliases in identities.items():\n for alias in aliases:\n if alias in string:\n string = string.replace(alias, name)\n if len(string.split(' '))>1:\n string = string.title()\n return string.lstrip('* ')\n\nauthor_cmd = ['git', 'log', '--format=* %aN', since_date]\n\nfrom collections import Counter\n\n\nncommits = len(check_output(cmd).splitlines())\nall_authors = check_output(author_cmd).splitlines()\ncounter = Counter([regularize_identity(author) for author in all_authors])\n# global_counter += counter\n# counters.update({'.'.join((package,subpackage)): counter})\nunique_authors = sorted(set(all_authors))\n\n\nunique_authors = counter.keys()\n\nunique_authors", "Disaggregate by PR, Issue", "from datetime import datetime, timedelta\nISO8601 = \"%Y-%m-%dT%H:%M:%SZ\"\nPER_PAGE = 100\nelement_pat = re.compile(r'<(.+?)>')\nrel_pat = re.compile(r'rel=[\\'\"](\\w+)[\\'\"]')\n\n\n\ndef parse_link_header(headers):\n link_s = headers.get('link', '')\n urls = element_pat.findall(link_s)\n rels = rel_pat.findall(link_s)\n d = {}\n for rel,url in zip(rels, urls):\n d[rel] = url\n return d\n\ndef get_paged_request(url):\n \"\"\"get a full list, handling APIv3's paging\"\"\"\n results = []\n while url:\n #print(\"fetching %s\" % url, file=sys.stderr)\n f = urlopen(url)\n results.extend(json.load(f))\n links = parse_link_header(f.headers)\n url = links.get('next')\n return results\n\ndef get_issues(project=\"pysal/pysal\", state=\"closed\", pulls=False):\n \"\"\"Get a list of the issues from the Github API.\"\"\"\n which = 'pulls' if pulls else 'issues'\n url = \"https://api.github.com/repos/%s/%s?state=%s&per_page=%i\" % (project, which, state, PER_PAGE)\n return get_paged_request(url)\n\n\ndef 
_parse_datetime(s):\n \"\"\"Parse dates in the format returned by the Github API.\"\"\"\n if s:\n return datetime.strptime(s, ISO8601)\n else:\n return datetime.fromtimestamp(0)\n\n\ndef issues2dict(issues):\n \"\"\"Convert a list of issues to a dict, keyed by issue number.\"\"\"\n idict = {}\n for i in issues:\n idict[i['number']] = i\n return idict\n\n\ndef is_pull_request(issue):\n \"\"\"Return True if the given issue is a pull request.\"\"\"\n return 'pull_request_url' in issue\n\n\ndef issues_closed_since(period=timedelta(days=365), project=\"pysal/pysal\", pulls=False):\n \"\"\"Get all issues closed since a particular point in time. period\ncan either be a datetime object, or a timedelta object. In the\nlatter case, it is used as a time before the present.\"\"\"\n\n which = 'pulls' if pulls else 'issues'\n\n if isinstance(period, timedelta):\n period = datetime.now() - period\n url = \"https://api.github.com/repos/%s/%s?state=closed&sort=updated&since=%s&per_page=%i\" % (project, which, period.strftime(ISO8601), PER_PAGE)\n allclosed = get_paged_request(url)\n # allclosed = get_issues(project=project, state='closed', pulls=pulls, since=period)\n filtered = [i for i in allclosed if _parse_datetime(i['closed_at']) > period]\n\n # exclude rejected PRs\n if pulls:\n filtered = [ pr for pr in filtered if pr['merged_at'] ]\n\n return filtered\n\n\ndef sorted_by_field(issues, field='closed_at', reverse=False):\n \"\"\"Return a list of issues sorted by closing date date.\"\"\"\n return sorted(issues, key = lambda i:i[field], reverse=reverse)\n\n\ndef report(issues, show_urls=False):\n \"\"\"Summary report about a list of issues, printing number and title.\n \"\"\"\n # titles may have unicode in them, so we must encode everything below\n if show_urls:\n for i in issues:\n role = 'ghpull' if 'merged_at' in i else 'ghissue'\n print('* :%s:`%d`: %s' % (role, i['number'],\n i['title'].encode('utf-8')))\n else:\n for i in issues:\n print('* %d: %s' % (i['number'], i['title'].encode('utf-8')))\n\n\n\n\nall_issues = {}\nall_pulls = {}\ntotal_commits = 0\n#prj='pysal/libpysal'\nprj = 'pysal/{package}'.format(package=package_name)\nissues = issues_closed_since(since, project=prj,pulls=False)\npulls = issues_closed_since(since, project=prj,pulls=True)\nissues = sorted_by_field(issues, reverse=True)\npulls = sorted_by_field(pulls, reverse=True)\nn_issues, n_pulls = map(len, (issues, pulls))\nn_total = n_issues + n_pulls\n\n\nissue_listing = []\nfor issue in issues:\n entry = \"{title} (#{number})\".format(title=issue['title'],number=issue['number'])\n issue_listing.append(entry)\n\npull_listing = []\nfor pull in pulls:\n entry = \"{title} (#{number})\".format(title=pull['title'],number=pull['number'])\n pull_listing.append(entry)\n\npull_listing\n\nmessage = \"We closed a total of {total} issues (enhancements and bug fixes) through {pr} pull requests\".format(total=n_total, pr=n_pulls)\n\nmessage = \"{msg}, since our last release on {previous}.\".format(msg=message, previous=str(start_date))\n\n\nmessage\n\nmessage += \"\\n\\n## Issues Closed\\n\"\n\nprint(message)\n\nissues = \"\\n\".join([\" - \"+issue for issue in issue_listing])\nmessage += issues\nmessage += \"\\n\\n## Pull Requests\\n\"\npulls = \"\\n\".join([\" - \"+pull for pull in pull_listing])\nmessage += pulls\n\nprint(message)\n\npeople = \"\\n\".join([\" - \"+person for person in unique_authors])\n\nprint(people)\n\nmessage +=\"\\n\\nThe following individuals contributed to this 
release:\\n\\n{people}\".format(people=people)\n\nprint(message)\n\nhead = \"# Changes\\n\\nVersion {version} ({release_date})\\n\\n\".format(version=__version__, release_date=release_date)\n\nprint(head+message)\n\noutfile = 'changelog_{version}.md'.format(version=__version__)\nwith open(outfile, 'w') as of:\n of.write(head+message)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
M-R-Houghton/euroscipy_2015
scikit_image/lectures/4_segmentation.ipynb
mit
[ "from __future__ import division, print_function\n%matplotlib inline", "Segmentation\nSegmentation is the division of an image into \"meaningful\" regions. If you've seen The Terminator, you've seen image segmentation:\n<img src=\"../2014-scipy/images/terminator-vision.png\" width=\"700px\"/>\nIn scikit-image, you can find segmentation functions in the segmentation package, with one exception: the watershed function is in morphology, because it's a bit of both. We'll use two algorithms, SLIC and watershed, and discuss applications of each.\nThere are two kinds of segmentation: contrast-based and boundary-based. The first is used when the regions of the image you are trying to divide have different characteristics, such as a red flower on a green background. The second is used when you want to segment an image in which borders between objects are prominent, but objects themselves are not very distinct. For example, a pile of oranges.\nImage types: contrast\nSLIC (Simple Linear Iterative Clustering) is a segmentation algorithm of the first kind: it clusters pixels in both space and color. Therefore, regions of space that are similar in color will end up in the same segment.\nLet's try to segment this image:\n<img src=\"../images/spice_1.jpg\" width=\"400px\"/>\n(Photo by Flickr user Clyde Robinson, used under CC-BY 2.0 license.)\nThe SLIC function takes two parameters: the desired number of segments, and the \"compactness\", which is the relative weighting of the space and color dimensions. The higher the compactness, the more \"square\" the returned segments.", "import numpy as np\nfrom matplotlib import pyplot as plt\n\nimport skdemo\nplt.rcParams['image.cmap'] = 'spectral'\nfrom skimage import io, segmentation as seg, color\n\nurl = '../images/spice_1.jpg'\nimage = io.imread(url)\n\nlabels = seg.slic(image, n_segments=18, compactness=10)\nskdemo.imshow_all(image, labels.astype(float) / labels.max())\nprint(labels)", "We can try to create a nicer visualization for labels: each segment will be represented by its average color.", "def mean_color(image, labels):\n out = np.zeros_like(image)\n for label in np.unique(labels):\n indices = np.nonzero(labels == label)\n out[indices] = np.mean(image[indices], axis=0)\n return out\n\nskdemo.imshow_all(image, mean_color(image, labels))", "Notice that some spices are broken up into \"light\" and \"dark\" parts. We have multiple parameters to control this:\n\nenforce_connectivity: Do some post-processing so that small regions get merged to adjacent big regions.", "labels = seg.slic(image, n_segments=18, compactness=10,\n enforce_connectivity=True)\nlabel_image = mean_color(image, labels)\nskdemo.imshow_all(image, label_image)", "Yikes! It looks like a little too much merging went on! This is because of the intertwining of the labels. One way to avoid this is to blur the image before segmentation. Because this is such a common use-case, a Gaussian blur is included in SLIC--just pass in the sigma parameter:", "labels = seg.slic(image, n_segments=18, compactness=10,\n sigma=2, enforce_connectivity=True)\nlabel_image = mean_color(image, labels)\nskdemo.imshow_all(image, label_image)", "Getting there! But it looks like some regions are merged together. We can alleviate this by increasing the number of segments:", "labels = seg.slic(image, n_segments=24, compactness=10,\n sigma=2, enforce_connectivity=True)\nlabel_image = mean_color(image, labels)\nskdemo.imshow_all(image, label_image)", "That's looking pretty good! 
Some regions are still too squiggly though... Let's try jacking up the compactness:", "labels = seg.slic(image, n_segments=24, compactness=40,\n sigma=2, enforce_connectivity=True)\nlabel_image = mean_color(image, labels)\nskdemo.imshow_all(image, label_image)", "<span class=\"exercize\">SLIC explorer</span>\nWrite an interactive tool to explore the SLIC parameter space. A skeleton is\ngiven below.\n```python\nfrom IPython.html import widgets\ndef func(slider_kwarg=0.5, dropdown_kwarg='option0'):\n s = some_func(image, arg1=slider_kwarg, arg2=dropdown_kwarg)\n skdemo.imshow_all(image, s)\nwidgets.interact(func, slider_kwarg=(start, stop, step),\n dropdown_kwarg=['option0', 'option1'])\n```\n<span class=\"exercize\">Select the spices</span>\nTry segmenting the following image with a modification to the same tool:\n<img src=\"../images/spices.jpg\" width=\"400px\"/>\n\"Spices\" photo by Flickr user Riyaad Minty.\nhttps://www.flickr.com/photos/riym/3326786046\nUsed under the Creative Commons CC-BY 2.0 license.\nNote: this image is more challenging to segment because the color regions are different from one part of the image to the other. Try the slic_zero parameter in combination with different values for n_segments.", "url2 = '../images/spices.jpg'", "Image types: boundary images\nOften, the contrast between regions is not sufficient to distinguish them, but there is a clear boundary between the two. Using an edge detector on these images, followed by a watershed, often gives very good segmentation. For example, look at the output of the Sobel filter on the coins image:", "from skimage import data\nfrom skimage import filters\nfrom matplotlib import pyplot as plt, cm\n\ncoins = data.coins()\nedges = filters.sobel(coins)\n\nplt.imshow(edges, cmap='gray');", "The watershed algorithm finds the regions between these edges. It does so by envisioning the pixel intensity as height on a topographic map. It then \"floods\" the map from the bottom up, starting from seed points. These flood areas are called \"watershed basins\" and when they meet, they form the image segmentation.\nLet's look at a one-dimensional example:", "from skimage.morphology import watershed\nfrom scipy import ndimage as ndi\n\nx = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])\ny = np.array([1, 0, 1, 2, 1, 3, 2, 0, 2, 4, 1, 0])\n\nseeds = ndi.label(y == 0)[0]\nseed_positions = np.argwhere(seeds)[:, 0]\n\nprint(\"Seeds:\", seeds)\nprint(\"Seed positions:\", seed_positions)\n\n# ------------------------------- #\nresult = watershed(y, seeds)\n# ------------------------------- #\n\n# You can ignore the code below--it's just\n# to make a pretty plot of the results.\nplt.figure(figsize=(10, 5))\nplt.plot(y, '-o', label='Image slice', linewidth=3)\nplt.plot(seed_positions, np.zeros_like(seed_positions), 'r^',\n label='Seeds', markersize=15)\n\nfor n, label in enumerate(np.unique(result)):\n mask = (result == label)\n plt.bar(x[mask][:-1], result[mask][:-1],\n width=1, label='Region %d' % n,\n alpha=0.1)\n\nplt.vlines(np.argwhere(np.diff(result)) + 0.5, -0.2, 4.1, 'm',\n linewidth=3, linestyle='--')\n\nplt.legend(loc='upper left', numpoints=1)\nplt.axis('off')\nplt.ylim(-0.2, 4.1);", "Answers the question: which seed flooded this point?\nLet's find some seeds for coins. 
First, we compute the distance transform of a thresholded version of edges:", "threshold = 0.4\n\n# Euclidean distance transform\n# How far do we have to travel from a non-edge to find an edge?\nnon_edges = (edges < threshold)\ndistance_from_edge = ndi.distance_transform_edt(non_edges)\n\nplt.imshow(distance_from_edge, cmap='gray');", "Then, we find the peaks in that image--the background points furthest away from any edges--which will act as the seeds.", "from skimage import feature\n\n# -------------------------------------------------#\npeaks = feature.peak_local_max(distance_from_edge)\nprint(\"Peaks shape:\", peaks.shape)\n# -------------------------------------------------#\n\npeaks_image = np.zeros(coins.shape, bool)\npeaks_image[tuple(np.transpose(peaks))] = True\nseeds, num_seeds = ndi.label(peaks_image)\n\nplt.imshow(edges, cmap='gray')\nplt.plot(peaks[:, 1], peaks[:, 0], 'ro');\nplt.axis('image')", "We are now ready to perform the watershed:", "ws = watershed(edges, seeds)\n\nfrom skimage import color\nplt.imshow(color.label2rgb(ws, coins));", "Examining the resulting segmentation\nWe have more prior knowledge that we can include in this processing problem.\nFor one--the coins are round!", "from skimage.measure import regionprops\n\nregions = regionprops(ws)\n\nws_updated = ws.copy()\nfor region in regions:\n if region.eccentricity > 0.6:\n ws_updated[ws_updated == region.label] = 0\n\nplt.imshow(color.label2rgb(ws_updated, coins, bg_label=0));", "<span class=\"exercize\">Seeds of doubt</span>\nWe can see that watershed gives a very good segmentation, but some coins are missing. Why? Can you suggest better seed points for the watershed operation?\nDiscussion\nWatershed and SLIC are too simple to be used as final segmentation outputs. In fact, their output is often called a superpixel, a kind of minimal segment. These are then used for further processing. Downstream processing methods are slated to be added to scikit-image in the next version. See Vighnesh Birodkar's GSoC project and his recent (and excellent) PR. These are beyond the scope of this tutorial but come chat to me after if you are interested in segmentation!\n\n<div style=\"height: 400px;\"></div>", "%reload_ext load_style\n%load_style ../themes/tutorial.css" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
vitojph/kschool-nlp
notebooks-py3/vsm.ipynb
gpl-3.0
[ "Esquemas de pesado para representar documentos\nLos modelos de espacio vectorial (vector space models (VSMs)) permiten representar palabras o términos dentro de un espacio vectorial continuo, de manera que las palabras que son similares desde el punto semántico se situan en puntos cercanos dentro de ese espacio común.\n\nEl uso de distintas aproximaciones de modelos de espacio vectorial tiene una larga tradición en PLN. Todas ellas comparten una misma hipótesis distribucional: las palabras o términos que comparten contextos tienen significados similares.", "# corpus ficticio con tres documentos de la misma longitud \n# y sin repeticiones de términos dentro del mismo documento\n\n# cada doc es una lista de palabras\nd1 = 'los angeles times'.split()\nd2 = 'new york times'.split()\nd3 = 'new york post'.split()\n\n# nuestro corpus D es una lista de documentos\nD = [d1, d2, d3]\nprint(D)", "tf (term frequency)\ntf es el peso que indica la frecuencia de un término, es decir, el número de veces que una determinada palabra aparece en un documento. \nLa aproximación más sencilla consiste consiste en asignar como peso para el término $t$ en el documento $d$ del corpus $D$ (denotado como $\\mbox{tf}_{t,d}$) el número de ocurrencias de $t$ en $d$. Es recomendable normalizar esta frecuencia, diviendo el número de ocurrencias entre el número total de palabras de un documento, para no penalizar los documentos breves: $\\mathrm{tf}(t,d) = \\frac{\\mathrm{f}(t, d)}{\\max{\\mathrm{f}(w, d):w \\in d}}$\nVamos a calcularlo.\nCalculando tf (1er intento)", "# calculamos los valores de tf para cada término t y cada docID \n# como un diccionario de diccionarios, tal que tf[t][docID] = valor\n\ntf = {}\n\n# iteramos sobre los documentos del corpus\nfor d in D:\n # iteramos sobre las palabras del documento\n for t in d:\n # si no he visto el término t antes, creo la clave en tf\n if t not in tf:\n tf[t] = {}\n # ¿cuál es el doc que estoy procesando? \n docID = D.index(d) \n # asigno el valor de tf para el término t y el documento actual\n # (número de veces que aparece t dividido entre el número de palabras de d)\n tf[t][docID] = d.count(t) / len(d) \n \n \nprint(tf)", "La aproximación anterior, tal cual está programada, arma un diccionario de diccionarios pero tiene varias desventajas:\n\nno almaceno los valores de tf para aquellos documentos que no contienen ninguna ocurrencia de t. 
\nsi las claves de mi diccionario son números enteros correlativos, como es el caso, haría mejor en utilizar una estructura de datos ordenada: una lista.\n\nCalculando tf (2º intento)", "# calculamos los valores de tf para cada término t y cada docID \n# como un diccionario de listas, tal que tf[t][i] = valor\n\ntf = {}\n\n# primera iteración, creo el esqueleto del diccionario de listas \n# iteramos sobre los documentos del corpus\nfor d in D:\n # iteramos sobre las palabras del documento\n for t in d:\n # relleno todas las casillas con 0\n tf[t] = [0] * len(D)\n \nprint('tf solo contiene 0s') \nprint(tf)\n\n\n# segunda iteración, reasigno los valores sólo en aquellas posiciones donde sea necesario\n# iteramos sobre los documentos del corpus\nfor d in D:\n # iteramos sobre las palabras del documento\n for t in d:\n docID = D.index(d)\n tf[t][docID] = d.count(t) / len(d)\n \nprint('\\ntf contiene los valores de tf que corresponden') \nprint(tf)", "En el caso de este corpus ficticio, todos los valores de tf son, o bien 0 (si el término no aparece en el documento), o bien $1/3$ si aparece una sola vez.\nidf (inverse document frequency)\nTrabajar unicamente con las frecuencias de los términos conlleva un problema: todos los términos presentes en la colección se consideran igualmente relevantes a la hora de discriminar la relevancia de los documentos, atendiendo a sus frecuencias. Y resulta que esto no es verdad. \nImaginemos un corpus en el que la frecuencia total de dos términos concretos, este y fonema, es similar en términos absolutos. La distribución de estos términos a lo largo de la coleccion es seguramente muy diferente. El primero aparece con una distribución uniforme a lo largo del corpus, su capacidad discriminativa es baja y debería penalizarse a la hora de asignar relevancia (como el resto de stopwords). El segundo, por el contrario, se concentra principalmente en documentos que hablan de fonología, su capacidad discriminativa es alta y debería ser premiado.\nExisten mecanismos correctores para incorporar estas penalizaciones y premios en nuestros pesos. Los más habituales pasan por recurrir a la frecuencia de documento $\\mbox{df}_t$, definida como el número de documentos de la colección $D$ que contienen el término $t$: $\\mbox{df}_t = {|{d \\in D: t \\in d}|}$.\nMás concretamente, se calcula la frecuencia inversa de documento, o idf (inverse document frequency), definida como: $\\mbox{idf}_t = \\log {|D|\\over \\mbox{df}_t}$, donde $|D|$ indica el número total de documentos de nuestra colección. De este modo, el idf de un término específico pero muy discriminativo será alto, mientras que el de un término muy frecuente a lo largo de la coleccion será bajo.\nCalculando df", "# calculamos los valores de df para cada término t\ndf = {}\n\n# iteramos sobre los término del vocabulario\nfor t in tf:\n # reiniciamos los valores a 0\n df[t] = 0\n for d in D:\n # para cada documento d que contenga a t, sumamos +1 al df correspondiente\n if t in d:\n df[t] += 1\n\nprint(df)", "Los valores de df son números enteros: el número de documentos del corpus que contienen cada uno de los términos.\nCalculando idf", "import math\n\n# calculamos los valores de idf para cada término t\nidf = {}\n\n# iteramos sobre los término del vocabulario\nfor t in tf:\n idf[t] = math.log(len(D) / df[t])\n\nprint(idf)", "Fíjate cómo interpretamos estos valores. 
Los términos que aparecen en un solo documento, tienen un idf más alto, son mejores descriptores del contenido de esos documentos, tienen más poder para discriminar temáticas. Los términos que se distribuyen en varios documentos tienen un idf más bajo, son peores descriptores.\ntf.idf\ntd.idf (term frequency - inverse document frequency) es una medida numérica que expresa la relevancia de una palabra de un documento con respecto a una colección de documentos. Es uno de los esquemas de pesado más comunes en las tareas relacionadas con la recuperación de información y la minería de texto.\nEl objetivo de esta métrica es representar los documentos de texto como vectores, ignorando el orden concreto de las palabras pero manteniendo la información relativa a las frecuencias de aparición. \nEl valor de tf-idf de una palabra:\n\nes mayor cuanto más frecuente sea esta palabra dentro de un documento concreto, pero;\nes mayor cuando menos común sea la palabra en otros documentos de la colección.\n\nEstas dos características premian a los términos que son muy frecuentes en determinados documentos concretos pero poco comunes en general: estos términos pueden considerarse buenos descriptores de un conjunto de documentos. Y a la vez, penalizan aquellos términos que aparecen con mucha frecuencia a lo largo de toda la colección, como las stopwords.\nCalculando tf.idf\ntf.idf se calcula como el producto de dos términos: $\\mathrm{tf.idf}(t, d, D) = \\mathrm{tf}(t, d) \\times \\mathrm{idf}(t, D)$\n\n\nla frecuencia de un término (tf): el número de veces que una determinada palabra aparece en un documento. \n\n\nla frecuencia inversa de documento (idf): el logaritmo del número total de documentos en el corpus dividido entre el número de documentos en los que el término aparece.\n\n\nYa hemos calculado previamente esos valores. Bastará con realizar los productos.", "# calculamos los valores de tf.idf para cada término t y cada docID \n# como un diccionario de listas, tal que tfidf[t][i] = valor\ntfidf = {}\n\n# iteramos sobre los términos del vocabulario\nfor t in tf:\n tfidf[t] = [] # inicializamos con una lista vacía\n # iteramos sobre los valores de tf del término t\n for d in tf[t]:\n # añadimos el nuevo valor multiplicando tf * idf\n tfidf[t].append( d * idf[t])\n \nprint(tfidf)", "Repetimos el experimento con más documentos\nVamos a repetir todo lo visto hasta ahora en el cuaderno con otras colección ficticia de documentos.\nParte del código de las celdas anteriores lo voy a codificar como funciones, de manera que podamos ejecutar el cálculo de los distintos valores de manera más clara. \n¡Allá vamos!", "def calcula_tf(corpus):\n \"\"\"Calcula los valores de tf para cada término t de un corpus. 
\n Devuelve un diccionario de listas tf[t][docID] = valor\"\"\"\n import math\n tf = {}\n # primera iteración, creo el esqueleto del diccionario de listas \n # iteramos sobre los documentos del corpus\n for d in corpus:\n # iteramos sobre las palabras del documento\n for t in d:\n # rellenamos las casillas con casi el log de casi 0\n tf[t] = [math.log(0.00000001)] * len(D)\n \n # segunda iteración, reasigno los valores sólo en aquellas posiciones donde sea necesario\n # iteramos sobre los documentos del corpus\n for d in corpus:\n # iteramos sobre las palabras del documento\n for t in d:\n docID = corpus.index(d)\n tf[t][docID] = 1 + math.log(d.count(t) / len(d)) # log normalization\n \n return tf\n\ndef calcula_idf(vocabulario, corpus):\n \"\"\"Calcula los valores de idf para una lista de vocabulario y un corpus.\n Devuelve un diccionario idf[t] = valor\"\"\"\n import math \n # primero, calculamos los valores de df para cada término t\n df = {}\n # iteramos sobre los término del vocabulario\n for t in vocabulario:\n # reiniciamos los valores a 0\n df[t] = 0\n for d in corpus:\n # para cada documento d que contenga a t, sumamos +1 al df correspondiente\n if t in d:\n df[t] += 1\n\n # después, calculamos los valores de idf para cada término t\n idf = {}\n # iteramos sobre los término del vocabulario\n for t in vocabulario:\n idf[t] = math.log(len(corpus) / df[t])\n\n return idf\n\ndef calcula_tfidf(tf, idf):\n \"\"\"Calcula los valores de tf.idf para un diccionario de valores tf y otro de valores idf.\n Devuelve un diccionario de listas tfidf[t][i] = valor\n \"\"\"\n tfidf = {}\n # iteramos sobre los términos del vocabulario\n for t in tf:\n tfidf[t] = [] # inicializamos con una lista vacía\n # iteramos sobre los valores de tf del término t\n for d in tf[t]:\n # añadimos el nuevo valor multiplicando tf * idf\n tfidf[t].append( d * idf[t])\n\n return tfidf\n\n# construyo un nuevo corpus como una lista de docs, donde cada doc es una lista de palabras\n# https://www.goodreads.com/author/quotes/272231.Eminem\n\neminem_quotes = \"\"\"Love when spelled backwards and read phonetically reads evil|\nDon’t do drugs don’t have unprotected sex don’t be violent Leave that to me|\nIf you have enemies good that means you stood up for something|\nSomewhere deep down there's a decent man in me he just can't be found|\nI can't tell you what it really is I can only tell you what it feels like|\nBehind every sucessful person lies a pack of haters|\nSometimes I'm real cool but sometimes I could be a real asshole I think everyone is like that|\nLove is just a word but you bring it definition|\nDamn How much damage can you do with a pen|\nDon't let them say you ain't beautiful They can all get fucked just stay true to you|\nI come from Detroit where it's rough and I'm not a smooth talker|\nIf there's not drama and negativity in my life all my songs will be really wack and boring or something|\nI always wished for this but it's almost turning into more of a nightmare than a dream|\nDealing with backstabbers there was one thing I learned They're only powerful when you got your back turned|\nWhen I say I'll murder my baby's mother maybe I wanted to but I didn't Anybody who takes it literally is 10 times sicker than I am|\nWhen you're a little kid you don't see color and the fact that my friends were black never crossed my mind It never became an issue until I was a teenager and started trying to rap|\nIt sometimes feels like a strange movie you know it’s all so weird that sometimes I wonder if it is really 
happening|\nPersonally I just think rap music is the best thing out there period If you look at my deck in my car radio you're always going to find a hip-hop tape; that's all I buy that's all I live that's all I listen to that's all I love|\nI'm just a little bit sicker then the average individual I think|\nImma be what I set out to be without a doubt undoubtedly|\nThe truth is you don't know what is going to happen tomorrow Life is a crazy ride and nothing is guaranteed|\nYou'd have to walk a thousand miles in my shoes just to see what its like to be me|\nDon't let them tell you ain't beautiful|\nI act like shit don’t phase me inside it drives me crazy my insecurities could eat me alive|\nBut music is reflection of self we just explain it and then we get our checks in the mail|\nSometimes I feel like rap music is almost the key to stopping racism|\nI might talk about killing people but that doesn't mean I do it|\nBefore I was famous when I was just working in Gilbert's Lodge everything was moving in slow motion|\"\"\".split('|\\n')\n\n# nuestro corpus D es una lista de documentos\n# cada doc es una lista de palabras\nD = []\nfor quote in eminem_quotes: \n D.append( quote.lower().split() )", "Ahora sí lo probamos :-)", "print('Calculando los valores tf... ', end='')\ntf = calcula_tf(D)\nprint('¡ok!')\n\nprint('Calculando los valores idf... ', end='')\nidf = calcula_idf(tf.keys(), D)\nprint('¡ok!')\n\nprint('Calculando los valores tf.idf... ', end='')\ntfidf = calcula_tfidf(tf, idf)\nprint('¡ok!\\n\\n')\n\n# imprimimos los valores de algunos términos\nprint('love', tfidf['love'], '\\n')\nprint('the', tfidf['the'], '\\n')\nprint('backwards', tfidf['backwards'], '\\n')\nprint('killing', tfidf['killing'], '\\n')\n\nprint('Los valores tf.idf para cada término del vocabulario son:')\nfor t in tfidf:\n print(t, '=>')\n print(tfidf[t], '\\n\\n')", "Referencias\n\ntf-idf en Wikipedia\nThe Vector Space Model\ntf.idf with Google n-Grams and POS Tags\nVector Representations of Words" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
tensorflow/docs-l10n
site/ja/probability/examples/Undocumented_Infection_and_the_Dissemination_of_SARS-CoV2.ipynb
apache-2.0
[ "Copyright 2020 The TensorFlow Authors.\nCopyright 2020 Sen Pei (Columbia University).\nLicensed under the Apache License, Version 2.0 (the \"License\");", "#@title Licensed under the Apache License, Version 2.0 (the \"License\"); { display-mode: \"form\" }\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "報告されていない実質的な感染は、新型コロナウイルス(SARS-CoV2)を急速に拡散させる\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://www.tensorflow.org/probability/examples/Undocumented_Infection_and_the_Dissemination_of_SARS-CoV2\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\">TensorFlow.org で表示</a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/probability/examples/Undocumented_Infection_and_the_Dissemination_of_SARS-CoV2.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\">Google Colab で実行</a></td>\n <td><a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/ja/probability/examples/Undocumented_Infection_and_the_Dissemination_of_SARS-CoV2.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\">GitHub でソースを表示</a></td>\n <td><a href=\"https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/probability/examples/Undocumented_Infection_and_the_Dissemination_of_SARS-CoV2.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\">ノートブックをダウンロード</a></td>\n</table>\n\nこのドキュメントでは、Li et al による 2020 年 3 月 16 日の同名の論文に基づいて TensorFlow Probability のモデルを作成します。TensorFlow Probability プラットフォームで本論文の著者の方法と結果を忠実に再現し、現代の疫学モデリングの設定における TFP の機能の一部を紹介します。TensorFlow を使用すると、元の Matlab コードに比べて約 10 倍スピードアップします。また、TensorFlow Probability はベクトル化されたバッチ計算を広くサポートしているため、数百の独立したレプリケーションに拡張できます。\n元の論文\nRuiyun Li, Sen Pei, Bin Chen, Yimeng Song, Tao Zhang, Wan Yang, and Jeffrey Shaman. Substantial undocumented infection facilitates the rapid dissemination of novel coronavirus (SARS-CoV2). 
(2020), doi: https://doi.org/10.1126/science.abb3221 .\n要旨: 「報告されていない新規コロナウイルス(SARS-CoV2)感染の有病率と伝染性の推定は、この病気の全体的な有病率とパンデミックの可能性を理解するために重要です。モビリティデータ、ネットワーク化された動的メタ個体群モデル、ベイズ推定と組み合わせて、中国内で報告された感染の観察結果を使用し、報告されていない感染の割合やその伝染性など、SARS-CoV2 に関連する重大な疫学的特性を推測します。2020 年 1 月 23 日の渡航禁止令以前は、すべての感染症の 86% が報告されていなかったと推定されます(95% CI: [82%–90%])。 一人当たり、報告されていない感染の伝播率は報告された感染の 55%([46%–62%])でしたが、その数が多いため、報告されていない感染が報告された症例の 79% の感染源でした。これらの調査結果は、SARS-CoV2 の急速な地理的な拡散を説明し、このウイルスの封じ込めが特に困難であることを示しています。」\nコードとデータへの Github リンク。\n概要\n本モデルは疾患のコンパートメントモデルであり、「感受性」、「曝露」(感染しているが他の人への感染性はない)、「報告されていない感染性」、および「最終的に報告された感染性」の区画があります。このモデルには、2 つの注目すべき点があります。375 の中国の都市ごとに別々の区画があり、人々が 1 つの都市から別の都市にどのように移動するかについての仮定があります。また、感染の報告が遅れるため、 $t$ 日目に「最終的に報告された感染性」になった場合は、確率的な後日まで、観察された症例と見なされません。\nこのモデルは、報告されていない症例は軽度なために報告されていない、そして、その場合、他の人への感染率が低いことを前提としています。元の論文での重要な主なパラメータは、報告されていない症例の割合です。これは、既存の感染の程度と報告されていない感染が病気の蔓延に及ぼす影響を推定するためです。\nこのコラボは、ボトムアップスタイルのコードウォークスルーとして構成されています。以下を見ていきます。\n\nデータを取り込んで簡単に調べる\nモデルの状態空間とダイナミクスを定義する\nLi et al に従って、モデルで推論を行うための一連の関数を構築する\nそれらを呼び出して、結果を調べます。 ネタバレ:結果は論文の結果と同じです。\n\nインストールと Python のインポート", "!pip3 install -q tf-nightly tfp-nightly\n\nimport collections\nimport io\nimport requests\nimport time\nimport zipfile\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\nfrom tensorflow_probability.python.internal import samplers\n\ntfd = tfp.distributions\ntfes = tfp.experimental.sequential", "データのインポート\ngithub からデータをインポートして、その一部を調べてみましょう。", "r = requests.get('https://raw.githubusercontent.com/SenPei-CU/COVID-19/master/Data.zip')\nz = zipfile.ZipFile(io.BytesIO(r.content))\nz.extractall('/tmp/')\nraw_incidence = pd.read_csv('/tmp/data/Incidence.csv')\nraw_mobility = pd.read_csv('/tmp/data/Mobility.csv')\nraw_population = pd.read_csv('/tmp/data/pop.csv')", "以下は、1 日あたりの生の発生数です。23 日に渡航禁止令が発令されたため、最初の 14 日間(1 月 10 日から 1 月 23 日)に最も関心があります。この論文では、1 月 10〜23 日と 1 月 23 日以降を別々に、さまざまなパラメータでモデル化しています。 ここでは期間を最初の期間に制限します。", "raw_incidence.drop('Date', axis=1) # The 'Date' column is all 1/18/21\n# Luckily the days are in order, starting on January 10th, 2020.", "武漢の発生数を確認します。", "plt.plot(raw_incidence.Wuhan, '.-')\nplt.title('Wuhan incidence counts over 1/10/20 - 02/08/20')\nplt.show()", "次に初期人口の数を確認します。", "raw_population", "また、どのエントリが武漢であるかを確認して記録します。", "raw_population['City'][169]\n\nWUHAN_IDX = 169", "ここに、異なる都市間のモビリティマトリックスが表示されます。これは、最初の 14 日間に異なる都市間を移動する人々の数のプロキシです。これは、2018 年の旧正月シーズンに Tencent から提供された GPS データから派生しています。Li et al は、2020 年シーズン中のモビリティを、未知の(推論の対象となる)定数係数 $\\theta$ にこれを掛けたものとしてモデル化します。", "raw_mobility", "最後に、これらすべてを前処理して、使えるように numpy 配列にします。", "# The given populations are only \"initial\" because of intercity mobility during\n# the holiday season.\ninitial_population = raw_population['Population'].to_numpy().astype(np.float32)", "モビリティデータを [L, L, T] 型のテンソルに変換します。ここで、L は位置の数、T は時間ステップの数です。", "daily_mobility_matrices = []\nfor i in range(1, 15):\n day_mobility = raw_mobility[raw_mobility['Day'] == i]\n \n # Make a matrix of daily mobilities.\n z = pd.crosstab(\n day_mobility.Origin, \n day_mobility.Destination, \n values=day_mobility['Mobility Index'], aggfunc='sum', dropna=False)\n \n # Include every city, even if there are no rows for some in the raw data on\n # some day. This uses the sort order of `raw_population`.\n z = z.reindex(index=raw_population['City'], columns=raw_population['City'], \n fill_value=0)\n # Finally, fill any missing entries with 0. 
This means no mobility.\n z = z.fillna(0)\n daily_mobility_matrices.append(z.to_numpy())\n\nmobility_matrix_over_time = np.stack(daily_mobility_matrices, axis=-1).astype(\n np.float32)", "最後に、観察された感染の [L, T] 表を作成します。", "# Remove the date parameter and take the first 14 days.\nobserved_daily_infectious_count = raw_incidence.to_numpy()[:14, 1:]\nobserved_daily_infectious_count = np.transpose(\n observed_daily_infectious_count).astype(np.float32)", "そして、希望する形状になったことを再確認します。都市数は 375、期間は 14 日間です。", "print('Mobility Matrix over time should have shape (375, 375, 14): {}'.format(\n mobility_matrix_over_time.shape))\nprint('Observed Infectious should have shape (375, 14): {}'.format(\n observed_daily_infectious_count.shape))\nprint('Initial population should have shape (375): {}'.format(\n initial_population.shape))", "状態とパラメータの定義\nモデルの定義を始めましょう。再現しているモデルは、SEIR モデルの変形です。ここでは、次の時変状態があります。\n\n$S$: 各都市の感染する可能性のある人の数。\n$E$: 各都市でこの病気に曝露しているがまだ感染性がない人の数。生物学的には、これは疾患することに相当し、すべての曝露された人々が最終的に感染性になります。\n$I^u$: 感染性があるが報告されていない各都市の人の数。モデルでは、これは実際には「報告されることがない」ことを意味します。\n$I^r$: 感染性があり、そのことが報告されている各都市の人の数。Li et al のモデルは遅延を報告しているため、$I^r$ は実際には、「症例は将来のある時点で報告されるほど深刻である」に対応します。\n\n以下に示すように、EAKF(Ensemble Adjustment Kalman Filter)を時間的に前方に実行することにより、これらの状態を推測します。EAKF の状態ベクトルは、これらの量ごとに 1 つの都市インデックス付きベクトルです。\nモデルには、次の推定可能なグローバルな時不変パラメータがあります。\n\n$\\beta$: 報告された感染者による感染率。\n$\\mu$: 報告されていない感染者による相対的な感染率。これは、$\\mu \\beta$ を掛けて取得します。\n$\\theta$: 都市間移動係数。これは、モビリティデータの過少報告(および2 018 年から 2020 年までの人口増加)を補正する 1 より大きい係数です。\n$Z$: 平均潜伏期間(すなわち、「曝露」状態の期間)。\n$\\alpha$: これは、(最終的に)報告されるのに十分なほど深刻な感染の割合です。\n$D$: 感染の平均期間(つまり、いずれかの「感染」状態の期間)。\n\n状態の EAKF のイテレーションフィルタリングループを使用して、これらのパラメータの点推定を推測します。\nまた、モデルは、推論されていない定数にも依存します。\n\n$M$: 都市間モビリティマトリックス。これは時変であり、与えられると推定します。前述のとおり、これは都市間の実際の人口移動を得るために、推測されたパラメータ $\\theta$ によりスケーリングされます。\n$N$: 各都市の総人数。 初期母集団は与えられたとし、母集団の時間変化は移動度数 $\\theta M$ から計算されます。\n\nまず、状態とパラメータを保持するためのデータ構造をいくつか作成します。", "SEIRComponents = collections.namedtuple(\n typename='SEIRComponents',\n field_names=[\n 'susceptible', # S\n 'exposed', # E\n 'documented_infectious', # I^r\n 'undocumented_infectious', # I^u\n # This is the count of new cases in the \"documented infectious\" compartment.\n # We need this because we will introduce a reporting delay, between a person\n # entering I^r and showing up in the observable case count data.\n # This can't be computed from the cumulative `documented_infectious` count,\n # because some portion of that population will move to the 'recovered'\n # state, which we aren't tracking explicitly.\n 'daily_new_documented_infectious'])\n\nModelParams = collections.namedtuple(\n typename='ModelParams',\n field_names=[\n 'documented_infectious_tx_rate', # Beta\n 'undocumented_infectious_tx_relative_rate', # Mu\n 'intercity_underreporting_factor', # Theta\n 'average_latency_period', # Z\n 'fraction_of_documented_infections', # Alpha\n 'average_infection_duration' # D\n ]\n)", "また、パラメータの値に対する Li et al の境界をコーディングします。", "PARAMETER_LOWER_BOUNDS = ModelParams(\n documented_infectious_tx_rate=0.8,\n undocumented_infectious_tx_relative_rate=0.2,\n intercity_underreporting_factor=1.,\n average_latency_period=2.,\n fraction_of_documented_infections=0.02,\n average_infection_duration=2.\n)\n\nPARAMETER_UPPER_BOUNDS = ModelParams(\n documented_infectious_tx_rate=1.5,\n undocumented_infectious_tx_relative_rate=1.,\n intercity_underreporting_factor=1.75,\n average_latency_period=5.,\n fraction_of_documented_infections=1.,\n average_infection_duration=5.\n)", "SEIR ダイナミクス\nここでは、パラメータと状態の関係を定義します。\nLi et al 
の時間ダイナミクス方程式(補足資料、方程式 1〜5)は次のとおりです。\n$\\frac{dS_i}{dt} = -\\beta \\frac{S_i I_i^r}{N_i} - \\mu \\beta \\frac{S_i I_i^u}{N_i} + \\theta \\sum_k \\frac{M_{ij} S_j}{N_j - I_j^r} - + \\theta \\sum_k \\frac{M_{ji} S_j}{N_i - I_i^r}$\n$\\frac{dE_i}{dt} = \\beta \\frac{S_i I_i^r}{N_i} + \\mu \\beta \\frac{S_i I_i^u}{N_i} -\\frac{E_i}{Z} + \\theta \\sum_k \\frac{M_{ij} E_j}{N_j - I_j^r} - + \\theta \\sum_k \\frac{M_{ji} E_j}{N_i - I_i^r}$\n$\\frac{dI^r_i}{dt} = \\alpha \\frac{E_i}{Z} - \\frac{I_i^r}{D}$\n$\\frac{dI^u_i}{dt} = (1 - \\alpha) \\frac{E_i}{Z} - \\frac{I_i^u}{D} + \\theta \\sum_k \\frac{M_{ij} I_j^u}{N_j - I_j^r} - + \\theta \\sum_k \\frac{M_{ji} I^u_j}{N_i - I_i^r}$\n$N_i = N_i + \\theta \\sum_j M_{ij} - \\theta \\sum_j M_{ji}$\n$i$ と $j$ の下付き文字は都市にインデックスを付けます。これらの方程式は、病気の時間変化をモデル化します。\n\nより多くの感染につながる感染者との接触\n「曝露」状態から「感染性」状態の 1 つへの病気の進行。\n「感染性」状態から回復への疾患の進行。これは、モデル化された集団からの除去によってモデル化されます。\n曝露された、または報告されていない感染者を含む、都市間の移動\n都市間の移動による毎日の都市人口の時間変化\n\nLi et al に従って、最終的に報告されるほど深刻な症例を持つ人々は都市間を移動しないと仮定します。\nまた、Li et al に従って、これらの変化を項ごとのポアソンノイズの影響を受けるものとして扱います。つまり、各項は実際にはポアソンのレートであり、サンプルから真の変化が得られます。ポアソンサンプルを(加算するのではなく)減算してもポアソン分布の結果が得られないため、ポアソンノイズは項ごとに異なります。\nこれらの変化を古典的な 4 次のルンゲクッタ法積分器に合わせて進化させますが、最初にそれらを計算する関数を定義します(ポアソンノイズのサンプリングを含む)。", "def sample_state_deltas(\n state, population, mobility_matrix, params, seed, is_deterministic=False):\n \"\"\"Computes one-step change in state, including Poisson sampling.\n \n Note that this is coded to support vectorized evaluation on arbitrary-shape\n batches of states. This is useful, for example, for running multiple\n independent replicas of this model to compute credible intervals for the\n parameters. We refer to the arbitrary batch shape with the conventional\n `B` in the parameter documentation below. This function also, of course,\n supports broadcasting over the batch shape.\n\n Args:\n state: A `SEIRComponents` tuple with fields Tensors of shape\n B + [num_locations] giving the current disease state.\n population: A Tensor of shape B + [num_locations] giving the current city\n populations.\n mobility_matrix: A Tensor of shape B + [num_locations, num_locations] giving\n the current baseline inter-city mobility.\n params: A `ModelParams` tuple with fields Tensors of shape B giving the\n global parameters for the current EAKF run.\n seed: Initial entropy for pseudo-random number generation. 
The Poisson\n sampling is repeatable by supplying the same seed.\n is_deterministic: A `bool` flag to turn off Poisson sampling if desired.\n\n Returns:\n delta: A `SEIRComponents` tuple with fields Tensors of shape\n B + [num_locations] giving the one-day changes in the state, according\n to equations 1-4 above (including Poisson noise per Li et al).\n \"\"\"\n undocumented_infectious_fraction = state.undocumented_infectious / population\n documented_infectious_fraction = state.documented_infectious / population\n\n # Anyone not documented as infectious is considered mobile\n mobile_population = (population - state.documented_infectious)\n def compute_outflow(compartment_population):\n raw_mobility = tf.linalg.matvec(\n mobility_matrix, compartment_population / mobile_population)\n return params.intercity_underreporting_factor * raw_mobility\n def compute_inflow(compartment_population):\n raw_mobility = tf.linalg.matmul(\n mobility_matrix,\n (compartment_population / mobile_population)[..., tf.newaxis],\n transpose_a=True)\n return params.intercity_underreporting_factor * tf.squeeze(\n raw_mobility, axis=-1)\n\n # Helper for sampling the Poisson-variate terms.\n seeds = samplers.split_seed(seed, n=11)\n if is_deterministic:\n def sample_poisson(rate):\n return rate\n else:\n def sample_poisson(rate):\n return tfd.Poisson(rate=rate).sample(seed=seeds.pop())\n\n # Below are the various terms called U1-U12 in the paper. We combined the\n # first two, which should be fine; both are poisson so their sum is too, and\n # there's no risk (as there could be in other terms) of going negative.\n susceptible_becoming_exposed = sample_poisson(\n state.susceptible *\n (params.documented_infectious_tx_rate *\n documented_infectious_fraction +\n (params.undocumented_infectious_tx_relative_rate *\n params.documented_infectious_tx_rate) *\n undocumented_infectious_fraction)) # U1 + U2\n\n susceptible_population_inflow = sample_poisson(\n compute_inflow(state.susceptible)) # U3\n susceptible_population_outflow = sample_poisson(\n compute_outflow(state.susceptible)) # U4\n\n exposed_becoming_documented_infectious = sample_poisson(\n params.fraction_of_documented_infections *\n state.exposed / params.average_latency_period) # U5\n exposed_becoming_undocumented_infectious = sample_poisson(\n (1 - params.fraction_of_documented_infections) *\n state.exposed / params.average_latency_period) # U6\n\n exposed_population_inflow = sample_poisson(\n compute_inflow(state.exposed)) # U7\n exposed_population_outflow = sample_poisson(\n compute_outflow(state.exposed)) # U8\n\n documented_infectious_becoming_recovered = sample_poisson(\n state.documented_infectious /\n params.average_infection_duration) # U9\n undocumented_infectious_becoming_recovered = sample_poisson(\n state.undocumented_infectious /\n params.average_infection_duration) # U10\n\n undocumented_infectious_population_inflow = sample_poisson(\n compute_inflow(state.undocumented_infectious)) # U11\n undocumented_infectious_population_outflow = sample_poisson(\n compute_outflow(state.undocumented_infectious)) # U12\n\n # The final state_deltas\n return SEIRComponents(\n # Equation [1]\n susceptible=(-susceptible_becoming_exposed +\n susceptible_population_inflow +\n -susceptible_population_outflow),\n # Equation [2]\n exposed=(susceptible_becoming_exposed +\n -exposed_becoming_documented_infectious +\n -exposed_becoming_undocumented_infectious +\n exposed_population_inflow +\n -exposed_population_outflow),\n # Equation [3]\n documented_infectious=(\n 
exposed_becoming_documented_infectious +\n -documented_infectious_becoming_recovered),\n # Equation [4]\n undocumented_infectious=(\n exposed_becoming_undocumented_infectious +\n -undocumented_infectious_becoming_recovered +\n undocumented_infectious_population_inflow +\n -undocumented_infectious_population_outflow),\n # New to-be-documented infectious cases, subject to the delayed\n # observation model.\n daily_new_documented_infectious=exposed_becoming_documented_infectious)", "これが積分器です。これは完全に標準的なものですが、PRNG シードを sample_state_deltas 関数に渡して、ルンゲクッタ法が要求する各部分ステップで独立したポアソンノイズを取得する点が異なります。", "@tf.function(autograph=False)\ndef rk4_one_step(state, population, mobility_matrix, params, seed):\n \"\"\"Implement one step of RK4, wrapped around a call to sample_state_deltas.\"\"\"\n # One seed for each RK sub-step\n seeds = samplers.split_seed(seed, n=4)\n\n deltas = tf.nest.map_structure(tf.zeros_like, state)\n combined_deltas = tf.nest.map_structure(tf.zeros_like, state)\n\n for a, b in zip([1., 2, 2, 1.], [6., 3., 3., 6.]):\n next_input = tf.nest.map_structure(\n lambda x, delta, a=a: x + delta / a, state, deltas)\n deltas = sample_state_deltas(\n next_input,\n population,\n mobility_matrix,\n params,\n seed=seeds.pop(), is_deterministic=False)\n combined_deltas = tf.nest.map_structure(\n lambda x, delta, b=b: x + delta / b, combined_deltas, deltas)\n\n return tf.nest.map_structure(\n lambda s, delta: s + tf.round(delta),\n state, combined_deltas)", "初期化\nここでは、論文からの初期化スキームを実装します。\nLi et al に従い、私たちの推論スキームは、イテレーションフィルタリング外部ループ(IF-EAKF)に囲まれた、EAKF 内部ループになります。計算上、これは 3 種類の初期化が必要であることを意味します。\n\n内部 EAKF の初期状態\n最初の EAKF の初期パラメータでもある外部 IF の初期パラメータ\n1 つの IF イテレーションから次のイテレーションへのパラメータの更新。これは、最初の EAKF 以外の各 EAKF の初期パラメータとして機能します。", "def initialize_state(num_particles, num_batches, seed):\n \"\"\"Initialize the state for a batch of EAKF runs.\n \n Args:\n num_particles: `int` giving the number of particles for the EAKF.\n num_batches: `int` giving the number of independent EAKF runs to\n initialize in a vectorized batch.\n seed: PRNG entropy.\n \n Returns:\n state: A `SEIRComponents` tuple with Tensors of shape [num_particles,\n num_batches, num_cities] giving the initial conditions in each\n city, in each filter particle, in each batch member.\n \"\"\"\n num_cities = mobility_matrix_over_time.shape[-2]\n state_shape = [num_particles, num_batches, num_cities]\n susceptible = initial_population * np.ones(state_shape, dtype=np.float32)\n documented_infectious = np.zeros(state_shape, dtype=np.float32)\n daily_new_documented_infectious = np.zeros(state_shape, dtype=np.float32)\n\n # Following Li et al, initialize Wuhan with up to 2000 people exposed\n # and another up to 2000 undocumented infectious.\n rng = np.random.RandomState(seed[0] % (2**31 - 1))\n wuhan_exposed = rng.randint(\n 0, 2001, [num_particles, num_batches]).astype(np.float32)\n wuhan_undocumented_infectious = rng.randint(\n 0, 2001, [num_particles, num_batches]).astype(np.float32)\n \n # Also following Li et al, initialize cities adjacent to Wuhan with three\n # days' worth of additional exposed and undocumented-infectious cases,\n # as they may have traveled there before the beginning of the modeling\n # period.\n exposed = 3 * mobility_matrix_over_time[\n WUHAN_IDX, :, 0] * wuhan_exposed[\n ..., np.newaxis] / initial_population[WUHAN_IDX]\n undocumented_infectious = 3 * mobility_matrix_over_time[\n WUHAN_IDX, :, 0] * wuhan_undocumented_infectious[\n ..., np.newaxis] / initial_population[WUHAN_IDX]\n\n exposed[..., WUHAN_IDX] = wuhan_exposed\n 
undocumented_infectious[..., WUHAN_IDX] = wuhan_undocumented_infectious\n\n # Following Li et al, we do not remove the inital exposed and infectious\n # persons from the susceptible population.\n return SEIRComponents(\n susceptible=tf.constant(susceptible),\n exposed=tf.constant(exposed),\n documented_infectious=tf.constant(documented_infectious),\n undocumented_infectious=tf.constant(undocumented_infectious),\n daily_new_documented_infectious=tf.constant(daily_new_documented_infectious))\n \ndef initialize_params(num_particles, num_batches, seed):\n \"\"\"Initialize the global parameters for the entire inference run.\n\n Args:\n num_particles: `int` giving the number of particles for the EAKF.\n num_batches: `int` giving the number of independent EAKF runs to\n initialize in a vectorized batch.\n seed: PRNG entropy.\n \n Returns:\n params: A `ModelParams` tuple with fields Tensors of shape\n [num_particles, num_batches] giving the global parameters\n to use for the first batch of EAKF runs.\n \"\"\"\n # We have 6 parameters. We'll initialize with a Sobol sequence,\n # covering the hyper-rectangle defined by our parameter limits.\n halton_sequence = tfp.mcmc.sample_halton_sequence(\n dim=6, num_results=num_particles * num_batches, seed=seed)\n halton_sequence = tf.reshape(\n halton_sequence, [num_particles, num_batches, 6])\n halton_sequences = tf.nest.pack_sequence_as(\n PARAMETER_LOWER_BOUNDS, tf.split(\n halton_sequence, num_or_size_splits=6, axis=-1))\n def interpolate(minval, maxval, h):\n return (maxval - minval) * h + minval\n return tf.nest.map_structure(\n interpolate,\n PARAMETER_LOWER_BOUNDS, PARAMETER_UPPER_BOUNDS, halton_sequences)\n\ndef update_params(num_particles, num_batches,\n prev_params, parameter_variance, seed):\n \"\"\"Update the global parameters between EAKF runs.\n\n Args:\n num_particles: `int` giving the number of particles for the EAKF.\n num_batches: `int` giving the number of independent EAKF runs to\n initialize in a vectorized batch.\n prev_params: A `ModelParams` tuple of the parameters used for the previous\n EAKF run.\n parameter_variance: A `ModelParams` tuple specifying how much to drift\n each parameter.\n seed: PRNG entropy.\n \n Returns:\n params: A `ModelParams` tuple with fields Tensors of shape\n [num_particles, num_batches] giving the global parameters\n to use for the next batch of EAKF runs.\n \"\"\"\n # Initialize near the previous set of parameters. 
This is the first step\n # in Iterated Filtering.\n seeds = tf.nest.pack_sequence_as(\n prev_params, samplers.split_seed(seed, n=len(prev_params)))\n return tf.nest.map_structure(\n lambda x, v, seed: x + tf.math.sqrt(v) * tf.random.stateless_normal([\n num_particles, num_batches, 1], seed=seed),\n prev_params, parameter_variance, seeds)", "遅延\nこのモデルの重要な点の 1 つは、感染が遅れて報告されるという事実を明確に考慮していることです。つまり、 $t$ 日に $E$ 区間から $I^r$ 区間に移動した人は、後日まで、報告された観察可能な症例数として表示されない可能性があります。\n遅延はガンマ分布であると想定しています。Li et al に従って、形状に1.85 を使用し、レートをパラメータ化して、9 日間の平均報告遅延を生成します。", "def raw_reporting_delay_distribution(gamma_shape=1.85, reporting_delay=9.):\n return tfp.distributions.Gamma(\n concentration=gamma_shape, rate=gamma_shape / reporting_delay)", "観測値は離散的であるため、生の(連続的な)遅延を最も近い日に切り上げます。また、データの範囲が有限であるため、1 人の遅延分布は、残りの日数に渡ってカテゴリに分類されます。したがって、代わりに多項遅延確率を事前に計算することにより、$O(I^r)$ ガンマをサンプリングするよりも効率的に都市ごとの予測観測値を計算できます。", "def reporting_delay_probs(num_timesteps, gamma_shape=1.85, reporting_delay=9.):\n gamma_dist = raw_reporting_delay_distribution(gamma_shape, reporting_delay)\n multinomial_probs = [gamma_dist.cdf(1.)]\n for k in range(2, num_timesteps + 1):\n multinomial_probs.append(gamma_dist.cdf(k) - gamma_dist.cdf(k - 1))\n # For samples that are larger than T.\n multinomial_probs.append(gamma_dist.survival_function(num_timesteps))\n multinomial_probs = tf.stack(multinomial_probs)\n return multinomial_probs", "これらの遅延を、毎日記録されている新しい感染数に実際に適用するためのコードは次のとおりです。", "def delay_reporting(\n daily_new_documented_infectious, num_timesteps, t, multinomial_probs, seed):\n # This is the distribution of observed infectious counts from the current\n # timestep.\n\n raw_delays = tfd.Multinomial(\n total_count=daily_new_documented_infectious,\n probs=multinomial_probs).sample(seed=seed)\n\n # The last bucket is used for samples that are out of range of T + 1. Thus\n # they are not going to be observable in this model.\n clipped_delays = raw_delays[..., :-1]\n\n # We can also remove counts that are such that t + i >= T.\n clipped_delays = clipped_delays[..., :num_timesteps - t]\n # We finally shift everything by t. 
That means prepending with zeros.\n return tf.concat([\n tf.zeros(\n tf.concat([\n tf.shape(clipped_delays)[:-1], [t]], axis=0),\n dtype=clipped_delays.dtype),\n clipped_delays], axis=-1)", "推論\nまず、推論のためのいくつかのデータ構造を定義します。\n特に、推論を実行しながら状態とパラメータを共にパッケージ化するイテレーションフィルタリングを実行する必要があるので ParameterStatePair オブジェクトを定義します。\nまた、その他の情報をモデルにパッケージ化します。", "ParameterStatePair = collections.namedtuple(\n 'ParameterStatePair', ['state', 'params'])\n\n# Info that is tracked and mutated but should not have inference performed over.\nSideInfo = collections.namedtuple(\n 'SideInfo', [\n # Observations at every time step.\n 'observations_over_time',\n 'initial_population',\n 'mobility_matrix_over_time',\n 'population',\n # Used for variance of measured observations.\n 'actual_reported_cases',\n # Pre-computed buckets for the multinomial distribution.\n 'multinomial_probs',\n 'seed',\n ])\n\n# Cities can not fall below this fraction of people\nMINIMUM_CITY_FRACTION = 0.6\n\n# How much to inflate the covariance by.\nINFLATION_FACTOR = 1.1\n\nINFLATE_FN = tfes.inflate_by_scaled_identity_fn(INFLATION_FACTOR)", "これは、アンサンブルカルマンフィルタ用にパッケージ化された完全な観測モデルです。\n興味深い点は、報告の遅延です(前述のように計算されます)。アップストリームモデルは、各時間ステップで各都市の daily_new_documented_infectious を発行します。", "# We observe the observed infections.\ndef observation_fn(t, state_params, extra):\n \"\"\"Generate reported cases.\n \n Args:\n state_params: A `ParameterStatePair` giving the current parameters\n and state.\n t: Integer giving the current time.\n extra: A `SideInfo` carrying auxiliary information.\n\n Returns:\n observations: A Tensor of predicted observables, namely new cases\n per city at time `t`.\n extra: Update `SideInfo`.\n \"\"\"\n # Undo padding introduced in `inference`.\n daily_new_documented_infectious = state_params.state.daily_new_documented_infectious[..., 0]\n # Number of people that we have already committed to become\n # observed infectious over time.\n # shape: batch + [num_particles, num_cities, time]\n observations_over_time = extra.observations_over_time\n num_timesteps = observations_over_time.shape[-1]\n\n seed, new_seed = samplers.split_seed(extra.seed, salt='reporting delay')\n \n daily_delayed_counts = delay_reporting(\n daily_new_documented_infectious, num_timesteps, t,\n extra.multinomial_probs, seed)\n observations_over_time = observations_over_time + daily_delayed_counts\n\n extra = extra._replace(\n observations_over_time=observations_over_time,\n seed=new_seed)\n\n # Actual predicted new cases, re-padded.\n adjusted_observations = observations_over_time[..., t][..., tf.newaxis]\n # Finally observations have variance that is a function of the true observations:\n return tfd.MultivariateNormalDiag(\n loc=adjusted_observations,\n scale_diag=tf.math.maximum(\n 2., extra.actual_reported_cases[..., t][..., tf.newaxis] / 2.)), extra", "ここでは、遷移ダイナミクスを定義します。セマンティック作業はすでに完了しています。ここでは、EAKF フレームワーク用にパッケージ化し、Li et al に従って、都市の人口が小さくなりすぎないようにクリップします。", "def transition_fn(t, state_params, extra):\n \"\"\"SEIR dynamics.\n\n Args:\n state_params: A `ParameterStatePair` giving the current parameters\n and state.\n t: Integer giving the current time.\n extra: A `SideInfo` carrying auxiliary information.\n\n Returns:\n state_params: A `ParameterStatePair` predicted for the next time step.\n extra: Updated `SideInfo`.\n \"\"\"\n mobility_t = extra.mobility_matrix_over_time[..., t]\n new_seed, rk4_seed = samplers.split_seed(extra.seed, salt='Transition')\n new_state = rk4_one_step(\n state_params.state,\n extra.population,\n mobility_t,\n state_params.params,\n 
seed=rk4_seed)\n\n # Make sure population doesn't go below MINIMUM_CITY_FRACTION.\n new_population = (\n extra.population + state_params.params.intercity_underreporting_factor * (\n # Inflow\n tf.reduce_sum(mobility_t, axis=-2) - \n # Outflow\n tf.reduce_sum(mobility_t, axis=-1)))\n new_population = tf.where(\n new_population < MINIMUM_CITY_FRACTION * extra.initial_population,\n extra.initial_population * MINIMUM_CITY_FRACTION,\n new_population)\n\n extra = extra._replace(population=new_population, seed=new_seed)\n\n # The Ensemble Kalman Filter code expects the transition function to return a distribution.\n # As the dynamics and noise are encapsulated above, we construct a `JointDistribution` that when\n # sampled, returns the values above.\n\n new_state = tfd.JointDistributionNamed(\n model=tf.nest.map_structure(lambda x: tfd.VectorDeterministic(x), new_state))\n params = tfd.JointDistributionNamed(\n model=tf.nest.map_structure(lambda x: tfd.VectorDeterministic(x), state_params.params))\n \n state_params = tfd.JointDistributionNamed(\n model=ParameterStatePair(state=new_state, params=params))\n\n return state_params, extra", "最後に、推論方法を定義します。これは 2 つのループであり、外側のループはイテレーションフィルタリングであり、内側のループは EAKF です。", "# Use tf.function to speed up EAKF prediction and updates.\nensemble_kalman_filter_predict = tf.function(\n tfes.ensemble_kalman_filter_predict, autograph=False)\nensemble_adjustment_kalman_filter_update = tf.function(\n tfes.ensemble_adjustment_kalman_filter_update, autograph=False)\n\ndef inference(\n num_ensembles,\n num_batches,\n num_iterations,\n actual_reported_cases,\n mobility_matrix_over_time,\n seed=None,\n # This is how much to reduce the variance by in every iterative\n # filtering step.\n variance_shrinkage_factor=0.9,\n # Days before infection is reported.\n reporting_delay=9.,\n # Shape parameter of Gamma distribution.\n gamma_shape_parameter=1.85):\n \"\"\"Inference for the Shaman, et al. model.\n\n Args:\n num_ensembles: Number of particles to use for EAKF.\n num_batches: Number of batches of IF-EAKF to run.\n num_iterations: Number of iterations to run iterative filtering.\n actual_reported_cases: `Tensor` of shape `[L, T]` where `L` is the number\n of cities, and `T` is the timesteps.\n mobility_matrix_over_time: `Tensor` of shape `[L, L, T]` which specifies the\n mobility between locations over time.\n variance_shrinkage_factor: Python `float`. How much to reduce the\n variance each iteration of iterated filtering.\n reporting_delay: Python `float`. How many days before the infection\n is reported.\n gamma_shape_parameter: Python `float`. 
Shape parameter of Gamma distribution\n of reporting delays.\n\n Returns:\n result: A `ModelParams` with fields Tensors of shape [num_batches],\n containing the inferred parameters at the final iteration.\n \"\"\"\n print('Starting inference.')\n num_timesteps = actual_reported_cases.shape[-1]\n params_per_iter = []\n\n multinomial_probs = reporting_delay_probs(\n num_timesteps, gamma_shape_parameter, reporting_delay)\n\n seed = samplers.sanitize_seed(seed, salt='Inference')\n\n for i in range(num_iterations):\n start_if_time = time.time()\n seeds = samplers.split_seed(seed, n=4, salt='Initialize')\n if params_per_iter:\n parameter_variance = tf.nest.map_structure(\n lambda minval, maxval: variance_shrinkage_factor ** (\n 2 * i) * (maxval - minval) ** 2 / 4.,\n PARAMETER_LOWER_BOUNDS, PARAMETER_UPPER_BOUNDS)\n params_t = update_params(\n num_ensembles,\n num_batches,\n prev_params=params_per_iter[-1],\n parameter_variance=parameter_variance,\n seed=seeds.pop())\n else:\n params_t = initialize_params(num_ensembles, num_batches, seed=seeds.pop())\n\n state_t = initialize_state(num_ensembles, num_batches, seed=seeds.pop())\n population_t = sum(x for x in state_t)\n observations_over_time = tf.zeros(\n [num_ensembles,\n num_batches,\n actual_reported_cases.shape[0], num_timesteps])\n\n extra = SideInfo(\n observations_over_time=observations_over_time,\n initial_population=tf.identity(population_t),\n mobility_matrix_over_time=mobility_matrix_over_time,\n population=population_t,\n multinomial_probs=multinomial_probs,\n actual_reported_cases=actual_reported_cases,\n seed=seeds.pop())\n\n # Clip states\n state_t = clip_state(state_t, population_t)\n params_t = clip_params(params_t, seed=seeds.pop())\n\n # Accrue the parameter over time. We'll be averaging that\n # and using that as our MLE estimate.\n params_over_time = tf.nest.map_structure(\n lambda x: tf.identity(x), params_t)\n\n state_params = ParameterStatePair(state=state_t, params=params_t)\n\n eakf_state = tfes.EnsembleKalmanFilterState(\n step=tf.constant(0), particles=state_params, extra=extra)\n\n for j in range(num_timesteps):\n seeds = samplers.split_seed(eakf_state.extra.seed, n=3)\n \n extra = extra._replace(seed=seeds.pop())\n \n # Predict step.\n\n # Inflate and clip.\n new_particles = INFLATE_FN(eakf_state.particles)\n state_t = clip_state(new_particles.state, eakf_state.extra.population)\n params_t = clip_params(new_particles.params, seed=seeds.pop())\n eakf_state = eakf_state._replace(\n particles=ParameterStatePair(params=params_t, state=state_t))\n\n eakf_predict_state = ensemble_kalman_filter_predict(eakf_state, transition_fn)\n\n # Clip the state and particles.\n state_params = eakf_predict_state.particles\n state_t = clip_state(\n state_params.state, eakf_predict_state.extra.population)\n state_params = ParameterStatePair(state=state_t, params=state_params.params)\n\n # We preprocess the state and parameters by affixing a 1 dimension. This is because for\n # inference, we treat each city as independent. 
We could also introduce localization by\n # considering cities that are adjacent.\n state_params = tf.nest.map_structure(lambda x: x[..., tf.newaxis], state_params)\n eakf_predict_state = eakf_predict_state._replace(particles=state_params)\n\n # Update step.\n \n eakf_update_state = ensemble_adjustment_kalman_filter_update(\n eakf_predict_state,\n actual_reported_cases[..., j][..., tf.newaxis],\n observation_fn)\n \n state_params = tf.nest.map_structure(\n lambda x: x[..., 0], eakf_update_state.particles)\n\n # Clip to ensure parameters / state are well constrained.\n state_t = clip_state(\n state_params.state, eakf_update_state.extra.population)\n \n # Finally for the parameters, we should reduce over all updates. We get\n # an extra dimension back so let's do that.\n params_t = tf.nest.map_structure(\n lambda x, y: x + tf.reduce_sum(y[..., tf.newaxis] - x, axis=-2, keepdims=True),\n eakf_predict_state.particles.params, state_params.params)\n params_t = clip_params(params_t, seed=seeds.pop())\n params_t = tf.nest.map_structure(lambda x: x[..., 0], params_t)\n\n state_params = ParameterStatePair(state=state_t, params=params_t)\n eakf_state = eakf_update_state\n eakf_state = eakf_state._replace(particles=state_params)\n\n # Flatten and collect the inferred parameter at time step t.\n params_over_time = tf.nest.map_structure(\n lambda s, x: tf.concat([s, x], axis=-1), params_over_time, params_t)\n\n est_params = tf.nest.map_structure(\n # Take the average over the Ensemble and over time.\n lambda x: tf.math.reduce_mean(x, axis=[0, -1])[..., tf.newaxis],\n params_over_time)\n params_per_iter.append(est_params)\n print('Iterated Filtering {} / {} Ran in: {:.2f} seconds'.format(\n i, num_iterations, time.time() - start_if_time))\n\n return tf.nest.map_structure(\n lambda x: tf.squeeze(x, axis=-1), params_per_iter[-1])", "最終的な詳細: パラメータが範囲内にあり、負でないことを確認してパラメータと状態をクリッピングします。", "def clip_state(state, population):\n \"\"\"Clip state to sensible values.\"\"\"\n state = tf.nest.map_structure(\n lambda x: tf.where(x < 0, 0., x), state)\n\n # If S > population, then adjust as well.\n susceptible = tf.where(state.susceptible > population, population, state.susceptible)\n return SEIRComponents(\n susceptible=susceptible,\n exposed=state.exposed,\n documented_infectious=state.documented_infectious,\n undocumented_infectious=state.undocumented_infectious,\n daily_new_documented_infectious=state.daily_new_documented_infectious)\n\ndef clip_params(params, seed):\n \"\"\"Clip parameters to bounds.\"\"\"\n def _clip(p, minval, maxval):\n return tf.where(\n p < minval,\n minval * (1. + 0.1 * tf.random.stateless_uniform(p.shape, seed=seed)),\n tf.where(p > maxval,\n maxval * (1. - 0.1 * tf.random.stateless_uniform(\n p.shape, seed=seed)), p))\n params = tf.nest.map_structure(\n _clip, params, PARAMETER_LOWER_BOUNDS, PARAMETER_UPPER_BOUNDS)\n\n return params", "全てを実行する", "# Let's sample the parameters.\n#\n# NOTE: Li et al. 
run inference 1000 times, which would take a few hours.\n# Here we run inference 30 times (in a single, vectorized batch).\nbest_parameters = inference(\n num_ensembles=300,\n num_batches=30,\n num_iterations=10,\n actual_reported_cases=observed_daily_infectious_count,\n mobility_matrix_over_time=mobility_matrix_over_time)", "推論の結果です。すべてのグローバルパラメータの最尤値をプロットして、num_batches の独立した推論の実行全体での変動を示します。これは、補足資料の表 S1 に対応しています。", "fig, axs = plt.subplots(2, 3)\naxs[0, 0].boxplot(best_parameters.documented_infectious_tx_rate,\n whis=(2.5,97.5), sym='')\naxs[0, 0].set_title(r'$\\beta$')\n\naxs[0, 1].boxplot(best_parameters.undocumented_infectious_tx_relative_rate,\n whis=(2.5,97.5), sym='')\naxs[0, 1].set_title(r'$\\mu$')\n\naxs[0, 2].boxplot(best_parameters.intercity_underreporting_factor,\n whis=(2.5,97.5), sym='')\naxs[0, 2].set_title(r'$\\theta$')\n\naxs[1, 0].boxplot(best_parameters.average_latency_period,\n whis=(2.5,97.5), sym='')\naxs[1, 0].set_title(r'$Z$')\n\naxs[1, 1].boxplot(best_parameters.fraction_of_documented_infections,\n whis=(2.5,97.5), sym='')\naxs[1, 1].set_title(r'$\\alpha$')\n\naxs[1, 2].boxplot(best_parameters.average_infection_duration,\n whis=(2.5,97.5), sym='')\naxs[1, 2].set_title(r'$D$')\nplt.tight_layout()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
whitead/numerical_stats
unit_11/hw_2019/homework_11_key.ipynb
gpl-3.0
[ "Homework 11 Key\nCHE 116: Numerical Methods and Statistics\n4/11/2019\n\n1. Conceptual Questions (9 Points)\n3 Points each\n\nWhat is the difference between a bound and a constraint?\nWhat must you consider before choosing a method for minimization?\nGive an example of a non-convex problem you might see in chemical engineering. \n\n1.1\nA bound is a resitrction on the domain. A constraint is an arbitrary function or inequality that must be satisfied for a solution\n2.2\nNumber of dimnsions, if it's non-convex, if it's discrete, if there are bounds or constraints\n2.3\nReaction equilibrium problems have two solutions if you do not bound the domain, giving a non-convex problem. Other examples are possible.\n2. Optimization Problems (15 Points)\nSolve and plot the following expressions using python. Plot a red dot at your solution.\n\n[2 points (plot) + 2 points (solution)] Find all roots of this expression:\n\n$$\n\\frac{(x−4)^4}{8}+ \\frac{(x−2)^2}{2} - 4\n$$\n\n[2 + 2 points] Find the minimum of this expression with $r$ bounded to be $[0.9,3]$\n\n$$\n4\\left[\\frac{1}{r^8} - \\frac{1}{r^4}\\right]\n$$\n\n\n[2 + 2 points] Solve $e^{x / 4} = x$\n\n\n[3 points] You do not need to plot this problem. Solve the following system of equations and report your answer.\n\n\n$$\n2\\sqrt{x} - 3 \\cos z = 3\n$$\n$$\n2 x - z^2 = 18\n$$\n2.1", "import scipy.optimize as opt\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef prob1(x):\n return (x - 4)**4 / 8 + (x - 2)**2 / 2 - 4\n\nr1 = opt.root(prob1, x0=0)\nr2 = opt.root(prob1, x0=8)\n\nx = np.linspace(1, 5, 500)\nplt.plot(x, prob1(x))\nplt.plot([r1.x, r2.x], [0, 0], 'ro')\nplt.show()\nprint(\"The solution is {:.2f} and {:.2f}\".format(r1.x[0], r2.x[0]))", "2.2", "def prob2(r):\n return 4 * r**-8 - 4 * r**-4\nr = np.linspace(0.9,3, 1000)\nm = opt.minimize(prob2, bounds=[(0.9,3)], x0=1)\nprint(\"The minimum value occurs when x = {:.3f} and it is {:.1f}\".format(m.x[0],m.fun[0]))\nplt.plot(r, prob2(r))\nplt.plot(m.x, prob2(m.x), 'ro')\nplt.show()", "2.3", "def prob3(x):\n return np.exp(x / 4) - x\nx = np.linspace(0,3, 100)\n\ni = opt.root(prob3, x0=0)\n\nplt.plot(x, np.exp(x / 4))\nplt.plot(x, x)\nplt.plot(i.x, i.x, 'ro')\nplt.show()\nprint(\"the solution is {:.2f}\".format(i.x[0]))", "2.4", "def prob4(x):\n y1 = np.sqrt(x[0]) * 2 - 3 * np.cos(x[1]) - 3\n y2 = 2 * x[0] - x[1]**2 - 18\n return y1**2 + y2**2\nr = opt.minimize(prob4, x0=[0,0], bounds=[(0,100), (-10,10)])\n\nprint(f'x = {r.x[0]}, z = {r.x[1]}')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
tridesclous/tridesclous_examples
kampfflab_ultra_dense/kampfflab_ultra_dense.ipynb
mit
[ "Kampff lab - Ultra dense survey\nHere a description of the dataset:\nhttp://www.kampff-lab.org/ultra-dense-survey/\nHere the official publication of this open dataset:\nhttps://crcns.org/data-sets/methods/hdr-1/about-hdr-1\nAnd a paper is being preparing here:\nhttps://doi.org/10.1101/275818\nIntroduction\nThis dataset explore optimal size and density of electrodes.\nHere 255 extracellular electrodes (5 x 5 μm and spacing of 1 μm) \nDownload\nDataset must downloaded locally and manually from crcns or from the google drive in \"workdir\" path.\nThe PRB file\ntridesclous need a PRB file that describe the geometry of probe.\nCreate it by copy/paste or download it via github.", "# suposing the datset is downloaded here\nworkdir = '/media/samuel/dataspikesorting/DataSpikeSortingHD2/kampff/ultra dense/'\nfilename = workdir + 'T2/amplifier2017-02-08T21_38_55.bin'\n\n\n%matplotlib notebook\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tridesclous as tdc\nfrom tridesclous import DataIO, CatalogueConstructor, Peeler\nimport os, shutil", "create a DataIO (and remove if already exists)", "dirname = workdir + 'tdc_amplifier2017-02-02T17_18_46'\n\nif os.path.exists(dirname):\n #remove is already exists\n shutil.rmtree(dirname)\n \ndataio = DataIO(dirname=dirname)\n\n# feed DataIO with one file\ndataio.set_data_source(type='RawData', filenames=[filename], \n sample_rate=20000., dtype='int16', total_channel=256,\n bit_to_microVolt=0.195)\nprint(dataio)\n\n# set the probe file\ndataio.set_probe_file('kampff_ultra_dense_256.prb')", "CatalogueConstructor\nRun all chain in one shot.", "cc = CatalogueConstructor(dataio=dataio, chan_grp=0)\n\nfullchain_kargs = {\n 'duration' : 300.,\n 'preprocessor' : {\n 'highpass_freq' : 400.,\n 'lowpass_freq' : 5000.,\n 'smooth_size' : 0,\n 'chunksize' : 1024,\n 'lostfront_chunksize' : 128,\n 'signalpreprocessor_engine' : 'numpy',\n },\n 'peak_detector' : {\n 'peakdetector_engine' : 'numpy',\n 'peak_sign' : '-',\n 'relative_threshold' : 5.,\n 'peak_span' : 0.0002,\n },\n 'noise_snippet' : {\n 'nb_snippet' : 300,\n },\n 'extract_waveforms' : {\n 'n_left' : -20,\n 'n_right' : 30,\n 'mode' : 'rand',\n 'nb_max' : 20000,\n 'align_waveform' : False,\n },\n 'clean_waveforms' : {\n 'alien_value_threshold' : 100.,\n },\n }\nfeat_method = 'peak_max'\nfeat_kargs = {}\nclust_method = 'sawchaincut'\nclust_kargs = {}\n \ntdc.apply_all_catalogue_steps(cc, fullchain_kargs, \n feat_method, feat_kargs,clust_method, clust_kargs)\nprint(cc)", "Noise measurement", "dataio = DataIO(dirname=dirname)\ntdc.summary_noise(dataio=dataio, chan_grp=0)", "Inspect waveform quality at catalogue level", "tdc.summary_catalogue_clusters(dataio=dataio, chan_grp=0, label=0)", "construct catalogue", "cc.make_catalogue_for_peeler()", "apply peeler\nThis is the real spike sorting: find spike that correcpond to catalogue templates.", "initial_catalogue = dataio.load_catalogue(chan_grp=0)\npeeler = Peeler(dataio)\npeeler.change_params(catalogue=initial_catalogue,\n use_sparse_template=True,\n sparse_threshold_mad=1.5,\n use_opencl_with_sparse=True,\n\n cl_platform_index=1,\n cl_device_index=0)\npeeler.run(duration=300.,\n progressbar=True)", "final inspection of cells", "tdc.summary_after_peeler_clusters(dataio, chan_grp=0, label=0, neighborhood_radius=None, show_channels=False)\n\ntdc.summary_after_peeler_clusters(dataio, chan_grp=0, label=1, neighborhood_radius=None, show_channels=False)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
rrbb014/data_science
fastcampus_dss/2016_05_17/2016_0517_행렬의 연산과 성질.ipynb
mit
[ "행렬의 연산과 성질\n행렬에는 곱셈, 전치 이외에도 지수 함수 등의 다양한 연산을 정의할 수 있다. 각각의 정의와 성질을 알아보자.\n행렬의 부호\n행렬은 복수의 실수 값을 가지고 있으므로 행렬 전체의 부호는 정의할 수 없다. 하지만 행렬에서도 실수의 부호 정의와 유사한 기능을 가지는 정의가 존재한다. 바로 행렬의 양-한정(positive definite) 특성이다.\n모든 실수 공간 $\\mathbb{R}^n$ 의 0벡터가 아닌 벡터 $x \\in \\mathbb{R}^n$ 에 대해 다음 부등식이 성립하면 행렬 $A$ 가 양-한정(positive definite)이라고 한다.\n$$ x^T A x > 0 $$\n만약 이 식이 등호를 포함한다면 양-반한정(positive semi-definite)이라고 한다.\n$$ x^T A x \\geq 0 $$\n예를 들어 단위 행렬은 양-한정이다.\n$$ x^TI x = x^T\n\\begin{bmatrix}\n1&0&\\cdots&0\\\n0&1&\\cdots&0\\\n\\vdots&\\vdots&\\ddots&\\vdots\\\n0&0&\\cdots&1\\\n\\end{bmatrix}\nx\n= x_1^2 + x_2^2 + \\cdots + x_n^2 > 0\n$$\n다음과 같은 행렬도 양-한정이다.\n$$ M = \\begin{bmatrix} 2&-1&0\\-1&2&-1\\0&-1&2 \\end{bmatrix} $$\n$$ \n\\begin{align} \nx^{\\mathrm{T}}M x \n&= \\begin{bmatrix} (2x_1-b)&(-x_1+2x_2-x_3)&(-x_2+2c) \\end{bmatrix} \\begin{bmatrix} x_1\\x_2\\x_3 \\end{bmatrix} \\\n&= 2{x_1}^2 - 2x_1x_2 + 2{x_2}^2 - 2x_2x_3 + 2{x_3}^2 \\\n&= {x_1}^2+(x_1 - x_2)^{2} + (x_2 - x_3)^{2}+{x_3}^2\n\\end{align}\n$$\n행렬의 크기\n행렬에는 크기 개념과 유사하게 하나의 행렬에 대해 하나의 실수를 대응시키는 norm, 대각 성분(trace), 행렬식(determinant)에 대한 정의가 존재한다.\n행렬 Norm\n행렬의 norm 정의는 다양하지만 그 중 많이 쓰이는 induced p-norm 정의는 다음과 같다.\n$$ \\Vert A \\Vert_p = \\left( \\sum_{i=1}^m \\sum_{j=1}^n |a_{ij}|^p \\right)^{1/p} $$\n이 중 $p=2$는 특별히 Frobenius norm 이라고 불리며 다음과 같이 표시한다.\n$$ \\Vert A \\Vert_F = \\sqrt{\\sum_{i=1}^m \\sum_{j=1}^n a_{ij}^2} $$\nNumPy에서는 linalg 서브패키지의 norm 명령으로 Frobenious norm을 계산할 수 있다.", "A = (np.arange(9) - 4).reshape((3, 3))\nA\n\nnp.linalg.norm(A)", "대각 성분\n대각 성분(trace) 행렬의 특성을 결정하는 숫자 중 하나로 정방 행렬(square matrix)에 대해서만 정의되며 다음과 같이 대각 성분(diaginal)의 합으로 계산된다.\n$$ \\operatorname{tr}(A) = a_{11} + a_{22} + \\dots + a_{nn}=\\sum_{i=1}^{n} a_{ii} $$\n대각 성분은 다음과 같은 성질을 지닌다.\n$$ \\text{tr} (cA) = c\\text{tr} (A) $$\n$$ \\text{tr} (A^T) = \\text{tr} (A) $$\n$$ \\text{tr} (A + B) = \\text{tr} (A) + \\text{tr} (B)$$\n$$ \\text{tr} (AB) = \\text{tr} (BA) $$\n$$ \\text{tr} (ABC) = \\text{tr} (BCA) = \\text{tr} (CAB) $$\n특히 마지막 성질은 trace trick이라고 하여 이차 형식(quadratic form)의 값을 구하는데 유용하게 사용된다.\n$$ x^TAx = \\text{tr}(x^TAx) = \\text{tr}(Axx^T) = \\text{tr}(xx^TA) $$\nNumPy에서는 linalg 서브패키지의 trace 명령으로 trace를 계산할 수 있다. \nNumPy에서는 linalg 서브패키지의 trace 명령으로 trace를 계산할 수 있다.", "np.trace(np.eye(3))", "행렬식\n정방 행렬 $A$의 행렬식(determinant) $\\det (A)$ 는 Laplace formula라고 불리는 재귀적인 방법으로 정의된다. \n이 식에서 $a_{i,j}$는 $A$의 i행, j열 원소이고 $M_{i,j}$은 정방 행렬 $A$ 에서 i행과 j열을 지워서 얻어진 행렬의 행렬식이다.\n$$ \\det(A) = \\sum_{j=1}^n (-1)^{i+j} a_{i,j} M_{i,j} $$\n행렬식은 다음과 같은 성질을 만족한다.\n$$ \\det(I) = 1 $$\n$$ \\det(A^{\\rm T}) = \\det(A) $$\n$$ \\det(A^{-1}) = \\frac{1}{\\det(A)}=\\det(A)^{-1} $$\n$$ \\det(AB) = \\det(A)\\det(B) $$\n$$ A \\in \\mathbf{R}^n \\;\\;\\; \\rightarrow \\;\\;\\; \\det(cA) = c^n\\det(A) $$\n또한 역행렬은 행렬식과 다음과 같은 관계를 가진다. 
\n$$ A^{-1} = \\dfrac{1}{\\det A} M = \\dfrac{1}{\\det A} \n\\begin{bmatrix}\nM_{1,1}&\\cdots&M_{1,n}\\\n\\vdots&\\ddots&\\vdots\\\nM_{n,1}&\\cdots&M_{n,n}\\\n\\end{bmatrix}\n$$\nNumPy에서는 linalg 서브패키지의 det 명령으로 trace를 계산할 수 있다.", "A = np.array([[1, 2], [3, 4]])\nA\n\nnp.linalg.det(A)", "전치 행렬과 대칭 행렬\n전치 연산을 통해서 얻어진 행렬을 전치 행렬(transpose matrix)이라고 한다.\n$$ [\\mathbf{A}^\\mathrm{T}]{ij} = [\\mathbf{A}]{ji} $$ \n만약 전치 행렬과 원래의 행렬이 같으면 대칭 행렬(symmetric matrix)이라고 한다.\n$$ A^\\mathrm{T} = A $$ \n전치 연산은 다음과 같은 성질을 만족한다.\n$$ ( A^\\mathrm{T} ) ^\\mathrm{T} = A $$\n$$ (A+B) ^\\mathrm{T} = A^\\mathrm{T} + B^\\mathrm{T} $$\n$$ \\left( A B \\right) ^\\mathrm{T} = B^\\mathrm{T} A^\\mathrm{T} $$\n$$ \\det(A^\\mathrm{T}) = \\det(A) $$\n$$ (A^\\mathrm{T})^{-1} = (A^{-1})^\\mathrm{T} $$\n지수 행렬\n행렬 $A$에 대해 다음과 같은 급수로 만들어지는 행렬 $e^A=\\exp A$ 를 지수 행렬(exponential matrix)이라고 한다.\n$$ e^X = \\sum_{k=0}^\\infty \\dfrac{X^k}{k!} = I + X + \\dfrac{1}{2}X^2 + \\dfrac{1}{3!}X^3 + \\cdots $$ \n지수 행렬은 다음과 같은 성질을 만족한다.\n$$ e^0 = I $$\n$$ e^{aX} e^{bX} = e^{(a+b)X} $$\n$$ e^X e^{-X} = I $$\n$$ XY = YX \\;\\; \\rightarrow \\;\\; e^Xe^Y = e^Ye^X = e^{X+Y} $$\n로그 행렬\n행렬 $A$에 대해 다음과 같은 급수로 만들어지는 행렬 $B=e^A$ 가 존재할 때, $A$를 $B$에 대한 로그 행렬이라고 하고 다음과 같이 표기한다.\n$$ A = \\log B $$\n로그 행렬은 다음과 같은 성질은 만족한다.\n만약 행렬 $A$, $B$가 모두 양-한정(positive definite)이고 $AB=BA$이면 \n$$ AB = e^{\\ln(A)+\\ln(B)} $$\n만약 행렬 $A$의 역행렬이 존재하면\n$$ A^{-1} = e^{-\\ln(A)} $$\n지수 행렬이나 로그 행렬은 NumPy에서 계산할 수 없다. SciPy의 linalg 서브패키지의 expm, logm 명령을 사용한다.", "A = np.array([[1.0, 3.0], [1.0, 4.0]])\nA\n\nB = sp.linalg.logm(A)\nB\n\nsp.linalg.expm(B)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
bloomberg/bqplot
examples/Introduction.ipynb
apache-2.0
[ "bqplot\nThis notebook is meant to guide you through the first stages of using the bqplot visualization library. bqplot is a Grammar of Graphics based interactive visualization library for the Jupyter notebook where every single component of a plot is an interactive iPython widget. What this means is that even after a plot is drawn, you can change almost any aspect of it. This makes the creation of advanced Graphical User Interfaces attainable through just a few simple lines of Python code.", "# Let's begin by importing some libraries we'll need\nimport numpy as np\n\n# And creating some random data\nsize = 100\nnp.random.seed(0)\nx_data = np.arange(size)\ny_data = np.cumsum(np.random.randn(size) * 100.0)", "Your First Plot\nLet's start by creating a simple Line chart. bqplot has two different APIs, the first one is a matplotlib inspired simple API called pyplot. So let's import that.", "from bqplot import pyplot as plt", "Let's plot y_data against x_data, and then show the plot.", "plt.figure(title=\"My First Plot\")\nplt.plot(x_data, y_data)\nplt.show()", "Use the buttons above to Pan (or Zoom), Reset or save the Figure.\nUsing bqplot's interactive elements\nNow, let's try creating a new plot. First, we create a brand new Figure. The Figure is the final element of any plot that is eventually displayed. You can think of it as a Canvas on which we put all of our other plots.", "# Creating a new Figure and setting it's title\nplt.figure(title=\"My Second Chart\")\n\n# Let's assign the scatter plot to a variable\nscatter_plot = plt.scatter(x_data, y_data)\n\n# Let's show the plot\nplt.show()", "Since both the x and the y attributes of a bqplot chart are interactive widgets, we can change them. So, let's\nchange the y attribute of the chart.", "scatter_plot.y = np.cumsum(np.random.randn(size) * 100.0)", "Re-run the above cell a few times, the same plot should update every time. But, that's not the only thing that can be changed once a plot has been rendered. Let's try changing some of the other attributes.", "# Say, the color\nscatter_plot.colors = [\"Red\"]\n\n# Or, the marker style\nscatter_plot.marker = \"diamond\"", "It's important to remember that an interactive widget means that the JavaScript and the Python communicate. So, the plot can be changed through a single line of python code, or a piece of python code can be triggered by a change in the plot. Let's go through a simple example. Say we have a function foo:", "def foo(change):\n print(\n \"This is a trait change. Foo was called by the fact that we moved the Scatter\"\n )\n print(\"In fact, the Scatter plot sent us all the new data: \")\n print(\n \"To access the data, try modifying the function and printing the data variable\"\n )", "We can call foo every time any attribute of our scatter is changed. Say, the y values:", "# First, we hook up our function `foo` to the colors attribute (or Trait) of the scatter plot\nscatter_plot.observe(foo, \"y\")", "To allow the points in the Scatter to be moved interactively, we set the enable_move attribute to True", "scatter_plot.enable_move = True", "Go ahead, head over to the chart and move any point in some way. This move (which happens on the JavaScript side should trigger our Python function foo.\nUnderstanding how bqplot uses the Grammar of Graphics paradigm\nbqplot has two different APIs. One is the matplotlib inspired pyplot which we used above (you can think of it as similar to qplot in ggplot2). 
The other one, the verbose API, is meant to expose every element of a plot individually, so that their attriutes can be controlled in an atomic way. In order to truly use bqplot to build complex and feature-rich GUIs, it pays to understand the underlying theory that is used to create a plot.\nTo understand this verbose API, it helps to revisit what exactly the components of a plot are. The first thing we need is a Scale.\nA Scale is a mapping from (function that converts) data coordinates to figure coordinates. What this means is that, a Scale takes a set of values in any arbitrary unit (say number of people, or $, or litres) and converts it to pixels (or colors for a ColorScale).", "# First, we import the scales\nfrom bqplot import LinearScale\n\n# Let's create a scale for the x attribute, and a scale for the y attribute\nx_sc = LinearScale()\ny_sc = LinearScale()", "Now, we need to create the actual Mark that will visually represent the data. Let's pick a Scatter chart to start.", "from bqplot import Scatter\n\nscatter_chart = Scatter(x=x_data, y=y_data, scales={\"x\": x_sc, \"y\": y_sc})", "Most of the time, the actual Figure coordinates don't really mean anything to us. So, what we need is the visual representation of our Scale, which is called an Axis.", "from bqplot import Axis\n\nx_ax = Axis(label=\"X\", scale=x_sc)\ny_ax = Axis(label=\"Y\", scale=y_sc, orientation=\"vertical\")", "And finally, we put it all together on a canvas, which is called a Figure.", "from bqplot import Figure\n\nfig = Figure(marks=[scatter_chart], title=\"A Figure\", axes=[x_ax, y_ax])\nfig", "The IPython display machinery displays the last returned value of a cell. If you wish to explicitly display a widget, you can call IPython.display.display.", "from IPython.display import display\n\ndisplay(fig)", "Now, that the plot has been generated, we can control every single attribute of it. Let's say we wanted to color the chart based on some other data.", "# First, we generate some random color data.\ncolor_data = np.random.randint(0, 2, size=100)", "Now, we define a ColorScale to map the color_data to actual colors", "from bqplot import ColorScale\n\n# The colors trait controls the actual colors we want to map to. It can also take a min, mid, max list of\n# colors to be interpolated between for continuous data.\ncol_sc = ColorScale(colors=[\"MediumSeaGreen\", \"Red\"])\n\nscatter_chart.scales = {\"x\": x_sc, \"y\": y_sc, \"color\": col_sc}\n# We pass the color data to the Scatter Chart through it's color attribute\nscatter_chart.color = color_data", "The grammar of graphics framework allows us to overlay multiple visualizations on a single Figure by having the visualization share the Scales. So, for example, if we had a Bar chart that we would like to plot alongside the Scatter plot, we just pass it the same Scales.", "from bqplot import Bars\n\nnew_size = 50\nscale = 100.0\nx_data_new = np.arange(new_size)\ny_data_new = np.cumsum(np.random.randn(new_size) * scale)\n\n# All we need to do to add a bar chart to the Figure is pass the same scales to the Mark\nbar_chart = Bars(x=x_data_new, y=y_data_new, scales={\"x\": x_sc, \"y\": y_sc})", "Finally, we add the new Mark to the Figure to update the plot!", "fig.marks = [scatter_chart, bar_chart]" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
rnoxy/cifar10-cnn
Classification_using_CNN_codes.ipynb
mit
[ "CIFAR10 classification using CNN codes\nHere we are going to build linear models to classify CNN codes of CIFAR10 images.\nWe assume that we already have all the codes extracted by the scripts in the following notebooks:\n- Feature_extraction_using_keras.ipynb\n- Feature_extraction_using_Inception_v3.ipynb", "!ls features/", "Load CNN codes", "model_names = [\n 'vgg16-keras', \n 'vgg19-keras', \n 'resnet50-keras',\n 'incv3-keras', \n 'Inception_v3'\n]\n\nimport numpy as np\n\ndata = dict()\nfor model_name in model_names:\n data[model_name] = np.load('features/CIFAR10_{model}_features.npz'.format(model=model_name)) \n\n# It is important that CNN codes for all the models are given in the same order,\n# i.e. they refer to the same samples from the dataset (both training and testing)\n\ny_training = data[ model_names[0] ]['labels_training'] # this should be common for all the models\ny_testing = data[ model_names[0] ]['labels_testing'] # this should be common for all the models\n\nfor i in range(1,len(model_names)):\n assert( (data[model_names[i]]['labels_training'] == y_training).all() )\n assert( (data[model_names[i]]['labels_testing'] == y_testing).all() )", "LinearSVC classifier from scikit-learn\nWe used the linear classifier from the scikit-learn library.<br/>\nMore precisely, we used LinearSVC", "# First we tried all of the following parameters for each model\nmodel_params = {\n 'vgg16-keras': [ {'C':0.0001}, {'C':0.001}, {'C':0.01,'max_iter':3000},\n {'C':0.1}, {'C':0.5}, {'C':1.0}, {'C':1.2}, {'C':1.5}, {'C':2.0}, {'C':10.0} ],\n 'vgg19-keras': [ {'C':0.0001}, {'C':0.001}, {'C':0.01},\n {'C':0.1}, {'C':0.5}, {'C':1.0}, {'C':1.2}, {'C':1.5}, {'C':2.0}, {'C':10.0} ],\n 'resnet50-keras': [ {'C':0.0001}, {'C':0.001}, {'C':0.01},\n {'C':0.1}, {'C':0.5}, {'C':1.0}, {'C':1.2}, {'C':1.5}, {'C':2.0}, {'C':10.0} ],\n 'Inception_v3': [ {'C':0.0001}, {'C':0.001}, {'C':0.01},\n {'C':0.1}, {'C':0.5}, {'C':1.0}, {'C':1.2}, {'C':1.5}, {'C':2.0}, {'C':10.0} ],\n 'incv3-keras': [ {'C':0.0001}, {'C':0.001}, {'C':0.01},\n {'C':0.1}, {'C':0.5}, {'C':1.0}, {'C':1.2}, {'C':1.5}, {'C':2.0}, {'C':10.0} ],\n}", "Before we start to train so many classifiers, let us write all the results\nwe obtained after hours of computation.\nWe tried to build LinearSVC classifier with many possible paramater C.\nBelow we present the accuracy of all the considered models.\n Model \n -----------------------------------------------------------------------------\n C | vgg16-keras | vgg19-keras | resnet50-keras | incv3-keras | Inception_v3\n------------------------------------------------------------------------------------\n 0.0001 | 8515 | 8633 | 9043 | 7244 | 8860\n 0.001 | 8528 | 8654 | 9158 | 7577 | 9005\n 0.01 | 8521 | 8644 | 9130 | 7604 | 9061\n 0.1 | 8519 | 8615 | 9009 | 7461 | 8959\n 0.5 | 7992 | 8014 | 8858 | 7409 | 8834\n 1.0 | 8211 | 8225 | 8853 | 7369 | 8776\n 1.2 | 8156 | 8335 | 8871 | 7357 | 8772\n 1.5 | 8172 | 8022 | 8852 | 7318 | 8762\n 2.0 | 7609 | 8256 | 8870 | 7281 | 8736\n10.0 | 7799 | 7580 | 8774 | 7042 | 8709", "# and we decided to choose the best parameters\nmodel_params = {\n 'vgg16-keras': [ {'C':0.0001} ],\n 'vgg19-keras': [ {'C':0.001} ],\n 'resnet50-keras': [ {'C':0.001} ],\n 'Inception_v3': [ {'C':0.01} ],\n 'incv3-keras': [ {'C':0.001} ]\n}\n\nfrom sklearn.svm import LinearSVC\n\n# C - chosen experimentally (see explanation below)\nresults = dict()\n\nfor model_name in model_params:\n print('model = ', model_name)\n X_training = data[model_name]['features_training']\n X_testing = 
data[model_name]['features_testing']\n print( 'X_training size = {}'.format(X_training.shape))\n# print( 'X_testing size = {}'.format(X_testing.shape))\n# print( 'y_training size = {}'.format(y_training.shape))\n# print( 'y_testing size = {}'.format(y_testing.shape))\n results[model_name] = []\n for params in model_params[model_name]:\n clf = LinearSVC(**params, verbose=0)\n clf.fit( X_training, y_training )\n y_pred = clf.predict( X_testing )\n score = sum( y_pred == y_testing )\n print('features={:>16}, C={:8f} => score={:5d}'.format(model_name,params['C'],score))\n results[model_name].append({'pred': y_pred, 'score': score, 'clf': clf})\n\nfrom sklearn.externals import joblib\n\nfor model_name in model_params:\n joblib.dump(results[model_name][0]['clf'], \\\n 'classifiers/{score}-{name}.pkl'.format(score=results[model_name][0]['score'], name=model_name))\n\n!ls -l classifiers/*.pkl\n\nbest_model = 'resnet50-keras'\nX_training = data[best_model]['features_training']\nX_testing = data[best_model]['features_testing']\n\nclf = results[best_model][0]['clf']\nprint( 'Best accuracy = {}'.format( clf.score( X_testing, y_testing ) ) )\ny_predictions = clf.predict( X_testing )", "So we obtained 91.58% accuracy on testing dataset using LinearSVC classifier on top of features extracted with ResNET50 convolutional neural network.\nSome misclassifications", "import myutils\nfrom sklearn.metrics import confusion_matrix\nlabels = myutils.load_CIFAR_classnames()\nconf_matrix = confusion_matrix( y_testing, y_predictions )\n\nprint( 'Confusion matrix:\\n', conf_matrix )\nprint( labels )\n\ni,j = 3,0\nimg_idx = [ k for k in range(10000) if y_testing[k]==i and y_predictions[k]==j ] \nprint( 'We have, e.g., {c} {iname}s predicted to be {jname}'.format(\\\n c=conf_matrix[i,j], iname=labels[i], jname=labels[j]) )\n# print(img_idx)\n\n_, data_testing = myutils.load_CIFAR_dataset(shuffle=False)\nfrom matplotlib import pyplot as plt\n%matplotlib inline \n\nfig = plt.figure(figsize=(18,2));\nfor _i in range(conf_matrix[i,j]):\n a=fig.add_subplot(1,conf_matrix[i,j],_i+1)\n plt.imshow(data_testing[img_idx[_i]][0])\n plt.axis('off')", "Saving parameters\nWe simply save the matrix with weights and bias vector for linear classifier.", "# np.savez_compressed(\"classifiers/9158_resnet50-keras_LinearSVC.npz\",W=np.array(clf.coef_).T, b=clf.intercept_)", "k-nearest neighbors classifier\nLet us note that simple kNN classifier\n(with k=10), trained with 5000 training features (CNN codes from Inception_v3) gives 83.45% accuracy on whole 10000 testing images.\nRemark that computing predictions with this classifier is very complex and it is not recommended for classificcation of images.\nHere is the code to compute the score on testing dataset.\n```python\nfrom sklearn.neighbors import KNeighborsClassifier\nkNN_clf = KNeighborsClassifier(n_neighbors=10)\nkNN_clf.fit(X_training, y_training)\nprint( 'Classification score = ', kNN_clf.score( X_testing, y_testing ) )\nClassification score = 0.8345\n```\nLogistic regression\nFinally we used <tt>Logistic regression</tt> with default parameters. We trained the model with all the training data and obtained 90.37% accuracy on testing dataset.", "from sklearn.linear_model import LogisticRegression\nclf = LogisticRegression()\nclf.fit(X_training, y_training)\nprint( 'Linear regression accuracy = ', clf.score( X_testing, y_testing ) )" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
projectmesa/mesa-examples
examples/PD_Grid/Demographic Prisoner's Dilemma Activation Schedule.ipynb
apache-2.0
[ "Demographic Prisoner's Dilemma\nThe Demographic Prisoner's Dilemma is a family of variants on the classic two-player Prisoner's Dilemma, first developed by Joshua Epstein. The model consists of agents, each with a strategy of either Cooperate or Defect. Each agent's payoff is based on its strategy and the strategies of its spatial neighbors. After each step of the model, the agents adopt the strategy of their neighbor with the highest total score. \nThe specific variant presented here is adapted from the Evolutionary Prisoner's Dilemma model included with NetLogo. Its payoff table is a slight variant of the traditional PD payoff table:\n<table>\n <tr><td></td><td>**Cooperate**</td><td>**Defect**</td></tr>\n <tr><td>**Cooperate**</td><td>1, 1</td><td>0, *D*</td></tr>\n <tr><td>**Defect**</td><td>*D*, 0</td><td>0, 0</td></tr>\n</table>\n\nWhere D is the defection bonus, generally set higher than 1. In these runs, the defection bonus is set to $D=1.6$.\nThe Demographic Prisoner's Dilemma demonstrates how simple rules can lead to the emergence of widespread cooperation, despite the Defection strategy dominiating each individual interaction game. However, it is also interesting for another reason: it is known to be sensitive to the activation regime employed in it.\nBelow, we demonstrate this by instantiating the same model (with the same random seed) three times, with three different activation regimes: \n\nSequential activation, where agents are activated in the order they were added to the model;\nRandom activation, where they are activated in random order every step;\nSimultaneous activation, simulating them all being activated simultaneously.", "from pd_grid import PD_Model\n\nimport random\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec\n%matplotlib inline", "Helper functions", "bwr = plt.get_cmap(\"bwr\")\n\ndef draw_grid(model, ax=None):\n '''\n Draw the current state of the grid, with Defecting agents in red\n and Cooperating agents in blue.\n '''\n if not ax:\n fig, ax = plt.subplots(figsize=(6,6))\n grid = np.zeros((model.grid.width, model.grid.height))\n for agent, x, y in model.grid.coord_iter():\n if agent.move == \"D\":\n grid[y][x] = 1\n else:\n grid[y][x] = 0\n ax.pcolormesh(grid, cmap=bwr, vmin=0, vmax=1)\n ax.axis('off')\n ax.set_title(\"Steps: {}\".format(model.schedule.steps))\n\ndef run_model(model):\n '''\n Run an experiment with a given model, and plot the results.\n '''\n fig = plt.figure(figsize=(12,8))\n \n ax1 = fig.add_subplot(231)\n ax2 = fig.add_subplot(232)\n ax3 = fig.add_subplot(233)\n ax4 = fig.add_subplot(212)\n \n draw_grid(model, ax1)\n model.run(10)\n draw_grid(model, ax2)\n model.run(10)\n draw_grid(model, ax3)\n model.datacollector.get_model_vars_dataframe().plot(ax=ax4)\n \n \n\n# Set the random seed\nseed = 21", "Sequential Activation", "random.seed(seed)\nm = PD_Model(50, 50, \"Sequential\")\nrun_model(m)", "Random Activation", "random.seed(seed)\nm = PD_Model(50, 50, \"Random\")\nrun_model(m)", "Simultaneous Activation", "random.seed(seed)\nm = PD_Model(50, 50, \"Simultaneous\")\nrun_model(m)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
slowvak/MachineLearningForMedicalImages
notebooks/Module 6.ipynb
mit
[ "Application Example\nStep 1: Load basic python libraries", "# This is used to display images within the browser \n%matplotlib inline \nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport dicom as pydicom # library to load dicom images \ntry:\n import cPickle as pickle\nexcept:\n import pickle\nfrom sklearn.preprocessing import StandardScaler\nimport nibabel as nib", "Step 2: Load the classifier and the images\n# load a classifier that has been saved in pickle form\n with open('my_dumped_classifier.pkl', 'rb') as fid:\n gnb_loaded = cPickle.load(fid)", "with open('RBF SVM.pkl', 'rb') as fid:\n classifier = pickle.load(fid)\nprint (dir(classifier))", "Step 3: Load the unknown image and perform the segmetnation", "CurrentDir= os.getcwd()\n# Print current directory\nprint (CurrentDir)\n# Get parent direcotry \nprint(os.path.abspath(os.path.join(CurrentDir, os.pardir)))\n# Create the file paths. The images are contained in a subfolder called Data. \nPostName = os.path.abspath(os.path.join(os.path.abspath(os.path.join(CurrentDir, os.pardir)), \"Data\", 'POST.nii.gz') )\nPreName = os.path.abspath(os.path.join(os.path.abspath(os.path.join(CurrentDir, os.pardir)), \"Data\", 'PRE.nii.gz') )\nFLAIRName = os.path.abspath(os.path.join(os.path.abspath(os.path.join(CurrentDir, os.pardir)), \"Data\", 'FLAIR.nii.gz') )\nGT = os.path.abspath(os.path.join(os.path.abspath(os.path.join(CurrentDir, os.pardir)), \"Data\", 'GroundTruth.nii.gz') )\n# read Pre in--we assume that all images are same x,y dims\nPre = nib.load(PreName)\n# Pre is a class containing the image data among other information \nPre=Pre.get_data()\nxdim = np.shape(Pre)[0]\nydim = np.shape(Pre)[1]\nzdim = np.shape(Pre)[2]\n# Printing the dimensions of an image \nprint ('Dimensions')\nprint (xdim,ydim,zdim)\n# make space in a numpy array for the images\nArrayDicom = np.zeros((xdim, ydim, 2), dtype=Pre.dtype)\n# copy Pre pixels into z=0\nPre=Pre[:,:,55]\nArrayDicom[:, :, 0] = Post/ np.mean(Post[np.nonzero(Post)])\n# Post\nPost = nib.load(PostName)\n# Pre is a class containing the image data among other information \nPost=Post.get_data()\nPost= Post[:,:,55]\nArrayDicom[:, :, 1] = Pre/ np.mean(Pre[np.nonzero(Pre)])", "Step 4: Use the pretrained classifier to perform segmentation\nReshape the data", "print ('Shape before reshape')\nprint (np.shape(ArrayDicom))\nArrayDicom=ArrayDicom.reshape(-1,2)\nprint ('Shape after reshape')\nprint (np.shape(ArrayDicom))", "Appy trained classifier", "# ArrayDicom = StandardScaler().fit_transform(ArrayDicom)\nLabels=classifier.predict(ArrayDicom)\nprint (Labels)", "Visualize results", "print (np.mean(Labels[np.nonzero(Labels)]))\nprint (np.shape(Labels))\n# respape to image\nLabels=Labels.reshape(240,240)\nPost=Post.reshape(240,240)\nPre=Pre.reshape(240,240)\nf, (ax1,ax2,ax3)=plt.subplots(1,3)\nax1.imshow(np.rot90(Post[:, :],3), cmap=plt.cm.gray)\nax1.axis('off')\nax2.imshow(np.rot90(Pre[:, :],3), cmap=plt.cm.gray)\nax2.axis('off')\nax3.imshow(np.rot90(Labels[:, :,],3), cmap=plt.cm.jet)\nax3.axis('off')\n" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
godfreyduke/deep-learning
sentiment-rnn/Sentiment_RNN_Solution.ipynb
mit
[ "Sentiment Analysis with an RNN\nIn this notebook, you'll implement a recurrent neural network that performs sentiment analysis. Using an RNN rather than a feedfoward network is more accurate since we can include information about the sequence of words. Here we'll use a dataset of movie reviews, accompanied by labels.\nThe architecture for this network is shown below.\n<img src=\"assets/network_diagram.png\" width=400px>\nHere, we'll pass in words to an embedding layer. We need an embedding layer because we have tens of thousands of words, so we'll need a more efficient representation for our input data than one-hot encoded vectors. You should have seen this before from the word2vec lesson. You can actually train up an embedding with word2vec and use it here. But it's good enough to just have an embedding layer and let the network learn the embedding table on it's own.\nFrom the embedding layer, the new representations will be passed to LSTM cells. These will add recurrent connections to the network so we can include information about the sequence of words in the data. Finally, the LSTM cells will go to a sigmoid output layer here. We're using the sigmoid because we're trying to predict if this text has positive or negative sentiment. The output layer will just be a single unit then, with a sigmoid activation function.\nWe don't care about the sigmoid outputs except for the very last one, we can ignore the rest. We'll calculate the cost from the output of the last step and the training label.", "import numpy as np\nimport tensorflow as tf\n\nwith open('../sentiment-network/reviews.txt', 'r') as f:\n reviews = f.read()\nwith open('../sentiment-network/labels.txt', 'r') as f:\n labels = f.read()\n\nreviews[:2000]", "Data preprocessing\nThe first step when building a neural network model is getting your data into the proper form to feed into the network. Since we're using embedding layers, we'll need to encode each word with an integer. We'll also want to clean it up a bit.\nYou can see an example of the reviews data above. We'll want to get rid of those periods. Also, you might notice that the reviews are delimited with newlines \\n. To deal with those, I'm going to split the text into each review using \\n as the delimiter. Then I can combined all the reviews back together into one big string.\nFirst, let's remove all punctuation. Then get all the text without the newlines and split it into individual words.", "from string import punctuation\nall_text = ''.join([c for c in reviews if c not in punctuation])\nreviews = all_text.split('\\n')\n\nall_text = ' '.join(reviews)\nwords = all_text.split()\n\nall_text[:2000]\n\nwords[:100]", "Encoding the words\nThe embedding lookup requires that we pass in integers to our network. The easiest way to do this is to create dictionaries that map the words in the vocabulary to integers. Then we can convert each of our reviews into integers so they can be passed into the network.\n\nExercise: Now you're going to encode the words with integers. Build a dictionary that maps words to integers. 
Later we're going to pad our input vectors with zeros, so make sure the integers start at 1, not 0.\nAlso, convert the reviews to integers and store the reviews in a new list called reviews_ints.", "from collections import Counter\ncounts = Counter(words)\nvocab = sorted(counts, key=counts.get, reverse=True)\nvocab_to_int = {word: ii for ii, word in enumerate(vocab, 1)}\n\nreviews_ints = []\nfor each in reviews:\n reviews_ints.append([vocab_to_int[word] for word in each.split()])", "Encoding the labels\nOur labels are \"positive\" or \"negative\". To use these labels in our network, we need to convert them to 0 and 1.\n\nExercise: Convert labels from positive and negative to 1 and 0, respectively.", "labels = labels.split('\\n')\nlabels = np.array([1 if each == 'positive' else 0 for each in labels])\n\nreview_lens = Counter([len(x) for x in reviews_ints])\nprint(\"Zero-length reviews: {}\".format(review_lens[0]))\nprint(\"Maximum review length: {}\".format(max(review_lens)))", "Okay, a couple issues here. We seem to have one review with zero length. And, the maximum review length is way too many steps for our RNN. Let's truncate to 200 steps. For reviews shorter than 200, we'll pad with 0s. For reviews longer than 200, we can truncate them to the first 200 characters.\n\nExercise: First, remove the review with zero length from the reviews_ints list.", "non_zero_idx = [ii for ii, review in enumerate(reviews_ints) if len(review) != 0]\nlen(non_zero_idx)\n\nreviews_ints[-1]", "Turns out its the final review that has zero length. But that might not always be the case, so let's make it more general.", "reviews_ints = [reviews_ints[ii] for ii in non_zero_idx]\nlabels = np.array([labels[ii] for ii in non_zero_idx])", "Exercise: Now, create an array features that contains the data we'll pass to the network. The data should come from review_ints, since we want to feed integers to the network. Each row should be 200 elements long. For reviews shorter than 200 words, left pad with 0s. That is, if the review is ['best', 'movie', 'ever'], [117, 18, 128] as integers, the row will look like [0, 0, 0, ..., 0, 117, 18, 128]. For reviews longer than 200, use on the first 200 words as the feature vector.\n\nThis isn't trivial and there are a bunch of ways to do this. But, if you're going to be building your own deep learning networks, you're going to have to get used to preparing your data.", "seq_len = 200\nfeatures = np.zeros((len(reviews_ints), seq_len), dtype=int)\nfor i, row in enumerate(reviews_ints):\n features[i, -len(row):] = np.array(row)[:seq_len]\n\nfeatures[:10,:100]", "Training, Validation, Test\nWith our data in nice shape, we'll split it into training, validation, and test sets.\n\nExercise: Create the training, validation, and test sets here. You'll need to create sets for the features and the labels, train_x and train_y for example. Define a split fraction, split_frac as the fraction of data to keep in the training set. Usually this is set to 0.8 or 0.9. 
The rest of the data will be split in half to create the validation and testing data.", "split_frac = 0.8\nsplit_idx = int(len(features)*0.8)\ntrain_x, val_x = features[:split_idx], features[split_idx:]\ntrain_y, val_y = labels[:split_idx], labels[split_idx:]\n\ntest_idx = int(len(val_x)*0.5)\nval_x, test_x = val_x[:test_idx], val_x[test_idx:]\nval_y, test_y = val_y[:test_idx], val_y[test_idx:]\n\nprint(\"\\t\\t\\tFeature Shapes:\")\nprint(\"Train set: \\t\\t{}\".format(train_x.shape), \n \"\\nValidation set: \\t{}\".format(val_x.shape),\n \"\\nTest set: \\t\\t{}\".format(test_x.shape))", "With train, validation, and text fractions of 0.8, 0.1, 0.1, the final shapes should look like:\nFeature Shapes:\nTrain set: (20000, 200) \nValidation set: (2500, 200) \nTest set: (2500, 200)\nBuild the graph\nHere, we'll build the graph. First up, defining the hyperparameters.\n\nlstm_size: Number of units in the hidden layers in the LSTM cells. Usually larger is better performance wise. Common values are 128, 256, 512, etc.\nlstm_layers: Number of LSTM layers in the network. I'd start with 1, then add more if I'm underfitting.\nbatch_size: The number of reviews to feed the network in one training pass. Typically this should be set as high as you can go without running out of memory.\nlearning_rate: Learning rate", "lstm_size = 256\nlstm_layers = 1\nbatch_size = 500\nlearning_rate = 0.001", "For the network itself, we'll be passing in our 200 element long review vectors. Each batch will be batch_size vectors. We'll also be using dropout on the LSTM layer, so we'll make a placeholder for the keep probability.\n\nExercise: Create the inputs_, labels_, and drop out keep_prob placeholders using tf.placeholder. labels_ needs to be two-dimensional to work with some functions later. Since keep_prob is a scalar (a 0-dimensional tensor), you shouldn't provide a size to tf.placeholder.", "n_words = len(vocab_to_int)\n\n# Create the graph object\ngraph = tf.Graph()\n# Add nodes to the graph\nwith graph.as_default():\n inputs_ = tf.placeholder(tf.int32, [None, None], name='inputs')\n labels_ = tf.placeholder(tf.int32, [None, None], name='labels')\n keep_prob = tf.placeholder(tf.float32, name='keep_prob')", "Embedding\nNow we'll add an embedding layer. We need to do this because there are 74000 words in our vocabulary. It is massively inefficient to one-hot encode our classes here. You should remember dealing with this problem from the word2vec lesson. Instead of one-hot encoding, we can have an embedding layer and use that layer as a lookup table. You could train an embedding layer using word2vec, then load it here. But, it's fine to just make a new layer and let the network learn the weights.\n\nExercise: Create the embedding lookup matrix as a tf.Variable. Use that embedding matrix to get the embedded vectors to pass to the LSTM cell with tf.nn.embedding_lookup. This function takes the embedding matrix and an input tensor, such as the review vectors. Then, it'll return another tensor with the embedded vectors. 
So, if the embedding layer as 200 units, the function will return a tensor with size [batch_size, 200].", "# Size of the embedding vectors (number of units in the embedding layer)\nembed_size = 300 \n\nwith graph.as_default():\n embedding = tf.Variable(tf.random_uniform((n_words, embed_size), -1, 1))\n embed = tf.nn.embedding_lookup(embedding, inputs_)", "LSTM cell\n<img src=\"assets/network_diagram.png\" width=400px>\nNext, we'll create our LSTM cells to use in the recurrent network (TensorFlow documentation). Here we are just defining what the cells look like. This isn't actually building the graph, just defining the type of cells we want in our graph.\nTo create a basic LSTM cell for the graph, you'll want to use tf.contrib.rnn.BasicLSTMCell. Looking at the function documentation:\ntf.contrib.rnn.BasicLSTMCell(num_units, forget_bias=1.0, input_size=None, state_is_tuple=True, activation=&lt;function tanh at 0x109f1ef28&gt;)\nyou can see it takes a parameter called num_units, the number of units in the cell, called lstm_size in this code. So then, you can write something like \nlstm = tf.contrib.rnn.BasicLSTMCell(num_units)\nto create an LSTM cell with num_units. Next, you can add dropout to the cell with tf.contrib.rnn.DropoutWrapper. This just wraps the cell in another cell, but with dropout added to the inputs and/or outputs. It's a really convenient way to make your network better with almost no effort! So you'd do something like\ndrop = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)\nMost of the time, you're network will have better performance with more layers. That's sort of the magic of deep learning, adding more layers allows the network to learn really complex relationships. Again, there is a simple way to create multiple layers of LSTM cells with tf.contrib.rnn.MultiRNNCell:\ncell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers)\nHere, [drop] * lstm_layers creates a list of cells (drop) that is lstm_layers long. The MultiRNNCell wrapper builds this into multiple layers of RNN cells, one for each cell in the list.\nSo the final cell you're using in the network is actually multiple (or just one) LSTM cells with dropout. But it all works the same from an achitectural viewpoint, just a more complicated graph in the cell.\n\nExercise: Below, use tf.contrib.rnn.BasicLSTMCell to create an LSTM cell. Then, add drop out to it with tf.contrib.rnn.DropoutWrapper. Finally, create multiple LSTM layers with tf.contrib.rnn.MultiRNNCell.\n\nHere is a tutorial on building RNNs that will help you out.", "with graph.as_default():\n # Your basic LSTM cell\n lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)\n \n # Add dropout to the cell\n drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)\n \n # Stack up multiple LSTM layers, for deep learning\n cell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers)\n \n # Getting an initial state of all zeros\n initial_state = cell.zero_state(batch_size, tf.float32)", "RNN forward pass\n<img src=\"assets/network_diagram.png\" width=400px>\nNow we need to actually run the data through the RNN nodes. You can use tf.nn.dynamic_rnn to do this. You'd pass in the RNN cell you created (our multiple layered LSTM cell for instance), and the inputs to the network.\noutputs, final_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=initial_state)\nAbove I created an initial state, initial_state, to pass to the RNN. This is the cell state that is passed between the hidden layers in successive time steps. 
tf.nn.dynamic_rnn takes care of most of the work for us. We pass in our cell and the input to the cell, then it does the unrolling and everything else for us. It returns outputs for each time step and the final_state of the hidden layer.\n\nExercise: Use tf.nn.dynamic_rnn to add the forward pass through the RNN. Remember that we're actually passing in vectors from the embedding layer, embed.", "with graph.as_default():\n outputs, final_state = tf.nn.dynamic_rnn(cell, embed,\n initial_state=initial_state)", "Output\nWe only care about the final output, we'll be using that as our sentiment prediction. So we need to grab the last output with outputs[:, -1], the calculate the cost from that and labels_.", "with graph.as_default():\n predictions = tf.contrib.layers.fully_connected(outputs[:, -1], 1, activation_fn=tf.sigmoid)\n cost = tf.losses.mean_squared_error(labels_, predictions)\n \n optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)", "Validation accuracy\nHere we can add a few nodes to calculate the accuracy which we'll use in the validation pass.", "with graph.as_default():\n correct_pred = tf.equal(tf.cast(tf.round(predictions), tf.int32), labels_)\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))", "Batching\nThis is a simple function for returning batches from our data. First it removes data such that we only have full batches. Then it iterates through the x and y arrays and returns slices out of those arrays with size [batch_size].", "def get_batches(x, y, batch_size=100):\n \n n_batches = len(x)//batch_size\n x, y = x[:n_batches*batch_size], y[:n_batches*batch_size]\n for ii in range(0, len(x), batch_size):\n yield x[ii:ii+batch_size], y[ii:ii+batch_size]", "Training\nBelow is the typical training code. If you want to do this yourself, feel free to delete all this code and implement it yourself. Before you run this, make sure the checkpoints directory exists.", "epochs = 10\n\nwith graph.as_default():\n saver = tf.train.Saver()\n\nwith tf.Session(graph=graph) as sess:\n sess.run(tf.global_variables_initializer())\n iteration = 1\n for e in range(epochs):\n state = sess.run(initial_state)\n \n for ii, (x, y) in enumerate(get_batches(train_x, train_y, batch_size), 1):\n feed = {inputs_: x,\n labels_: y[:, None],\n keep_prob: 0.5,\n initial_state: state}\n loss, state, _ = sess.run([cost, final_state, optimizer], feed_dict=feed)\n \n if iteration%5==0:\n print(\"Epoch: {}/{}\".format(e, epochs),\n \"Iteration: {}\".format(iteration),\n \"Train loss: {:.3f}\".format(loss))\n\n if iteration%25==0:\n val_acc = []\n val_state = sess.run(cell.zero_state(batch_size, tf.float32))\n for x, y in get_batches(val_x, val_y, batch_size):\n feed = {inputs_: x,\n labels_: y[:, None],\n keep_prob: 1,\n initial_state: val_state}\n batch_acc, val_state = sess.run([accuracy, final_state], feed_dict=feed)\n val_acc.append(batch_acc)\n print(\"Val acc: {:.3f}\".format(np.mean(val_acc)))\n iteration +=1\n saver.save(sess, \"checkpoints/sentiment.ckpt\")", "Testing", "test_acc = []\nwith tf.Session(graph=graph) as sess:\n saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))\n test_state = sess.run(cell.zero_state(batch_size, tf.float32))\n for ii, (x, y) in enumerate(get_batches(test_x, test_y, batch_size), 1):\n feed = {inputs_: x,\n labels_: y[:, None],\n keep_prob: 1,\n initial_state: test_state}\n batch_acc, test_state = sess.run([accuracy, final_state], feed_dict=feed)\n test_acc.append(batch_acc)\n print(\"Test accuracy: {:.3f}\".format(np.mean(test_acc)))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
moonbury/pythonanywhere
github/MasteringMatplotlib/mmpl-custom-and-config.ipynb
gpl-3.0
[ "Advanced Customization and Configuration\nTable of Contents\n\nIntroduction\nCustomization\nmatplotlib Styles\nSubplots\nMaking a Plan\nRevisiting Pandas\nIndividual Plots\nCombined Plots\n\n\nConfiguration\nRun Control\n\nWarm-up proceedures:", "import matplotlib\nmatplotlib.use('nbagg')\n%matplotlib inline\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom IPython.display import Image", "Note that we're not using Seaborn for styling like we did previously -- that's beccause the first thing we're going to tackle is creating a custom matplotlib style :-)\nCustomization\nCreating a custom style\nIn the previous notebook, we saw that we could list the available styles with the following call:", "print(plt.style.available)", "You can create custom styles and use them by calling style.use with the path or URL to the style sheet. Alternatively, if you save your &lt;style-name&gt;.mplstyle file to the ~/.matplotlib/stylelib directory (you may need to create it), you can reuse your custom style sheet with a call to style.use(&lt;style-name&gt;). Note that a custom style sheet in ~/.matplotlib/stylelib will override a style sheet defined by matplotlib if the styles have the same name.\nWe've created a style sheet for you to use in this repository for this notebook, but before we go further, let's create a function that will generate a demo plot for us. Then we'll render it, using the default style -- thus having a baseline to compare our work to:", "def make_plot ():\n x = np.random.randn(5000, 6)\n (figure, axes) = plt.subplots(figsize=(16,10))\n (n, bins, patches) = axes.hist(x, 12, normed=1, histtype='bar',\n label=['Color 1', 'Color 2', 'Color 3',\n 'Color 4', 'Color 5', 'Color 6'])\n axes.set_title(\"Histogram\\nfor a\\nNormal Distribution\", fontsize=24)\n axes.set_xlabel(\"Data Points\", fontsize=16)\n axes.set_ylabel(\"Counts\", fontsize=16)\n axes.legend()\n plt.show()\n\nplt.style.use('ggplot')\nmake_plot()", "Okay, we've got our sample plot. Now let's look at the style.\nWe've created a style called \"Superheroine\", based on Thomas Park's excellent Bootstrap theme, Superhero. Here's a screenshot of the Boostrap theme:", "#Image(filename=\"superhero.png\")", "We've saved captured some of the colors from this screenshot and saved them in a couple of plot style files to the \"styles\" directory in this notebook repo:", "ls -l ../styles/", "Basically, we couldn't make up our mind about whether we liked the light text (style 1) or the orange text (style 2). So we kept both :-)\nLet's take a look at the second one's contents which show the hexadecimal colors we copied from the Boostrap theme:", "cat ./superheroine-2.mplstyle", "Now let's load it:", "plt.style.use(\"./superheroine-2.mplstyle\")", "And then re-render our plot:", "make_plot()", "A full list of styles available for customization is in given in the matplotlib run control file. We'll be discussing this more in the next section.\nSubplots\nMaking a Plan\nIn this next section, we'll be creating a sophisticated subplot to give you a sense of what's possible with matplotlib's layouts. 
We'll be ingesting data from the UCI Machine Learning Repository, in particular the 1985 Automobile Data Set, an example of data which can be used to assess the insurance risks for different vehicles.\nWe will use it in an effort to compare 21 automobile manufacturers (using 1985 data) along the following dimensions:\n* mean price\n* mean city MPG\n* mean highway MPG\n* mean horsepower\n* mean curb-weight\n* mean relative average loss payment\n* mean insurance riskiness\nWe will limit ourselves to automobile manufacturers that have data for losses as well as 6 or more data rows.\nOur subplot will be comprised of the following sections:\n * An overall title \n * Line plots for max, mean, and min prices\n * Stacked bar chart for combined riskiness/losses\n * Stacked bar chart for riskiness\n * Stacked bar chart for losses\n * Radar charts for each automobile manufacturer\n * Combined scatter plot for city and highway MPG\nThese will be composed as subplots in the following manner:\n```\n| overall title |\n| price ranges |\n| combined loss/risk | |\n| | radar |\n---------------------- plots |\n| risk | loss | |\n\n| mpg |\n```\nRevisiting Pandas", "import sys\nsys.path.append(\"../lib\")\nimport demodata, demoplot, radar\n\nraw_data = demodata.get_raw_data()\nraw_data.head()\n\nlimited_data = demodata.get_limited_data()\nlimited_data.head()\n\ndemodata.get_all_auto_makes()\n\n(makes, counts) = demodata.get_make_counts(limited_data)\ncounts\n\n(makes, counts) = demodata.get_make_counts(limited_data, lower_bound=6)\n\ncounts\n\ndata = demodata.get_limited_data(lower_bound=6)\ndata.head()\n\nlen(data.index)\n\nsum([x[1] for x in counts])\n\nnormed_data = data.copy()\nnormed_data.rename(columns={\"horsepower\": \"power\"}, inplace=True)", "Higher values are better for these:", "demodata.norm_columns([\"city mpg\", \"highway mpg\", \"power\"], normed_data)\nnormed_data.head()", "Lower values are better for these:", "demodata.invert_norm_columns([\"price\", \"weight\", \"riskiness\", \"losses\"], normed_data)\nnormed_data.head()", "Individual Plots", "figure = plt.figure(figsize=(15, 5))\nprices_gs = mpl.gridspec.GridSpec(1, 1)\nprices_axes = demoplot.make_autos_price_plot(figure, prices_gs, data)\nplt.show()\n\nfigure = plt.figure(figsize=(15, 5))\nmpg_gs = mpl.gridspec.GridSpec(1, 1)\nmpg_axes = demoplot.make_autos_mpg_plot(figure, mpg_gs, data)\nplt.show()\n\nfigure = plt.figure(figsize=(15, 5))\nrisk_gs = mpl.gridspec.GridSpec(1, 1)\nrisk_axes = demoplot.make_autos_riskiness_plot(figure, risk_gs, normed_data)\nplt.show()\n\nfigure = plt.figure(figsize=(15, 5))\nloss_gs = mpl.gridspec.GridSpec(1, 1)\nloss_axes = demoplot.make_autos_losses_plot(figure, loss_gs, normed_data)\nplt.show()\n\nfigure = plt.figure(figsize=(15, 5))\nrisk_loss_gs = mpl.gridspec.GridSpec(1, 1)\nrisk_loss_axes = demoplot.make_autos_loss_and_risk_plot(figure, risk_loss_gs, normed_data)\nplt.show()\n\nfigure = plt.figure(figsize=(15, 5))\nradar_gs = mpl.gridspec.GridSpec(3, 7, height_ratios=[1, 10, 10], wspace=0.50, hspace=0.60, top=0.95, bottom=0.25)\nradar_axes = demoplot.make_autos_radar_plot(figure, radar_gs, normed_data)\nplt.show()", "Combined Plots\nHere's a refresher on the plot layout we're aiming for:\n```\n| overall title |\n| price ranges |\n| combined loss/risk | |\n| | radar |\n---------------------- plots |\n| risk | loss | |\n\n| mpg |\n```\nLet's try that now with just empty graphs, to get a sense of things:", "figure = plt.figure(figsize=(10, 8))\ngs_master = mpl.gridspec.GridSpec(4, 2, height_ratios=[1, 2, 
8, 2])\n# Layer 1 - Title\ngs_1 = mpl.gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=gs_master[0, :])\ntitle_axes = figure.add_subplot(gs_1[0])\n# Layer 2 - Price\ngs_2 = mpl.gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=gs_master[1, :])\nprice_axes = figure.add_subplot(gs_2[0])\n# Layer 3 - Risks & Radar\ngs_31 = mpl.gridspec.GridSpecFromSubplotSpec(2, 2, height_ratios=[2, 1], subplot_spec=gs_master[2, :1])\nrisk_and_loss_axes = figure.add_subplot(gs_31[0, :])\nrisk_axes = figure.add_subplot(gs_31[1, :1])\nloss_axes = figure.add_subplot(gs_31[1:, 1])\ngs_32 = mpl.gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=gs_master[2, 1])\nradar_axes = figure.add_subplot(gs_32[0])\n# Layer 4 - MPG\ngs_4 = mpl.gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=gs_master[3, :])\nmpg_axes = figure.add_subplot(gs_4[0])\n# Tidy up\ngs_master.tight_layout(figure)\nplt.show()\n\nfigure = plt.figure(figsize=(15, 15))\ngs_master = mpl.gridspec.GridSpec(4, 2, height_ratios=[1, 24, 128, 32], hspace=0, wspace=0)\n\n# Layer 1 - Title\ngs_1 = mpl.gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=gs_master[0, :])\ntitle_axes = figure.add_subplot(gs_1[0])\ntitle_axes.set_title(\"Demo Plots for 1985 Auto Maker Data\", fontsize=30, color=\"#cdced1\")\ndemoplot.hide_axes(title_axes)\n\n# Layer 2 - Price\ngs_2 = mpl.gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=gs_master[1, :])\nprice_axes = figure.add_subplot(gs_2[0])\ndemoplot.make_autos_price_plot(figure, pddata=data, axes=price_axes)\n\n# Layer 3, Part I - Risks\ngs_31 = mpl.gridspec.GridSpecFromSubplotSpec(2, 2, height_ratios=[2, 1], hspace=0.4, subplot_spec=gs_master[2, :1])\nrisk_and_loss_axes = figure.add_subplot(gs_31[0, :])\ndemoplot.make_autos_loss_and_risk_plot(\n figure, pddata=normed_data, axes=risk_and_loss_axes, x_label=False, rotate_ticks=True)\nrisk_axes = figure.add_subplot(gs_31[1, :1])\ndemoplot.make_autos_riskiness_plot(figure, pddata=normed_data, axes=risk_axes, legend=False, labels=False)\nloss_axes = figure.add_subplot(gs_31[1:, 1])\ndemoplot.make_autos_losses_plot(figure, pddata=normed_data, axes=loss_axes, legend=False, labels=False)\n\n# Layer 3, Part II - Radar\ngs_32 = mpl.gridspec.GridSpecFromSubplotSpec(\n 5, 3, height_ratios=[1, 20, 20, 20, 20], hspace=0.6, wspace=0, subplot_spec=gs_master[2, 1])\n(rows, cols) = geometry = gs_32.get_geometry()\ntitle_axes = figure.add_subplot(gs_32[0, :])\ninner_axes = []\nprojection = radar.RadarAxes(spoke_count=len(normed_data.groupby(\"make\").mean().columns))\n[inner_axes.append(figure.add_subplot(m, projection=projection)) for m in [n for n in gs_32][cols:]]\ndemoplot.make_autos_radar_plot(\n figure, pddata=normed_data, title_axes=title_axes, inner_axes=inner_axes, legend_axes=False, \n geometry=geometry)\n\n# Layer 4 - MPG\ngs_4 = mpl.gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=gs_master[3, :])\nmpg_axes = figure.add_subplot(gs_4[0])\ndemoplot.make_autos_mpg_plot(figure, pddata=data, axes=mpg_axes)\n\n# Tidy up\ngs_master.tight_layout(figure)\nplt.show()", "Configuration\nGet the directory for the matplotlib config files and cache:", "mpl.get_configdir()\n\nmpl.matplotlib_fname()", "matplotlib's rcParams configuration dictionary holds a great many options for tweaking your use of matplotlib the way you want to:", "len(mpl.rcParams.keys())", "The first 10 configuration options in rcParams are:", "dict(list(mpl.rcParams.items())[:10])\n\nmpl.rcParams['savefig.jpeg_quality'] = 72\nmpl.rcParams['axes.formatter.limits'] = [-5, 
5]\n\nmpl.rcParams['axes.formatter.limits']\n\nmpl.rcdefaults() \n\nmpl.rcParams['axes.formatter.limits']" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
kjschiroo/mlip
Machine_Learning_in_Python.ipynb
mit
[ "Machine learning in Python\nThe data set", "from sklearn.datasets import load_digits\ndata_set = load_digits()", "Let's poke around and see what is in the data set.", "data_set.keys()\n\ndata_set.data", "Well, that is a bit hard to grok. Let's see if we can get a better view.", "%matplotlib inline\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.figsize'] = (4.0, 4.0)\n\ndef show_image(image_data):\n plt.imshow(image_data, cmap=plt.cm.gray_r, interpolation='nearest')\n plt.show()\n \nshow_image(data_set.images[0])\ndata_set.images[0]", "Now we have an idea of what our data looks like. It looks like they took 8x8 gray scale images, and then just concatenated all of the rows together.", "data_set.target", "And each one of these data points has a label, 0 through 9.\nMachine learning time", "half_length = len(data_set.data) // 2\ntrain_set = {\n 'data': data_set.data[:half_length],\n 'target': data_set.target[:half_length],\n}\ntest_set = {\n 'data': data_set.data[half_length:],\n 'target': data_set.target[half_length:]\n}\n\nfrom sklearn.ensemble import RandomForestClassifier\n\nclassifier = RandomForestClassifier()\nclassifier.fit(train_set['data'], train_set['target'])", "We now have a classifier. Let's try it out.", "import random\nplt.rcParams['figure.figsize'] = (4.0, 4.0)\npredictions = classifier.predict(test_set['data'])\nthe_pick = random.randrange(0, len(test_set['data']))\nshow_image(test_set['data'][the_pick].reshape(8, 8))\nprint(\"We predict: {0}\".format(predictions[the_pick]))\n\nimport pandas as pd\n\npd.crosstab(test_set['target'], predictions, rownames=['Actual'], colnames=['Predicted'], margins=True)", "## How are we doing? ##", "from sklearn import metrics\n\nprint(\"Classification report\")\nprint(metrics.classification_report(test_set['target'], predictions))\n\nplt.rcParams['figure.figsize'] = (12.0, 12.0)\nprecision = []\nrecall = []\nprobabilities = classifier.predict_proba(test_set['data'])\nfor i in range(10):\n actual = [v == i for v in test_set['target']]\n p, r, _ = metrics.precision_recall_curve(actual, probabilities[:, i])\n precision.append(p)\n recall.append(r)\n\nfor i in range(10):\n plt.plot(recall[i], precision[i], label=i)\nplt.legend(loc='lower left')\nplt.xlim([0.0, 1.0])\nplt.xlabel('Recall')\nplt.ylabel('Precision')\nplt.show()", "Bad classifier", "import numpy as np\n\n# Totally random data, nothing can be learned here.\nrows = 1000\nfeatures = 64\ndata = np.random.random((rows, features))\nlabels = np.random.randint(0, 2, rows)\n\nfrom sklearn.ensemble import RandomForestClassifier\n\n# Train it on all the data, such a bad idea!\nbad_classifier = RandomForestClassifier()\nbad_classifier.fit(data, labels)\n\nprobabilities = bad_classifier.predict_proba(data)\np, r, _ = metrics.precision_recall_curve(labels, probabilities[:,1])\nplt.plot(r, p)\nplt.xlabel('Recall')\nplt.ylabel('Precision')\nplt.show()", "The right way to do it", "# Divide our data in half\nhalf_length = len(data) // 2\ntrain_data = data[:half_length]\ntrain_labels = labels[:half_length]\ntest_data = data[half_length:]\ntest_labels = labels[half_length:]\n\n# train on half of it\ngood_classifier = RandomForestClassifier()\ngood_classifier.fit(train_data, train_labels)\n\n# evaluate\nprobabilities = good_classifier.predict_proba(test_data)\np, r, _ = metrics.precision_recall_curve(test_labels, probabilities[:,1])\nplt.plot(r, p)\nplt.xlabel('Recall')\nplt.ylabel('Precision')\nplt.show()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ES-DOC/esdoc-jupyterhub
notebooks/nerc/cmip6/models/sandbox-2/toplevel.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Toplevel\nMIP Era: CMIP6\nInstitute: NERC\nSource ID: SANDBOX-2\nSub-Topics: Radiative Forcings. \nProperties: 85 (42 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:54:27\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'nerc', 'sandbox-2', 'toplevel')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties\n2. Key Properties --&gt; Flux Correction\n3. Key Properties --&gt; Genealogy\n4. Key Properties --&gt; Software Properties\n5. Key Properties --&gt; Coupling\n6. Key Properties --&gt; Tuning Applied\n7. Key Properties --&gt; Conservation --&gt; Heat\n8. Key Properties --&gt; Conservation --&gt; Fresh Water\n9. Key Properties --&gt; Conservation --&gt; Salt\n10. Key Properties --&gt; Conservation --&gt; Momentum\n11. Radiative Forcings\n12. Radiative Forcings --&gt; Greenhouse Gases --&gt; CO2\n13. Radiative Forcings --&gt; Greenhouse Gases --&gt; CH4\n14. Radiative Forcings --&gt; Greenhouse Gases --&gt; N2O\n15. Radiative Forcings --&gt; Greenhouse Gases --&gt; Tropospheric O3\n16. Radiative Forcings --&gt; Greenhouse Gases --&gt; Stratospheric O3\n17. Radiative Forcings --&gt; Greenhouse Gases --&gt; CFC\n18. Radiative Forcings --&gt; Aerosols --&gt; SO4\n19. Radiative Forcings --&gt; Aerosols --&gt; Black Carbon\n20. Radiative Forcings --&gt; Aerosols --&gt; Organic Carbon\n21. Radiative Forcings --&gt; Aerosols --&gt; Nitrate\n22. Radiative Forcings --&gt; Aerosols --&gt; Cloud Albedo Effect\n23. Radiative Forcings --&gt; Aerosols --&gt; Cloud Lifetime Effect\n24. Radiative Forcings --&gt; Aerosols --&gt; Dust\n25. Radiative Forcings --&gt; Aerosols --&gt; Tropospheric Volcanic\n26. Radiative Forcings --&gt; Aerosols --&gt; Stratospheric Volcanic\n27. Radiative Forcings --&gt; Aerosols --&gt; Sea Salt\n28. Radiative Forcings --&gt; Other --&gt; Land Use\n29. Radiative Forcings --&gt; Other --&gt; Solar \n1. Key Properties\nKey properties of the model\n1.1. Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTop level overview of coupled model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of coupled model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Flux Correction\nFlux correction properties of the model\n2.1. 
Details\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe if/how flux corrections are applied in the model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.flux_correction.details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3. Key Properties --&gt; Genealogy\nGenealogy and history of the model\n3.1. Year Released\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nYear the model was released", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3.2. CMIP3 Parent\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCMIP3 parent if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3.3. CMIP5 Parent\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCMIP5 parent if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3.4. Previous Name\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nPreviously known as", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Software Properties\nSoftware properties of model\n4.1. Repository\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nLocation of code for this component.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.2. Code Version\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCode version identifier.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.3. Code Languages\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nCode language(s).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.4. Components Structure\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe how model realms are structured into independent software components (coupled via a coupler) and internal software components.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.5. 
Coupler\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nOverarching coupling framework for model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"OASIS\" \n# \"OASIS3-MCT\" \n# \"ESMF\" \n# \"NUOPC\" \n# \"Bespoke\" \n# \"Unknown\" \n# \"None\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "5. Key Properties --&gt; Coupling\n**\n5.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of coupling in the model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.coupling.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.2. Atmosphere Double Flux\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the atmosphere passing a double flux to the ocean and sea ice (as opposed to a single one)?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "5.3. Atmosphere Fluxes Calculation Grid\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nWhere are the air-sea fluxes calculated", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Atmosphere grid\" \n# \"Ocean grid\" \n# \"Specific coupler grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "5.4. Atmosphere Relative Winds\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nAre relative or absolute winds used to compute the flux? I.e. do ocean surface currents enter the wind stress calculation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6. Key Properties --&gt; Tuning Applied\nTuning methodology for model\n6.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics/diagnostics retained. Document the relative weight given to climate performance metrics/diagnostics versus process oriented metrics/diagnostics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.2. Global Mean Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList set of metrics/diagnostics of the global mean state used in tuning model", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.3. Regional Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList of regional metrics/diagnostics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.4. Trend Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList observed trend metrics/diagnostics used in tuning model/component (such as 20th century)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.5. Energy Balance\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how energy balance was obtained in the full system: in the various components independently or at the components coupling stage?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.6. Fresh Water Balance\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how fresh_water balance was obtained in the full system: in the various components independently or at the components coupling stage?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7. Key Properties --&gt; Conservation --&gt; Heat\nGlobal heat convervation properties of the model\n7.1. Global\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe if/how heat is conserved globally", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. Atmos Ocean Interface\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how heat is conserved at the atmosphere/ocean coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.3. Atmos Land Interface\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe if/how heat is conserved at the atmosphere/land coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.4. 
Atmos Sea-ice Interface\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how heat is conserved at the atmosphere/sea-ice coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.5. Ocean Seaice Interface\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how heat is conserved at the ocean/sea-ice coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.6. Land Ocean Interface\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how heat is conserved at the land/ocean coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8. Key Properties --&gt; Conservation --&gt; Fresh Water\nGlobal fresh water convervation properties of the model\n8.1. Global\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe if/how fresh_water is conserved globally", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Atmos Ocean Interface\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how fresh_water is conserved at the atmosphere/ocean coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.3. Atmos Land Interface\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe if/how fresh water is conserved at the atmosphere/land coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.4. Atmos Sea-ice Interface\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how fresh water is conserved at the atmosphere/sea-ice coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.5. Ocean Seaice Interface\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how fresh water is conserved at the ocean/sea-ice coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.6. 
Runoff\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe how runoff is distributed and conserved", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.7. Iceberg Calving\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how iceberg calving is modeled and conserved", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.8. Endoreic Basins\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how endoreic basins (no ocean access) are treated", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.9. Snow Accumulation\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe how snow accumulation over land and over sea-ice is treated", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9. Key Properties --&gt; Conservation --&gt; Salt\nGlobal salt convervation properties of the model\n9.1. Ocean Seaice Interface\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how salt is conserved at the ocean/sea-ice coupling interface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Key Properties --&gt; Conservation --&gt; Momentum\nGlobal momentum convervation properties of the model\n10.1. Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how momentum is conserved in the model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11. Radiative Forcings\nRadiative forcings of the model for historical and scenario (aka Table 12.1 IPCC AR5)\n11.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of radiative forcings (GHG and aerosols) implementation in model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12. Radiative Forcings --&gt; Greenhouse Gases --&gt; CO2\nCarbon dioxide forcing\n12.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "12.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13. Radiative Forcings --&gt; Greenhouse Gases --&gt; CH4\nMethane forcing\n13.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14. Radiative Forcings --&gt; Greenhouse Gases --&gt; N2O\nNitrous oxide forcing\n14.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15. Radiative Forcings --&gt; Greenhouse Gases --&gt; Tropospheric O3\nTroposheric ozone forcing\n15.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. 
via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "16. Radiative Forcings --&gt; Greenhouse Gases --&gt; Stratospheric O3\nStratospheric ozone forcing\n16.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17. Radiative Forcings --&gt; Greenhouse Gases --&gt; CFC\nOzone-depleting and non-ozone-depleting fluorinated gases forcing\n17.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.2. Equivalence Concentration\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDetails of any equivalence concentrations used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"Option 1\" \n# \"Option 2\" \n# \"Option 3\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.3. 
Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18. Radiative Forcings --&gt; Aerosols --&gt; SO4\nSO4 aerosol forcing\n18.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "18.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "19. Radiative Forcings --&gt; Aerosols --&gt; Black Carbon\nBlack carbon aerosol forcing\n19.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "19.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "20. Radiative Forcings --&gt; Aerosols --&gt; Organic Carbon\nOrganic carbon aerosol forcing\n20.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "20.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "21. Radiative Forcings --&gt; Aerosols --&gt; Nitrate\nNitrate forcing\n21.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "21.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22. Radiative Forcings --&gt; Aerosols --&gt; Cloud Albedo Effect\nCloud albedo effect forcing (RFaci)\n22.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "22.2. Aerosol Effect On Ice Clouds\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nRadiative effects of aerosols on ice clouds are represented?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "22.3. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "23. Radiative Forcings --&gt; Aerosols --&gt; Cloud Lifetime Effect\nCloud lifetime effect forcing (ERFaci)\n23.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23.2. Aerosol Effect On Ice Clouds\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nRadiative effects of aerosols on ice clouds are represented?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "23.3. RFaci From Sulfate Only\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nRadiative forcing from aerosol cloud interactions from sulfate aerosol only?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "23.4. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "24. Radiative Forcings --&gt; Aerosols --&gt; Dust\nDust forcing\n24.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "24.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "25. Radiative Forcings --&gt; Aerosols --&gt; Tropospheric Volcanic\nTropospheric volcanic forcing\n25.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "25.2. Historical Explosive Volcanic Aerosol Implementation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow explosive volcanic aerosol is implemented in historical simulations", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Type A\" \n# \"Type B\" \n# \"Type C\" \n# \"Type D\" \n# \"Type E\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "25.3. Future Explosive Volcanic Aerosol Implementation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow explosive volcanic aerosol is implemented in future simulations", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Type A\" \n# \"Type B\" \n# \"Type C\" \n# \"Type D\" \n# \"Type E\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "25.4. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "26. Radiative Forcings --&gt; Aerosols --&gt; Stratospheric Volcanic\nStratospheric volcanic forcing\n26.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "26.2. 
Historical Explosive Volcanic Aerosol Implementation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow explosive volcanic aerosol is implemented in historical simulations", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Type A\" \n# \"Type B\" \n# \"Type C\" \n# \"Type D\" \n# \"Type E\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "26.3. Future Explosive Volcanic Aerosol Implementation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow explosive volcanic aerosol is implemented in future simulations", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Type A\" \n# \"Type B\" \n# \"Type C\" \n# \"Type D\" \n# \"Type E\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "26.4. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "27. Radiative Forcings --&gt; Aerosols --&gt; Sea Salt\nSea salt forcing\n27.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "27.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "28. Radiative Forcings --&gt; Other --&gt; Land Use\nLand use forcing\n28.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"M\" \n# \"Y\" \n# \"E\" \n# \"ES\" \n# \"C\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "28.2. Crop Change Only\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nLand use change represented via crop change only?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "28.3. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "29. Radiative Forcings --&gt; Other --&gt; Solar\nSolar forcing\n29.1. Provision\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nHow solar forcing is provided", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"N/A\" \n# \"irradiance\" \n# \"proton\" \n# \"electron\" \n# \"cosmic ray\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "29.2. Additional Information\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAdditional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
maibkey/udacity
泰坦尼克号生存率的影响因素/.ipynb_checkpoints/taitannikehao-checkpoint.ipynb
mit
[ "关于泰坦尼克号生存率的数据分析\n首先通过观察数据,可以了解到每位旅客的详细数据:\n\nSurvived:是否存活(0代表否,1代表是)\nPclass:舱位(一等舱,二等舱,三等舱)\nName:船上乘客的名字\nSex:船上乘客的性别\nAge:船上乘客的年龄(可能存在 NaN)\nSibSp:乘客在船上的兄弟姐妹和配偶的数量\nParch:乘客在船上的父母以及小孩的数量\nTicket:乘客船票的编号\nFare:乘客为船票支付的费用\nCabin:乘客所在船舱的编号(可能存在 NaN)\nEmbarked:乘客上船的港口(C 代表从 Cherbourg 登船,Q 代表从 Queenstown 登船,S 代表从 Southampton 登船)\n\n通过对原始数据的初步观察可以发现存活率和社会等级,性别,年龄,在船上的兄弟姐妹和配偶数量,在船上的父母以及小孩的数量有着某种联系。因此根据初步推测可以提出以下几个问题并进行分析:\n - 乘客的存活率和其社会等级是否有关系?是否社会等级越高存活率就越高?\n - 乘客的存活率和其性别,年龄又有什么关系?\n - 乘客的存活率和其在船上的兄弟姐妹和配偶数量,父母以及小孩的数量又有什么联系?", "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pylab as pl\n%matplotlib inline\nfilename = './titanic-data.csv'\ntitanic_df = pd.read_csv(filename)\ntitanic_df.describe()\n", "首先,我们观察一下几个比较重要的数值,初步得出一些结论,比如只有‘Age’这一列存在缺失值,整体的存活率只有0.383838。所以首先应该对年龄的缺失值进行填充。", "\ntitanic_df = titanic_df.fillna(method='pad')#用前一个数值填充\n\ntitanic_df.describe()", "可以看出年龄这一列数据的总数正常了,为891,接下来可以进一步分析生存率了。", "sort_pclass = titanic_df.groupby('Pclass').count()['PassengerId']\nprint sort_pclass\ntitanic_df.groupby('Pclass')['PassengerId'].count().plot(kind = 'pie',autopct = '%.0f%%')\nplt.title('Pclass VS Count')\nplt.show()\n\nPclass_survived = titanic_df.groupby('Pclass').mean()['Survived']\nprint Pclass_survived.plot.bar()", "根据以上不同舱位人数所占比例和关于生存率的直方图可以看出头等舱的生存率最高,经济舱的生存率最低。虽然头等舱的人数占总人数的比例很少,生存率却极高,三等舱的人数超过一半,而生存率确只有20%,间接的说明了一个现实问题:社会地位越高生存机率越高。或者说头等舱的安全措施很高", "sort_sex = titanic_df.groupby('Sex').count()['PassengerId']\nprint sort_sex\nSex_survived = titanic_df.groupby('Sex').mean()['Survived']\nprint Sex_survived\nprint Sex_survived.plot.bar()", "我们可以清晰的看到虽然船上的男性人数显著多于女性人数,但是女性的存活率高达74%,而男性的存活率只有19%。这说明在逃生的时候会有男性保护女性的情况。一般是女先男后。", "titanic_df['Age_bins'] = pd.cut(titanic_df['Age'],range(0,80,10))\nAge_survived = titanic_df.groupby('Age_bins').mean()['Survived']\nSort_survived = titanic_df.groupby('Age_bins').count()['Survived']\nprint Age_survived\nprint Sort_survived \n\n\nAge_survived.plot(kind='bar', stacked=True)", "可见0~10岁的儿童成活率是最高的,也说明了在家长陪同下的婴幼儿受到了很好的保护,超过60岁的老年人成活率非常低,由此我们可以推测老年人可能会因为年迈行动不便而导致在灾难中无法及时脱身。在10~60各个年龄阶段的生存率几本相等。", "sort_SibSp = titanic_df.groupby('SibSp').count()['PassengerId']\nprint sort_SibSp\ntitanic_df.groupby('SibSp')['PassengerId'].count().plot(kind = 'pie',autopct = '%.0f%%')\nplt.title('SibSp VS Count')\nplt.show()\n\nSibSp_survived = titanic_df.groupby('SibSp').mean()['Survived']\nprint SibSp_survived\nSibSp_survived.plot.bar()\n\n\nsort_Parch = titanic_df.groupby('Parch').count()['PassengerId']\nprint sort_Parch\ntitanic_df.groupby('Parch')['PassengerId'].count().plot(kind = 'pie',autopct = '%.0f%%')\nplt.title('Parch VS Count')\nplt.show()\n\n\nParch_survived = titanic_df.groupby('Parch').mean()['Survived']\nprint Parch_survived\nParch_survived.plot.bar()", "通过乘客在船上的家庭成员数量与生存率的折线图可以推测:家庭成员数在1~3之间的时候生存率是比较高的,在船上没有家庭成员的相对有1~3个的生存率就比较低了,当家庭成员的数量超过4的时候就更低了,甚至到6~8个的生存率直接为零.\n\n### 综合以上分析可以得出结论\n根据对数据的一些分析得到的结果基本和猜测的一致,女性生存率比男性的3倍还要多,对舱位的大体分析可以看出头等舱二等舱的生存率是比较高的,这也客观的反映了当时对富人阶级的优待,可猜测在头等舱和二等舱放的救生艇会更多,关于年龄,只能明确得出0~10岁的生存率最高,老年人最低,符合常理。\n #### 局限性\n - 可能会存在数据的缺失,也就是说这个样本数据不能代表整体数据,例如有的时候会存在一票多用的情况,这个时候计算结果会有偏差。\n - 在分析年龄这一列的时候,使用的是前面的一个数填充缺失值,并不能代表真是数据,因此对年龄和生存率的分析也会存在一些偏差。\n - 在分析单变量与生存率的关系的时候,可能会有其他不确定性因素的影响。\n\n\n参考文献:\n\n\n泰坦尼克号的详细参考文献\n\npandas的可视化函数\nPython 数据可视化入门\n数据缺失值的处理" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
mit-crpg/openmc
examples/jupyter/triso.ipynb
mit
[ "Modeling TRISO Particles\nOpenMC includes a few convenience functions for generationing TRISO particle locations and placing them in a lattice. To be clear, this capability is not a stochastic geometry capability like that included in MCNP. It's also important to note that OpenMC does not use delta tracking, which would normally speed up calculations in geometries with tons of surfaces and cells. However, the computational burden can be eased by placing TRISO particles in a lattice.", "%matplotlib inline\nfrom math import pi\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport openmc\nimport openmc.model", "Let's first start by creating materials that will be used in our TRISO particles and the background material.", "fuel = openmc.Material(name='Fuel')\nfuel.set_density('g/cm3', 10.5)\nfuel.add_nuclide('U235', 4.6716e-02)\nfuel.add_nuclide('U238', 2.8697e-01)\nfuel.add_nuclide('O16', 5.0000e-01)\nfuel.add_element('C', 1.6667e-01)\n\nbuff = openmc.Material(name='Buffer')\nbuff.set_density('g/cm3', 1.0)\nbuff.add_element('C', 1.0)\nbuff.add_s_alpha_beta('c_Graphite')\n\nPyC1 = openmc.Material(name='PyC1')\nPyC1.set_density('g/cm3', 1.9)\nPyC1.add_element('C', 1.0)\nPyC1.add_s_alpha_beta('c_Graphite')\n\nPyC2 = openmc.Material(name='PyC2')\nPyC2.set_density('g/cm3', 1.87)\nPyC2.add_element('C', 1.0)\nPyC2.add_s_alpha_beta('c_Graphite')\n\nSiC = openmc.Material(name='SiC')\nSiC.set_density('g/cm3', 3.2)\nSiC.add_element('C', 0.5)\nSiC.add_element('Si', 0.5)\n\ngraphite = openmc.Material()\ngraphite.set_density('g/cm3', 1.1995)\ngraphite.add_element('C', 1.0)\ngraphite.add_s_alpha_beta('c_Graphite')", "To actually create individual TRISO particles, we first need to create a universe that will be used within each particle. The reason we use the same universe for each TRISO particle is to reduce the total number of cells/surfaces needed which can substantially improve performance over using unique cells/surfaces in each.", "# Create TRISO universe\nspheres = [openmc.Sphere(r=1e-4*r)\n for r in [215., 315., 350., 385.]]\ncells = [openmc.Cell(fill=fuel, region=-spheres[0]),\n openmc.Cell(fill=buff, region=+spheres[0] & -spheres[1]),\n openmc.Cell(fill=PyC1, region=+spheres[1] & -spheres[2]),\n openmc.Cell(fill=SiC, region=+spheres[2] & -spheres[3]),\n openmc.Cell(fill=PyC2, region=+spheres[3])]\ntriso_univ = openmc.Universe(cells=cells)", "Next, we need a region to pack the TRISO particles in. We will use a 1 cm x 1 cm x 1 cm box centered at the origin.", "min_x = openmc.XPlane(x0=-0.5, boundary_type='reflective')\nmax_x = openmc.XPlane(x0=0.5, boundary_type='reflective')\nmin_y = openmc.YPlane(y0=-0.5, boundary_type='reflective')\nmax_y = openmc.YPlane(y0=0.5, boundary_type='reflective')\nmin_z = openmc.ZPlane(z0=-0.5, boundary_type='reflective')\nmax_z = openmc.ZPlane(z0=0.5, boundary_type='reflective')\nregion = +min_x & -max_x & +min_y & -max_y & +min_z & -max_z", "Now we need to randomly select locations for the TRISO particles. In this example, we will select locations at random within the box with a packing fraction of 30%. 
Note that pack_spheres can handle up to the theoretical maximum of 60% (it will just be slow).", "outer_radius = 425.*1e-4\ncenters = openmc.model.pack_spheres(radius=outer_radius, region=region, pf=0.3)", "Now that we have the locations of the TRISO particles determined and a universe that can be used for each particle, we can create the TRISO particles.", "trisos = [openmc.model.TRISO(outer_radius, triso_univ, center) for center in centers]", "Each TRISO object actually is a Cell, in fact; we can look at the properties of the TRISO just as we would a cell:", "print(trisos[0])", "Let's confirm that all our TRISO particles are within the box.", "centers = np.vstack([triso.center for triso in trisos])\nprint(centers.min(axis=0))\nprint(centers.max(axis=0))", "We can also look at what the actual packing fraction turned out to be:", "len(trisos)*4/3*pi*outer_radius**3", "Now that we have our TRISO particles created, we need to place them in a lattice to provide optimal tracking performance in OpenMC. We can use the box we created above to place the lattice in. Actually creating a lattice containing TRISO particles can be done with the model.create_triso_lattice() function. This function requires that we give it a list of TRISO particles, the lower-left coordinates of the lattice, the pitch of each lattice cell, the overall shape of the lattice (number of cells in each direction), and a background material.", "box = openmc.Cell(region=region)\nlower_left, upper_right = box.region.bounding_box\nshape = (3, 3, 3)\npitch = (upper_right - lower_left)/shape\nlattice = openmc.model.create_triso_lattice(\n trisos, lower_left, pitch, shape, graphite)", "Now we can set the fill of our box cell to be the lattice:", "box.fill = lattice", "Finally, let's take a look at our geometry by putting the box in a universe and plotting it. We're going to use the Fortran-side plotter since it's much faster.", "universe = openmc.Universe(cells=[box])\n\ngeometry = openmc.Geometry(universe)\ngeometry.export_to_xml()\n\nmaterials = list(geometry.get_all_materials().values())\nopenmc.Materials(materials).export_to_xml()\n\nsettings = openmc.Settings()\nsettings.run_mode = 'plot'\nsettings.export_to_xml()\n\nplot = openmc.Plot.from_geometry(geometry)\nplot.to_ipython_image()", "If we plot the universe by material rather than by cell, we can see that the entire background is just graphite.", "plot.color_by = 'material'\nplot.colors = {graphite: 'gray'}\nplot.to_ipython_image()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
therealAJ/python-sandbox
data-science/learning/ud1/DataScience/NaiveBayes.ipynb
gpl-3.0
[ "Naive Bayes (the easy way)\nWe'll cheat by using sklearn.naive_bayes to train a spam classifier! Most of the code is just loading our training data into a pandas DataFrame that we can play with:", "import os\nimport io\nimport numpy\nfrom pandas import DataFrame\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\n\ndef readFiles(path):\n for root, dirnames, filenames in os.walk(path):\n for filename in filenames:\n path = os.path.join(root, filename)\n\n inBody = False\n lines = []\n f = io.open(path, 'r', encoding='latin1')\n for line in f:\n if inBody:\n lines.append(line)\n elif line == '\\n':\n inBody = True\n f.close()\n message = '\\n'.join(lines)\n yield path, message\n\n\ndef dataFrameFromDirectory(path, classification):\n rows = []\n index = []\n for filename, message in readFiles(path):\n rows.append({'message': message, 'class': classification})\n index.append(filename)\n\n return DataFrame(rows, index=index)\n\ndata = DataFrame({'message': [], 'class': []})\n\ndata = data.append(dataFrameFromDirectory('e:/sundog-consult/Udemy/DataScience/emails/spam', 'spam'))\ndata = data.append(dataFrameFromDirectory('e:/sundog-consult/Udemy/DataScience/emails/ham', 'ham'))\n", "Let's have a look at that DataFrame:", "data.head()", "Now we will use a CountVectorizer to split up each message into its list of words, and throw that into a MultinomialNB classifier. Call fit() and we've got a trained spam filter ready to go! It's just that easy.", "vectorizer = CountVectorizer()\ncounts = vectorizer.fit_transform(data['message'].values)\n\nclassifier = MultinomialNB()\ntargets = data['class'].values\nclassifier.fit(counts, targets)", "Let's try it out:", "examples = ['Free Viagra now!!!', \"Hi Bob, how about a game of golf tomorrow?\"]\nexample_counts = vectorizer.transform(examples)\npredictions = classifier.predict(example_counts)\npredictions", "Activity\nOur data set is small, so our spam classifier isn't actually very good. Try running some different test emails through it and see if you get the results you expect.\nIf you really want to challenge yourself, try applying train/test to this spam classifier - see how well it can predict some subset of the ham and spam emails." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
4dsolutions/Python5
Clock Arithmetic.ipynb
mit
[ "Oregon Curriculum Network <br />\nDiscovering Math with Python\nChapter 4: CLOCK ARITHMETIC\nCalling modulo arithmetic \"clock arithmetic\" is a way of calling attention to the finite cyclic nature of always taking remainders, factoring away the \"modulus\". What does that mean?\nClock Arithmetic is important to Number Theory, and Number Theory is important to crytography.\nCyptography, in the not so distant past, would not concern an average civilian, whereas today the HTTPS protocol in every mainstream web browser (Mozilla, Chrome, IE, Safari, Opera...) is capable of implementing what we called TLS, or Transport Layer Security.\nThe web browser client, and server, will handshake, meaning agree on what cryptographic strategy to use: first a public one, to exchange a symmetric key, and then a symmetric one, most likely AES at the time of this writing.\nAES is the Advanced Encryption Standard, the winner of a global contest as adjudged by NIST. AES uses \"clock arithemetic\" internally in order to apply several layers of \"mixing it up\" by invertible methods. Enciphering is reverisble (the whole point) but only if the key is handy.\nIn Python, one of our primitive operators is %, for \"modulo\", which relates to the built-in function divmod(&nbsp;). Lets see how % and divmod do their jobs:", "(5 + 5) % 10 # no remainder\n\ndivmod(10, 5) # 5 goes into 10 twice, no remainder\n\n19 % 10 # divide by 10, give the remainder", "Try doing a number of these yourself. Guess the answer before hitting the Shift-Enter key.\nYou may find your first years of formal schooling did not emphasize cyclic arithmetic, except in the two most common forms, of telling time and scheduling appointments. We understand both the 24 hour day, and the 365 day year, comprise a modulus such that we're always talking about our degree of offset into each one. Adding hours or days, per a timedelta object, keeps us within the scope of some calendar.\nThe unit circle of 360 degrees, or 2 pi radians, is also treated in a clock-like fashion. Going around thousands of degrees from some present position, never takes us beyond 360. We're confined to a finite domain. Cyclic phenomena more generally are accommodated by our clock arithmetic, of complex numbers, of e to imaginary powers.\nThere's a kind of \"closure\" in this picture, which might be resonating with you by now, as one of those properties of a group. Will we find groups in this Chapter? You bet. Groups also enjoy the symmetry of a circle in that each member is paired with another \"180 degrees opposite\" although sometimes the inverse might be the same as the self.\nAgain, we call this \"clock arithmetic\" because even if we say \"20 hours from now\", or \"30 hours ago\" we'll still be somewhere on the circle marked out into 12 intervals, each representing one hour. The yearly calendar is likewise a kind of modulus. No matter how many days we add, we're still somewhere between 0 and 365 days into some year.\nWe also call it \"modulo arithmetic\" meaning the computations are always vis-a-vis some fixed modulus. 
This word \"modulus\" is our inspiration for the M-numbers coded below.\nA Class for M-numbers\nLets build a class, the instances of which will multiply and add per some fixed modulus, meaning we're always factoring out the modulus and keeping the remainder.", "class M: # for \"modulo\"\n \n modulus = 10 # class level\n \n def __init__(self, val, modulus=None):\n if modulus:\n self.modulus = M.modulus = modulus # resettable\n else:\n self.modulus = M.modulus\n self.val = val % M.modulus\n \n def __add__(self, other):\n if self.modulus != other.modulus:\n raise ValueError\n return M((self.val + other.val) % self.modulus)\n \n def __mul__(self, other):\n if self.modulus != other.modulus:\n raise ValueError\n return M((self.val * other.val) % self.modulus)\n \n def __pow__(self, exp):\n raise NotImplemented\n \n def __repr__(self):\n return \"(\" + str(self.val) + \" mod \" + str(self.modulus)+ \")\"\n \na = M(8)\nb = M(7)\nprint(a, b)\nprint(\"(8 mod 10) * (7 mod 10) = \", a * b)\nprint(\"(8 mod 10) + (7 mod 10) = \", a + b)", "OK, everything seems to be working, even though we haven't implemented powering yet. Eventually we'd like to go pow(M(3), -1) to get the inverse of M(3), such that M(3) times its inverse equals the multiplicative identity M(1).\nLooking for Groups\nBut wait, does every M-number, with modulus set to 10, have an inverse? We can check that easily. First, lets make a list of all 10 M-numbers, (0) through (9):", "elems = [M(n) for n in range(10)]\nelems", "Now we can do like a \"times table\" wherein we pick a single M-number, say M(5), and multiply it by every number in elems...", "[M(5) * x for x in elems] ", "Interesting. In ordinary arithematic, the times table for 5 goes 0, 5, 10, 15, 20, 25, 30... and so on. Factoring out the 10s, leaving only remainders, we get M(0) or M(5) as our two M-numbers. We have no way to reach M(1) and so M(5) has no inverse. We don't have a group yet.\nLets try M(2):", "[M(2) * x for x in elems]", "Same thing. We cycle around and around, always stopping at the same stations, like a model train going in a circle. We never stop at station M(1). So M(2) has no inverse either. What about M(3)?", "[M(3) * x for x in elems]", "Aha! Now we're getting somewhere. M(3) * M(7) returns M(1), so these two are inverses of one another. One fact to notice immediately is neither has any factors in common with 10. In fact, both are prime numbers in the ordinary integer sense. M(9) is not prime, but again, 9 has no factors in common with 10. So does M(9) have an inverse? Lets check:", "[M(9) * x for x in elems]", "Indeed it does. M(9) * M(9) = M(1), meaning M(9) is its own inverse.\nWhen positive integers have no factors in common, aside from 1, we say they're \"relatively prime\" or that they're \"strangers\" to one another. 3 and 10 are strangers, as are 9 and 10. Sometimes we write 7 | 10 = 1, meaning their greatest factor in common is 1. On the other hand, 6 | 10 = 2, as 2 divides into both without remainder.\nWhat we're about to discover is the strangers to a modulus comprise a group, i.e. M(n) where n | N = 1 and M.modulus = N. A Cartesian product of such \"minions\" would demonstrate closure, inverse for everyone, naturally a neutral element, and associativity i.e. makes no difference if you go M(a) * M(b) * M(c) by starting with either M(a) * M(b) or M(b) * M(c). Commutativity is not a requirement for grouphood.\nFinding Totatives\nIt'd sure be handy at this point, to have a function, gcd, that returns the greatest common divisor of two numbers. 
Then we could find all the strangers to 10, that are positive integers less than 10, pretty easily. They would have no common divisor with 10. \nWe call these strangers the \"totatives\" of 10, and the number of totatives is called the \"totient\" of 10.\nNote that we're using list comprehension syntax in this example. \nThe for loop inside the square brackets makes x be every integer from 0 to modulus - 1, but then most of these get filtered out, because of factors in common with the modulus.", "import math\nmodulus = 10\n[x for x in range(modulus) if math.gcd(x, modulus) == 1]\n\nmodulus = 12\n[x for x in range(modulus) if math.gcd(x, modulus) == 1]", "That was pretty easy. Right away we're getting totatives. We could be using set comprehension syntax instead.\nUsing Python's \"list comprehension\" syntax, which allows an if clause for filtering, we were able to get the totatives of 10 and 12 respectively. Both 10 and 12 have a totient of four, meaning each has four totatives.\nWhat's true is that the M-numbers that are totatives of some modulus (say 10), collectively form a group under multiplication. \nIf we confine ourselves to strangers to the modulus, we'll have an inverse for every element, closure, associativity, and M(1) will be included. That's a quick way to get a group.\nWe don't have closure for addition though. If we want to include addition, along with its neutral element zero, then we'll need to make our modulus a prime number. Then we will be guaranteed a field, a Galois Field to be more precise, though in German they say it differently.\nBefore we jump to fields though, lets build a function for computing totatives and use that to build some groups. \nA \"set comprehension\" is just like a list comprehension in terms of syntax, but the curly braces make it for a set, instead of a list. \nSets are unordered and their elements are always unique (no duplicates). Since we know totatives are unique, we might as well practice using a set object.", "def totatives(n):\n    return {x for x in range(n) if math.gcd(x, n) == 1}\n\nelems = {M(x, 12) for x in totatives(12)}\nelems", "What we just did there was compose a group of M-number objects, of twelve's four strangers. If you're using this Notebook live, here might be a good time to insert some code cells of your own, testing out whether the Cayley Table, i.e. the everything by everything multiplication table, really shows closure, and 1 in every row (proof of inverse element).", "M.modulus = 100\nelems = {M(x, 100) for x in totatives(100)} # set comprehension\nelems # print M-number totatives in no special order -- it's a set", "Note that set objects cannot be ordered even if we might like them to be, as by definition they're not sequences. Does Python have an OrderedSet corresponding to an OrderedDict?\nFinding Inverses\nIf we confine ourselves to a set of strangers, we're safe in assuming there's always an inverse for any given element. That suggests a \"brute force\" way of finding any element's inverse: just multiply it by every element in the same set of strangers, until their product is 1 (the identity element). The function below accomplishes this:", "def inverse(n : M, elems : set) -> M:\n    for m in elems:\n        if (n * m).val == 1:\n            return m\n\ninverse(M(79), elems)\n\nM(19) * M(79)", "Finding Totients\nWe'll need this concept in the next chapter on Public Key Cryptography. The totient of N is simply the number of totatives it has. We can use len() to simply count them. 
That won't always be practical, when N gets huge, but for now this approach makes the concept easy to grasp:", "def totient(n):\n    \"\"\"how many totatives have we?\"\"\"\n    return len(totatives(n))\n\ntotient(1_000_000) # using _ helps us parse the number, one million", "The number one million has a totient of 400,000, meaning that many numbers from one to one million are co-prime to it.\nAdding a Power Method\nWe now have the technology (tool set) we need to add the __pow__ method to our M class. A negative power triggers finding an inverse first (the -1 part), then we switch back to positive powering. We're saying $M(n)^{-e}$ equals $(M(n)^{-1})^{e}$.\nThis is how we treat exponents normally, e.g. 2 ** -2 equals (2 ** -1)**2 or 1/4.", "2 ** -2 == (2 ** -1) ** 2", "For example, pow(M(79), -2) means find the inverse of M(79) e.g. M(79)**-1, and then raise the result to the 2nd power.", "class M: # for \"modulo\"\n    \n    \"\"\"\n    Version 2, with inverting and powering added\n    Version 3, not listed, is in groups.py and uses xgcd (next Chapter)\n    \"\"\"\n    \n    modulus = 10 # class level\n    \n    def __init__(self, val, modulus=None):\n        if modulus:\n            self.modulus = M.modulus = modulus # resettable\n        else:\n            self.modulus = M.modulus\n        self.val = val % M.modulus\n    \n    def __add__(self, other):\n        if self.modulus != other.modulus:\n            raise ValueError\n        return M(self.val + other.val, self.modulus)\n    \n    def __mul__(self, other):\n        if self.modulus != other.modulus:\n            raise ValueError\n        return M(self.val * other.val, self.modulus)\n\n    def __invert__(self):\n        elems = {M(x) for x in totatives(self.modulus)}\n        for m in elems:\n            if (self * m).val == 1:\n                return m\n    \n    def __pow__(self, exp): # pow() and ** both trigger this method\n        output = self\n        if exp < 0:\n            exp = abs(exp) # make exp positive now\n            output = ~self # -1 means take the inverse\n        elif exp == 0:\n            output = M(1) # return identity if exp is 0\n        elif exp == 1:\n            output = self # return M if exp is 1\n        if exp > 1:\n            base = output # power the (possibly inverted) base, not self\n            for _ in range(1, exp): # use __mul__ (already defined)\n                output *= base\n        \n        return output\n    \n    def __repr__(self):\n        return \"(\" + str(self.val) + \" mod \" + str(M.modulus)+ \")\"\n\nM.modulus = 100\na = M(79)\na_inv = pow(a, -1)\na_inv\n\nM(13) * M(13) * M(13)\n\nM(13)**3 # confirm this is another way of saying it (we're testing)\n\nM(13)**-1 # give me the inverse of M(13) please\n\nM(13) * M(77) # these must be inverses then", "Now that we have our inverse function, we can ask questions such as:\n\nWhat is the inverse of M(5, 7)?\nWhat is the inverse of M(7, 5)?\nWhat is the inverse of M(97, 100)?", "print(\"What is the inverse of M(5, 7)?\", ~M(5,7))\nprint(\"What is the inverse of M(7, 5)?\", ~M(7,5)) # M(7,5) is same as M(2,5)\nprint(\"What is the inverse of M(97, 100)?\", ~M(97, 100))", "Primality Tests\nNow that we have a powering method in place, lets take a look at Fermat's Little Theorem. The logic here is useful, in the sense that we sometimes get tripped up by IF / THEN statements.\nIF a number p is prime, THEN M(A, p) ** p == A, where 0 < A < p. If some number n passes this test for a given A, where gcd(A, n) == 1, then that's evidence n might be prime, but not proof. If n in fact turns out to be composite, then A is called a Fermat Liar or Fermat Fooler.\nLets take a look. Is 511 prime? Lets find an A with no factors in common with 511.", "from math import gcd\ngcd(511, 25)\n\nn = M(25, 511) # n will power modulo 511\nn ** 511", "Here we say 25 is a \"witness\" to the fact that 511 is not prime. 
If it were, it would pass this Fermat test. How about 513?", "n = M(25, 513)\nn ** 513", "Nope, that's not passing either. Remember we expect A back out again, of 25 in this case. 25 is a witness that 513 is not prime. How about 523 then?", "n = M(19, 523)\nn ** 523\n\nn = M(25, 523)\nn ** 523\n\nn = M(31, 523)\nn ** 523", "OK, at last it appears we've struck gold. All of our As are testifying that 523 is a prime. Could all of these be liars?\nLets introduce the set of Carmichael Numbers, precisely those composites which sneak through the Fermat Test for any suitable base 1 < A < n.\n561 is the lowest Carmichael number.", "liars = [M(n, 561) for n in (19, 25, 31) if gcd(n, 561) == 1]\nliars\n\n[n**561 for n in liars] # all pass the Fermat primality test", "And yet 561 is a composite number, with prime factors (3, 11, 17).\nAnother test for primality, called the AKS Test, is more fool-proof. We'll need the coefficients of the polynomial (x - 1) ** p, where p is our candidate prime. Those coefficients may be read off Pascal's Triangle. If all coefficients but the first and last (which are 1) are divisible by p, then p is prime.\nLets try that out, first by writing a Python generator for successive rows of Pascal's Triangle:", "def pascal():\n    row = [1]\n    while True:\n        yield row\n        row = [(i+j) for i,j in zip(row + [0], [0] + row)]\n\ngen = pascal()\nfor i in range(10):\n    print(next(gen))", "That seems to be working. So Pascal's Triangle could be at the heart of a new iterator for producing successive prime numbers.", "def primes():\n    yield 2\n    p = 1\n    gen = pascal()\n    next(gen) # [1]\n    next(gen) # [1, 1]\n    while True:\n        p += 2 # 3, 5... check odd rows only, once 2 yielded\n        next(gen) # skip even row\n        r = set(next(gen)[1:-1]) # drop 1s on both ends and dedupe\n        if sum([coeff%p for coeff in r]) == 0:\n            yield p\n\nps = primes()\nprint(str([next(ps) for _ in range(100)]))", "Homework:\nWatch this Youtube on the Chinese Remainder Theorem (CRT). Be prepared to explain its utility. What Youtube do you find most elucidating, regarding the CRT?\nSo now it looks like we have another fun tool for exploring Group Theory, while learning Python at the same time!\nNow lets take what we've learned in this chapter and apply it to an important topic: Public Key Cryptography.\nBack to Chapter 3: A First Class <br />\nChapter 5: Public Key Cryptography <br />\nIntroduction" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
gsorianob/fiuba-python
.ipynb_checkpoints/Clase 04 - Excepciones, funciones lambda, búsquedas y ordenamientos-checkpoint.ipynb
apache-2.0
[ "27/10\nOrdenamientos y búsquedas\nFunciones anónimas.\nExcepciones. \nOrdenamiento de listas\nLas listas se pueden ordenar fácilmente usando la función sorted:", "lista_de_numeros = [1, 6, 3, 9, 5, 2]\nlista_ordenada = sorted(lista_de_numeros)\nprint lista_ordenada", "Pero, ¿y cómo hacemos para ordenarla de mayor a menor?. <br>\nSimple, interrogamos un poco a la función:\n```Python\n\n\n\nprint sorted.doc\nsorted(iterable, cmp=None, key=None, reverse=False) --> new sorted list\n``\nEntonces, con sólo pasarle el parámetro de *reverse* enTrue` debería alcanzar:", "lista_de_numeros = [1, 6, 3, 9, 5, 2]\nprint sorted(lista_de_numeros, reverse=True)", "¿Y si lo que quiero ordenar es una lista de registros?. <br>\nPodemos pasarle una función que sepa cómo comparar esos registros o una que sepa devolver la información que necesita comparar.", "import random\n\ndef crear_alumnos(cantidad_de_alumnos=5):\n nombres = ['Javier', 'Pablo', 'Ramiro', 'Lucas', 'Carlos']\n apellidos = ['Saviola', 'Aimar', 'Funes Mori', 'Alario', 'Sanchez']\n\n alumnos = []\n for i in range(cantidad_de_alumnos):\n a = {\n 'nombre': '{}, {}'.format(random.choice(apellidos), random.choice(nombres)),\n 'padron': random.randint(90000, 100000),\n 'nota': random.randint(4, 10)\n }\n alumnos.append(a)\n \n return alumnos\n\n\ndef imprimir_curso(lista):\n for idx, x in enumerate(lista, 1):\n print ' {pos:2}. {padron} - {nombre}: {nota}'.format(pos=idx, **x)\n\n\ndef obtener_padron(alumno):\n return alumno['padron']\n\n\ndef ordenar_por_padron(alumno1, alumno2):\n if alumno1['padron'] < alumno2['padron']:\n return -1\n elif alumno2['padron'] < alumno1['padron']:\n return 1\n else:\n return 0\n\ncurso = crear_alumnos()\nprint 'La lista tiene los alumnos:'\nimprimir_curso(curso)\n\nlista_ordenada = sorted(curso, key=obtener_padron)\nprint 'Y la lista ordenada por padrón:'\nimprimir_curso(lista_ordenada)\n\notra_lista_ordenada = sorted(curso, cmp=ordenar_por_padron)\nprint 'Y la lista ordenada por padrón:'\nimprimir_curso(otra_lista_ordenada)", "Búsquedas en listas\nPara saber si un elemento se encuentra en una lista, alcanza con usar el operador in:", "lista = [11, 4, 6, 1, 3, 5, 7]\n\nif 3 in lista:\n print '3 esta en la lista'\nelse:\n print '3 no esta en la lista'\n\nif 15 in lista:\n print '15 esta en la lista'\nelse:\n print '15 no esta en la lista'", "También es muy fácil saber si un elemento no esta en la lista:", "lista = [11, 4, 6, 1, 3, 5, 7]\n\nif 3 not in lista:\n print '3 NO esta en la lista'\nelse:\n print '3 SI esta en la lista'", "En cambio, si lo que queremos es saber es dónde se encuentra el número 3 en la lista es:", "lista = [11, 4, 6, 1, 3, 5, 7]\n\npos = lista.index(3)\nprint 'El 3 se encuentra en la posición', pos\n\npos = lista.index(15)\nprint 'El 15 se encuentra en la posición', pos", "Funciones anónimas\nHasta ahora, a todas las funciones que creamos les poníamos un nombre al momento de crearlas, pero cuando tenemos que crear funciones que sólo tienen una línea y no se usen en una gran cantidad de lugares se pueden usar las funciones lambda:", "help(\"lambda\")\n\nmi_funcion = lambda x, y: x+y\n\nresultado = mi_funcion(1,2)\nprint resultado", "Si bien no son funciones que se usen todos los días, se suelen usar cuando una función recibe otra función como parámetro (las funciones son un tipo de dato, por lo que se las pueden asignar a variables, y por lo tanto, también pueden ser parámetros).\nPor ejemplo, para ordenar los alumnos por padrón podríamos usar:\nPython\nsorted(curso, key=lambda x: 
x['padron'])\nAhora, si quiero ordenar la lista anterior por nota decreciente y, en caso de igualdad, por padrón podríamos usar:", "curso = crear_alumnos(15)\nprint 'Curso original'\nimprimir_curso(curso)\n\nlista_ordenada = sorted(curso, key=lambda x: (-x['nota'], x['padron']))\nprint 'Curso ordenado'\nimprimir_curso(lista_ordenada)", "Otro ejemplo podría ser implementar una búsqueda binaria que permita buscar tanto en listas crecientes como decrecientes:", "es_mayor = lambda n1, n2: n1 > n2\nes_menor = lambda n1, n2: n1 < n2\n\n\ndef binaria(cmp, lista, clave):\n \"\"\"Binaria es una función que busca en una lista la clave pasada. Es un requisito\n de la búsqueda binaria que la lista se encuentre ordenada, pero no si el orden\n es ascendente o descendente. Por este motivo es que también recibe una función\n que le indique en que sentido ir.\n Si la lista está ordenada en forma ascendente la función que se le pasa tiene\n que ser verdadera cuando el primer valor es mayor que la segundo; y falso en\n caso contrario.\n Si la lista está ordenada en forma descendente la función que se le pasa tiene\n que ser verdadera cuando el primer valor es menor que la segundo; y falso en\n caso contrario.\n \"\"\"\n min = 0\n max = len(lista) - 1\n centro = (min + max) / 2\n while (lista[centro] != clave) and (min < max):\n if cmp(lista[centro], clave):\n max = centro - 1\n else:\n min = centro + 1\n centro = (min + max) / 2\n if lista[centro] == clave:\n return centro\n else:\n return -1\n\nprint binaria(es_mayor, [1, 2, 3, 4, 5, 6, 7, 8, 9], 8)\nprint binaria(es_menor, [1, 2, 3, 4, 5, 6, 7, 8, 9], 8)\nprint binaria(es_mayor, [1, 2, 3, 4, 5, 6, 7, 8, 9], 123)\n\nprint binaria(es_menor, [9, 8, 7, 6, 5, 4, 3, 2, 1], 6)\n", "Excepciones\nUna excepción es la forma que tiene el intérprete de que indicarle al programador y/o usuario que ha ocurrido un error. Si la excepción no es controlada por el desarrollador ésta llega hasta el usuario y termina abruptamente la ejecución del sistema. <br>\nPor ejemplo:", "print 1/0", "Pero no hay que tenerle miedo a las excepciones, sólo hay que tenerlas en cuenta y controlarlas en el caso de que ocurran:", "dividendo = 1\ndivisor = 0\nprint 'Intentare hacer la división de %d/%d' % (dividendo, divisor)\ntry:\n resultado = dividendo / divisor\n print resultado\nexcept ZeroDivisionError:\n print 'No se puede hacer la división ya que el divisor es 0.'", "Pero supongamos que implementamos la regla de tres de la siguiente forma:", "def dividir(x, y):\n return x/y\n\ndef regla_de_tres(x, y, z):\n return dividir(z*y, x)\n\n\n# Si de 28 alumnos, aprobaron 15, el porcentaje de aprobados es de...\nporcentaje_de_aprobados = regla_de_tres(28, 15, 100)\nprint 'Porcentaje de aprobados: %0.2f %%' % porcentaje_de_aprobados", "En cambio, si le pasamos 0 en el lugar de x:", "resultado = regla_de_tres(0, 13, 100)\nprint 'Porcentaje de aprobados: %0.2f %%' % resultado", "Acá podemos ver todo el traceback o stacktrace, que son el cómo se fueron llamando las distintas funciones entre sí hasta que llegamos al error. <br>\nPero no es bueno que este tipo de excepciones las vea directamente el usuario, por lo que podemos controlarlas en distintos momentos. Se pueden controlar inmediatamente donde ocurre el error, como mostramos antes, o en cualquier parte de este stacktrace. 
<br>\nEn el caso de la regla_de_tres no nos conviene poner el try/except encerrando la línea x/y, ya que en ese punto no tenemos toda la información que necesitamos para informarle correctamente al usuario, por lo que podemos ponerla en:", "def dividir(x, y):\n return x/y\n\ndef regla_de_tres(x, y, z):\n resultado = 0\n try:\n resultado = dividir(z*y, x)\n except ZeroDivisionError:\n print 'No se puede calcular la regla de tres porque el divisor es 0'\n \n return resultado\n \nprint regla_de_tres(0, 1, 2)", "Pero en este caso igual muestra 0, por lo que si queremos, podemos poner los try/except incluso más arriba en el stacktrace:", "def dividir(x, y):\n return x/y\n\ndef regla_de_tres(x, y, z):\n return dividir(z*y, x)\n \ntry:\n print regla_de_tres(0, 1, 2)\nexcept ZeroDivisionError:\n print 'No se puede calcular la regla de tres porque el divisor es 0'\n", "Todos los casos son distintos y no hay UN lugar ideal dónde capturar la excepción; es cuestión del desarrollador decidir dónde conviene ponerlo para cada problema. <br>\nIncluso, una única línea puede lanzar distintas excepciones, por lo que capturar un tipo de excepción en particular no me asegura que el programa no pueda lanzar un error en esa línea que supuestamente es segura:\nCapturar múltiples excepciones\nEn algunos casos tenemos en cuenta que el código puede lanzar una excepción como la de ZeroDivisionError, pero eso puede no ser suficiente:", "def dividir_numeros(x, y):\n try:\n resultado = x/y\n print 'El resultado es: %s' % resultado\n except ZeroDivisionError:\n print 'ERROR: Ha ocurrido un error por mezclar tipos de datos'\n\ndividir_numeros(1, 0)\ndividir_numeros(10, 2)\ndividir_numeros(\"10\", 2)", "En esos casos podemos capturar más de una excepción de la siguiente forma:", "def dividir_numeros(x, y):\n try:\n resultado = x/y\n print 'El resultado es: %s' % resultado\n except TypeError:\n print 'ERROR: Ha ocurrido un error por mezclar tipos de datos'\n except ZeroDivisionError:\n print 'ERROR: Ha ocurrido un error de división por cero'\n except Exception:\n print 'ERROR: Ha ocurrido un error inesperado'\n\ndividir_numeros(1, 0)\ndividir_numeros(10, 2)\ndividir_numeros(\"10\", 2)", "Incluso, si queremos que los dos errores muestren el mismo mensaje podemos capturar ambas excepciones juntas:", "def dividir_numeros(x, y):\n try:\n resultado = x/y\n print 'El resultado es: %s' % resultado\n except (ZeroDivisionError, TypeError):\n print 'ERROR: No se puede calcular la división'\n\ndividir_numeros(1, 0)\ndividir_numeros(10, 2)\ndividir_numeros(\"10\", 2)", "Jerarquía de excepciones\nExiste una <a href=\"https://docs.python.org/2/library/exceptions.html\">jerarquía de excepciones</a>, de forma que si se sabe que puede venir un tipo de error, pero no se sabe exactamente qué excepción puede ocurrir siempre se puede poner una excepción de mayor jerarquía:\n<img src=\"excepciones.png\"/>\nPor lo que el error de división por cero se puede evitar como:", "try:\n print 1/0\nexcept ZeroDivisionError:\n print 'Ha ocurrido un error de división por cero'", "Y también como:", "try:\n print 1/0\nexcept Exception:\n print 'Ha ocurrido un error inesperado'", "Si bien siempre se puede poner Exception en lugar del tipo de excepción que se espera, no es una buena práctica de programación ya que se pueden esconder errores indeseados. 
Por ejemplo, un error de sintaxis.\nAdemás, cuando se lanza una excepción en el bloque try, el intérprete comienza a buscar entre todas cláusulas except una que coincida con el error que se produjo, o que sea de mayor jerarquía. Por lo tanto, es recomendable poner siempre las excepciones más específicas al principio y las más generales al final:\nPython\ndef dividir_numeros(x, y):\n try:\n resultado = x/y\n print 'El resultado es: %s' % resultado\n except TypeError:\n print 'ERROR: Ha ocurrido un error por mezclar tipos de datos'\n except ZeroDivisionError:\n print 'ERROR: Ha ocurrido un error de división por cero'\n except Exception:\n print 'ERROR: Ha ocurrido un error inesperado'\nSi el error no es capturado por ninguna clausula se propaga de la misma forma que si no se hubiera puesto nada.\nOtras cláusulas para el manejo de excepciones\nAdemás de las cláusulas try y except existen otras relacionadas con las excepciones que nos permiten manejar de mejor manera el flujo del programa:\n* else: se usa para definir un bloque de código que se ejecutará sólo si no ocurrió ningún error.\n* finally: se usa para definir un bloque de código que se ejecutará siempre, independientemente de si se lanzó una excepción o no.", "def dividir_numeros(x, y):\n try:\n resultado = x/y\n print 'El resultado es {}'.format(resultado)\n except ZeroDivisionError:\n print 'Error: División por cero'\n else:\n print 'Este mensaje se mostrará sólo si no ocurre ningún error'\n finally: \n print 'Este bloque de código se muestra siempre'\n\ndividir_numeros(1, 0)\nprint '-------------'\ndividir_numeros(10, 2)", "Pero entonces, ¿por qué no poner ese código dentro del try-except?. Porque tal vez no queremos capturar con las cláusulas except lo que se ejecute en ese bloque de código:", "def dividir_numeros(x, y):\n try:\n resultado = x/y\n print 'El resultado es {}'.format(resultado)\n except ZeroDivisionError:\n print 'Error: División por cero'\n else:\n print 'Ahora hago que ocurra una excepción'\n print 1/0\n finally: \n print 'Este bloque de código se muestra siempre'\n\ndividir_numeros(1, 0)\nprint '-------------'\ndividir_numeros(10, 2)", "Lanzar excepciones\nHasta ahora vimos cómo capturar un error y trabajar con él sin que el programa termine abruptamente, pero en algunos casos somos nosotros mismos quienes van a querer lanzar una excepción. Y para eso, usaremos la palabra reservada raise:", "def dividir_numeros(x, y):\n if y == 0:\n raise Exception('Error de división por cero')\n \n resultado = x/y\n print 'El resultado es {0}'.format(resultado)\n\ntry:\n dividir_numeros(1, 0)\nexcept ZeroDivisionError as e:\n print 'ERROR: División por cero'\nexcept Exception as e:\n print 'ERROR: ha ocurrido un error del tipo Exception'\n\nprint '----------'\ndividir_numeros(1, 0)\n", "Crear excepciones\nPero así como podemos usar las excepciones estándares, también podemos crear nuestras propias excepciones:\n```Python\nclass MiPropiaExcepcion(Exception):\ndef __str__(self):\n return 'Mensaje del error'\n\n```\nPor ejemplo:", "class ExcepcionDeDivisionPor2(Exception):\n \n def __str__(self):\n return 'ERROR: No se puede dividir por dos'\n \n\ndef dividir_numeros(x, y):\n if y == 2:\n raise ExcepcionDeDivisionPor2()\n \n resultado = x/y\n\ntry:\n dividir_numeros(1, 2)\nexcept ExcepcionDeDivisionPor2:\n print 'No se puede dividir por 2'\n\ndividir_numeros(1, 2)", "Para más información, ingresar a https://docs.python.org/2/tutorial/errors.html\nEjercicios\n\nSe leen dos listas A y B, de N y M elementos respectivamente. 
Construir un algoritmo que halle las listas unión e intersección de A y B. Previamente habrá que ordenarlos.\nEscribir una función que reciba una lista desordenada y un elemento, que:\nBusque todos los elementos coincidan con el pasado por parámetro y devuelva la cantidad de coincidencias encontradas.\nBusque la primera coincidencia del elemento en la lista y devuelva su posición.\nEscribir una función que reciba una lista de números no ordenada, que:\nDevuelva el valor máximo.\nDevuelva una tupla que incluya el valor máximo y su posición.\n¿Qué sucede si los elementos son cadenas de caracteres? <br>\n Nota: no utilizar lista.sort() ni la función sorted.\nSe cuenta con una lista ordenada de productos, en la que uno consiste en una tupla de (identificador, descripción, precio), y una lista de los productos a facturar, en la que cada uno consiste en una tupla de (identificador, cantidad). <br>\nSe desea generar una factura que incluya la cantidad, la descripción, el precio unitario y el precio total de cada producto comprado, y al final imprima el total general. <br>\nEscribir una función que reciba ambas listas e imprima por pantalla la factura solicitada.\nLeer de teclado (usando la función raw_input) los datos de un listado de alumnos terminados con padrón 0. Para cada alumno deben leer:<br> # Padrón<br> # Nombre<br> # Apellido<br> # Nota del primer parcial<br> # Nota del primer recuperatorio (en caso de no haber aprobado el parcial)<br> # Nota del segundo recuperatorio (en caso de no haber aprobado en el primero)<br> # Nombre del grupo<br> # Nota del TP 1<br> # Nota del TP 2<br>\nSi el padrón es 0, no deben seguir pidiendo el resto de los campos.<br>\nTanto el padrón, como el nombre y apellido deben leerse como strings (existen padrones que comienzan con una letra b), pero debe validarse que se haya ingresado algo de por lo menos 2 caracteres. <br>\nTodas las notas serán números enteros entre 0 y 10, aunque puede ser que el usuario accidentalmente ingrese algo que no sea un número, por lo que deberán validar la entrada y volver a pedirle los datos al usuario hasta que ingrese algo válido. También deben validar que las notas pertenezcan al rango de 0 a 10. <br>\nSe asume que todos los alumnos se presentan a todos los parciales hasta aprobar o completar sus 3 chances. 
<br>\nAl terminar deben:\nimprimir por pantalla un listado de todos los alumnos en condiciones de rendir coloquio (último parcial aprobado y todos los TP aprobados) en el mismo orden en el que el usuario los ingreso.\nimprimir por pantalla un listado de todos los alumnos en condiciones de rendir coloquio (último parcial aprobado y todos los TP aprobados) ordenados por padrón en forma creciente.\nimprimir por pantalla un listado de todos los alumnos en condiciones de rendir coloquio (último parcial aprobado y todos los TP aprobados) ordenados por nota y, en caso de igualdad, por padrón (ambos en forma creciente).\nCalcular para cada alumno el promedio de sus notas del parcial y luego el promedio del curso como el promedio de todos los promedios.\nInformar cuál es la nota que más se repite entre todos los parciales (sin importar si es primer, segundo o tercer parcial) e indicar la cantidad de ocurrencias.\nlistar todas las notas que se sacaron los alumnos en el primer parcial y los padrones de quienes se sacaron esas notas con el siguiente formato:\n\nNota: 2\n * nnnn1\n * nnnn2\n * nnnn3\n * nnnn4\nNota: 4\n * nnnn1\n * nnnn2\n ...\nTener en cuenta que las notas pueden ser del 2 al 10 y puede ocurrir que nadie se haya sacado esa nota (y en dicho caso no esa nota no tiene que aparecer en el listado)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ES-DOC/esdoc-jupyterhub
notebooks/cmcc/cmip6/models/cmcc-esm2-hr5/aerosol.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Aerosol\nMIP Era: CMIP6\nInstitute: CMCC\nSource ID: CMCC-ESM2-HR5\nTopic: Aerosol\nSub-Topics: Transport, Emissions, Concentrations, Optical Radiative Properties, Model. \nProperties: 69 (37 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:53:50\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'cmcc', 'cmcc-esm2-hr5', 'aerosol')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties\n2. Key Properties --&gt; Software Properties\n3. Key Properties --&gt; Timestep Framework\n4. Key Properties --&gt; Meteorological Forcings\n5. Key Properties --&gt; Resolution\n6. Key Properties --&gt; Tuning Applied\n7. Transport\n8. Emissions\n9. Concentrations\n10. Optical Radiative Properties\n11. Optical Radiative Properties --&gt; Absorption\n12. Optical Radiative Properties --&gt; Mixtures\n13. Optical Radiative Properties --&gt; Impact Of H2o\n14. Optical Radiative Properties --&gt; Radiative Scheme\n15. Optical Radiative Properties --&gt; Cloud Interactions\n16. Model \n1. Key Properties\nKey properties of the aerosol model\n1.1. Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of aerosol model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of aerosol model code", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.3. Scheme Scope\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nAtmospheric domains covered by the aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.scheme_scope') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"troposhere\" \n# \"stratosphere\" \n# \"mesosphere\" \n# \"mesosphere\" \n# \"whole atmosphere\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.4. Basic Approximations\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBasic approximations made in the aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.basic_approximations') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.5. Prognostic Variables Form\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nPrognostic variables in the aerosol model", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"3D mass/volume ratio for aerosols\" \n# \"3D number concenttration for aerosols\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.6. Number Of Tracers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of tracers in the aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.number_of_tracers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "1.7. Family Approach\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nAre aerosol calculations generalized into families of species?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.family_approach') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Software Properties\nSoftware properties of aerosol code\n2.1. Repository\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nLocation of code for this component.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.2. Code Version\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCode version identifier.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.3. Code Languages\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nCode language(s).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3. Key Properties --&gt; Timestep Framework\nPhysical properties of seawater in ocean\n3.1. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMathematical method deployed to solve the time evolution of the prognostic variables", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses atmospheric chemistry time stepping\" \n# \"Specific timestepping (operator splitting)\" \n# \"Specific timestepping (integrated)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "3.2. Split Operator Advection Timestep\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTimestep for aerosol advection (in seconds)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.3. Split Operator Physical Timestep\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTimestep for aerosol physics (in seconds).", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.4. Integrated Timestep\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTimestep for the aerosol model (in seconds)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.5. Integrated Scheme Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSpecify the type of timestep scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Explicit\" \n# \"Implicit\" \n# \"Semi-implicit\" \n# \"Semi-analytic\" \n# \"Impact solver\" \n# \"Back Euler\" \n# \"Newton Raphson\" \n# \"Rosenbrock\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Meteorological Forcings\n**\n4.1. Variables 3D\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nThree dimensionsal forcing variables, e.g. U, V, W, T, Q, P, conventive mass flux", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.2. Variables 2D\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTwo dimensionsal forcing variables, e.g. land-sea mask definition", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.3. Frequency\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nFrequency with which meteological forcings are applied (in seconds).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "5. Key Properties --&gt; Resolution\nResolution in the aersosol model grid\n5.1. Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.2. Canonical Horizontal Resolution\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nExpression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.3. 
Number Of Horizontal Gridpoints\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTotal number of horizontal (XY) points (or degrees of freedom) on computational grid.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "5.4. Number Of Vertical Levels\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nNumber of vertical levels resolved on computational grid.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "5.5. Is Adaptive Grid\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDefault is False. Set true if grid resolution changes during execution.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6. Key Properties --&gt; Tuning Applied\nTuning methodology for aerosol model\n6.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics retained. &amp;Document the relative weight given to climate performance metrics versus process oriented metrics, &amp;and on the possible conflicts with parameterization level tuning. In particular describe any struggle &amp;with a parameter value that required pushing it to its limits to solve a particular model deficiency.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.2. Global Mean Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList set of metrics of the global mean state used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.3. Regional Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList of regional metrics of mean state used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.4. Trend Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList observed trend metrics used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7. Transport\nAerosol transport\n7.1. 
Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of transport in atmosperic aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMethod for aerosol transport modeling", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses Atmospheric chemistry transport scheme\" \n# \"Specific transport scheme (eulerian)\" \n# \"Specific transport scheme (semi-lagrangian)\" \n# \"Specific transport scheme (eulerian and semi-lagrangian)\" \n# \"Specific transport scheme (lagrangian)\" \n# TODO - please enter value(s)\n", "7.3. Mass Conservation Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nMethod used to ensure mass conservation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses Atmospheric chemistry transport scheme\" \n# \"Mass adjustment\" \n# \"Concentrations positivity\" \n# \"Gradients monotonicity\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "7.4. Convention\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nTransport by convention", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.convention') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses Atmospheric chemistry transport scheme\" \n# \"Convective fluxes connected to tracers\" \n# \"Vertical velocities connected to tracers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8. Emissions\nAtmospheric aerosol emissions\n8.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of emissions in atmosperic aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nMethod used to define aerosol species (several methods allowed because the different species may not use the same method).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Prescribed (climatology)\" \n# \"Prescribed CMIP6\" \n# \"Prescribed above surface\" \n# \"Interactive\" \n# \"Interactive above surface\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.3. Sources\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nSources of the aerosol species are taken into account in the emissions scheme", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.emissions.sources') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Vegetation\" \n# \"Volcanos\" \n# \"Bare ground\" \n# \"Sea surface\" \n# \"Lightning\" \n# \"Fires\" \n# \"Aircraft\" \n# \"Anthropogenic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.4. Prescribed Climatology\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify the climatology type for aerosol emissions", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.prescribed_climatology') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Interannual\" \n# \"Annual\" \n# \"Monthly\" \n# \"Daily\" \n# TODO - please enter value(s)\n", "8.5. Prescribed Climatology Emitted Species\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of aerosol species emitted and prescribed via a climatology", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.6. Prescribed Spatially Uniform Emitted Species\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of aerosol species emitted and prescribed as spatially uniform", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.7. Interactive Emitted Species\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of aerosol species emitted and specified via an interactive method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.8. Other Emitted Species\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of aerosol species emitted and specified via an &quot;other method&quot;", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.other_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.9. Other Method Characteristics\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCharacteristics of the &quot;other method&quot; used for aerosol emissions", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.other_method_characteristics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9. Concentrations\nAtmospheric aerosol concentrations\n9.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of concentrations in atmosperic aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.2. Prescribed Lower Boundary\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of species prescribed at the lower boundary.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.3. Prescribed Upper Boundary\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of species prescribed at the upper boundary.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.4. Prescribed Fields Mmr\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of species prescribed as mass mixing ratios.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.5. Prescribed Fields Mmr\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of species prescribed as AOD plus CCNs.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Optical Radiative Properties\nAerosol optical and radiative properties\n10.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of optical and radiative properties", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11. Optical Radiative Properties --&gt; Absorption\nAbsortion properties in aerosol scheme\n11.1. Black Carbon\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAbsorption mass coefficient of black carbon at 550nm (if non-absorbing enter 0)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.2. Dust\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAbsorption mass coefficient of dust at 550nm (if non-absorbing enter 0)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.3. Organics\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAbsorption mass coefficient of organics at 550nm (if non-absorbing enter 0)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "12. Optical Radiative Properties --&gt; Mixtures\n**\n12.1. External\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there external mixing with respect to chemical composition?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "12.2. 
Internal\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there internal mixing with respect to chemical composition?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "12.3. Mixing Rule\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf there is internal mixing with respect to chemical composition then indicate the mixinrg rule", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13. Optical Radiative Properties --&gt; Impact Of H2o\n**\n13.1. Size\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes H2O impact size?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "13.2. Internal Mixture\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes H2O impact internal mixture?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "14. Optical Radiative Properties --&gt; Radiative Scheme\nRadiative scheme for aerosol\n14.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of radiative scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14.2. Shortwave Bands\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of shortwave bands", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "14.3. Longwave Bands\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of longwave bands", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "15. Optical Radiative Properties --&gt; Cloud Interactions\nAerosol-cloud interactions\n15.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of aerosol-cloud interactions", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.2. Twomey\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the Twomey effect included?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "15.3. Twomey Minimum Ccn\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf the Twomey effect is included, then what is the minimum CCN number?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "15.4. Drizzle\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the scheme affect drizzle?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "15.5. Cloud Lifetime\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the scheme affect cloud lifetime?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "15.6. Longwave Bands\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of longwave bands", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "16. Model\nAerosol model\n16.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of atmosperic aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "16.2. Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nProcesses included in the Aerosol model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Dry deposition\" \n# \"Sedimentation\" \n# \"Wet deposition (impaction scavenging)\" \n# \"Wet deposition (nucleation scavenging)\" \n# \"Coagulation\" \n# \"Oxidation (gas phase)\" \n# \"Oxidation (in cloud)\" \n# \"Condensation\" \n# \"Ageing\" \n# \"Advection (horizontal)\" \n# \"Advection (vertical)\" \n# \"Heterogeneous chemistry\" \n# \"Nucleation\" \n# TODO - please enter value(s)\n", "16.3. Coupling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nOther model components coupled to the Aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.coupling') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Radiation\" \n# \"Land surface\" \n# \"Heterogeneous chemistry\" \n# \"Clouds\" \n# \"Ocean\" \n# \"Cryosphere\" \n# \"Gas phase chemistry\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.4. 
Gas Phase Precursors\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of gas phase aerosol precursors.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.gas_phase_precursors') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"DMS\" \n# \"SO2\" \n# \"Ammonia\" \n# \"Iodine\" \n# \"Terpene\" \n# \"Isoprene\" \n# \"VOC\" \n# \"NOx\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.5. Scheme Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nType(s) of aerosol scheme used by the aerosols model (potentially multiple: some species may be covered by one type of aerosol scheme and other species covered by another type).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.scheme_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Bulk\" \n# \"Modal\" \n# \"Bin\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.6. Bulk Scheme Species\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of species covered by the bulk scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.bulk_scheme_species') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Sulphate\" \n# \"Nitrate\" \n# \"Sea salt\" \n# \"Dust\" \n# \"Ice\" \n# \"Organic\" \n# \"Black carbon / soot\" \n# \"SOA (secondary organic aerosols)\" \n# \"POM (particulate organic matter)\" \n# \"Polar stratospheric ice\" \n# \"NAT (Nitric acid trihydrate)\" \n# \"NAD (Nitric acid dihydrate)\" \n# \"STS (supercooled ternary solution aerosol particule)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ContinuumIO/cube-explorer
doc/Homepage.ipynb
bsd-3-clause
[ "HoloCube is a Python library that makes it easy to explore and visualize geographical, meterological, oceanographic, and other multidimensional gridded datasets. HoloCube interfaces between the HoloViews library for flexible visualizations of multidimensional data, the Iris library for storing and processing climate and weather data, and the Cartopy library for working with cartographic projections and visualizations in Matplotlib. Specifically, HoloCube:\n\n\nExtends HoloViews objects to allow them to use data stored in Iris cubes. After import holocube, data can be provided to any Holoviews Element directly as a cube, without needing to first convert into one of the other supported formats (NumPy arrays, Pandas data frames, etc.). This support is independent of the other support below -- data from Iris cubes can be used even in non-geographic Elements, and most geographic Elements can accept data in any format.\n\n\nAdds a set of new HoloViews Elements that have an associated geographic projection (GeoElements), based on cartopy.crs. These currently include GeoFeature, WMTS, GeoTiles, Points, Contours, Image, and Text objects, each of which can easily be overlaid in the same plots. E.g. an object with temperature data can be overlaid with coastline data using an expression like Image(temp_cube)*hc.GeoFeature(cartopy.feature.COASTLINE). Each GeoElement can also be freely combined in layouts with any other HoloViews Element, making it simple to make even complex multi-figure layours.\n\n\nWith HoloCube, you can now work easily and naturally with large, multidimensional datasets, instantly visualizing any subset or combination of them, while always being able to access the raw data underlying any plot. Here's a simple example:", "import holoviews as hv\nimport holocube as hc\nfrom cartopy import crs\nfrom cartopy import feature as cf\n\nhv.notebook_extension()\n\n%%opts GeoFeature [projection=crs.Geostationary()]\n\ncoasts = hc.GeoFeature(cf.COASTLINE)\nborders = hc.GeoFeature(cf.BORDERS)\nocean = hc.GeoFeature(cf.OCEAN)\n\nocean + borders + (ocean*borders).relabel(\"Overlay\")", "The following example loads a cube from iris-sample-data and displays it as follows:", "import iris\nsurface_temp = iris.load_cube(iris.sample_data_path('GloSea4', 'ensemble_001.pp'))\nprint surface_temp.summary()", "With HoloViews, you can quickly view the data in the cube interactively:", "%%opts GeoImage [colorbar=True] (cmap='viridis')\n(hc.HoloCube(surface_temp).groupby(['time'], group_type=hc.Image) * hc.GeoFeature(cf.COASTLINE))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
kylepjohnson/notebooks
lda/leipzig/1_lda_tests/2 LDA basics, pt 1.ipynb
mit
[ "About\nWe'll be following the tutorial \"Topic Modeling for Fun and Profit\" from the author of the Gensim library.", "# import and setup modules we'll be using in this notebook\nimport logging\nimport itertools\nimport os\nimport pickle\n\nimport numpy as np\nimport gensim\n\nlogging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)\nlogging.root.level = logging.INFO # ipython sometimes messes up the logging setup; restore\n\ndef head(stream, n=10):\n \"\"\"Convenience fnc: return the first `n` elements of the stream, as plain list.\"\"\"\n return list(itertools.islice(stream, n))", "Setup document streaming\nAnd a little more preprocessing", "from gensim.utils import smart_open, simple_preprocess\nfrom gensim.corpora.wikicorpus import _extract_pages, filter_wiki\n# from gensim.parsing.preprocessing import STOPWORDS\nfrom cltk.stop.greek.stops import STOPS_LIST\n\nSTOPS_LIST = [simple_preprocess(stop, deacc=True)[0] for stop in STOPS_LIST if len(simple_preprocess(stop, deacc=True)) > 0]\n\ndef tokenize(text):\n # https://radimrehurek.com/gensim/utils.html#gensim.utils.simple_preprocess\n tokens = [token for token in simple_preprocess(text, deacc=True)]\n return [token for token in tokens if token not in STOPS_LIST]\n \n\ndef iter_wiki(dump_file):\n \"\"\"Yield each article from the Wikipedia dump, as a `(title, tokens)` 2-tuple.\"\"\"\n ignore_namespaces = 'Wikipedia Category File Portal Template MediaWiki User Help Book Draft'.split()\n for title, text, pageid in _extract_pages(smart_open(dump_file)):\n text = filter_wiki(text)\n tokens = tokenize(text)\n if len(tokens) < 50 or any(title.startswith(ns + ':') for ns in ignore_namespaces):\n continue # ignore short articles and various meta-articles\n yield title, tokens\n\ndef iter_tlg(tlg_dir):\n file_names = os.listdir(tlg_dir)\n for file_name in file_names:\n file_path = os.path.join(tlg_dir, file_name)\n with open(file_path) as file_open:\n file_read = file_open.read()\n tokens = tokenize(file_read)\n # ignore short docs\n if len(tokens) < 50:\n continue\n yield file_name, tokens\n\n#stream = iter_wiki('./data/simplewiki-20140623-pages-articles.xml.bz2')\n\ntlg_preprocessed = os.path.expanduser('~/cltk_data/greek/text/tlg/plaintext/')\nstream = iter_tlg(tlg_preprocessed)\n\nfor title, tokens in itertools.islice(iter_tlg(tlg_preprocessed), 8):\n print(title, tokens[:10]) # print the article title and its first ten tokens", "Mk word dictionaries", "doc_stream = (tokens for _, tokens in iter_tlg(tlg_preprocessed))\n\n%time id2word_tlg = gensim.corpora.Dictionary(doc_stream)\nprint(id2word_tlg)\n\n# this cutoff might lose too much info, we'll see\n# ignore words that appear in less than 20 documents or more than 10% documents\nid2word_tlg.filter_extremes(no_below=20, no_above=0.1)\nprint(id2word_tlg)", "Mk vectors\nNow start again with the corpus, turning the actual words into integers from our map.", "doc = \"περὶ ποιητικῆς αὐτῆς τε καὶ τῶν εἰδῶν αὐτῆς, ἥν τινα δύναμιν ἕκαστον ἔχει, καὶ πῶς δεῖ συνίστασθαι τοὺς μύθους [10] εἰ μέλλει καλῶς ἕξειν ἡ ποίησις, ἔτι δὲ ἐκ πόσων καὶ ποίων ἐστὶ μορίων, ὁμοίως δὲ καὶ περὶ τῶν ἄλλων ὅσα τῆς αὐτῆς ἐστι μεθόδου, λέγωμεν ἀρξάμενοι κατὰ φύσιν πρῶτον ἀπὸ τῶν πρώτων.\"\ndoc = ' '.join(simple_preprocess(doc))\nbow = id2word_tlg.doc2bow(tokenize(doc))\nprint(bow)\n\nprint(id2word_tlg[6880], id2word_tlg[12323])\n\n# Save for reuse\n# can also use `id2word_tlg.save('~/cltk_data/user_data/tlg_bow_id2word.dict')`\nwith 
open(os.path.expanduser('~/cltk_data/user_data/tlg_bow_id2word.dict'), 'wb') as file_open:\n pickle.dump(id2word_tlg, file_open)\n\nclass WikiCorpus(object):\n def __init__(self, dump_file, dictionary, clip_docs=None):\n \"\"\"\n Parse the first `clip_docs` Wikipedia documents from file `dump_file`.\n Yield each document in turn, as a list of tokens (unicode strings).\n \n \"\"\"\n self.dump_file = dump_file\n self.dictionary = dictionary\n self.clip_docs = clip_docs\n \n def __iter__(self):\n self.titles = []\n for title, tokens in itertools.islice(iter_wiki(self.dump_file), self.clip_docs):\n self.titles.append(title)\n yield self.dictionary.doc2bow(tokens)\n \n def __len__(self):\n return self.clip_docs\n\nclass TLGCorpus(object):\n def __init__(self, dump_file, dictionary, clip_docs=None):\n \"\"\"Yield each document in turn, as a list of tokens (unicode strings).\n \"\"\"\n self.dump_file = dump_file\n self.dictionary = dictionary\n self.clip_docs = clip_docs\n \n def __iter__(self):\n self.titles = []\n for title, tokens in itertools.islice(iter_tlg(self.dump_file), self.clip_docs):\n self.titles.append(title)\n yield self.dictionary.doc2bow(tokens)\n \n def __len__(self):\n return self.clip_docs\n\n# create a stream of bag-of-words vectors\ntlg_corpus = TLGCorpus(tlg_preprocessed, id2word_tlg)\nvector = next(iter(tlg_corpus))\nprint(vector) # print the first vector in the stream\n\n# get titles, save to disk\ntlg_corpus = TLGCorpus(tlg_preprocessed, id2word_tlg)\nfor item in tlg_corpus:\n print(type(item))\n input()\n\n# what is the most common word in that first article?\nmost_index, most_count = max(vector, key=lambda _tuple: _tuple[1])\nprint(id2word_tlg[most_index], most_count)\n\nfrom gensim.corpora.mmcorpus import MmCorpus\n\n# Save BoW\nuser_dir = os.path.expanduser('~/cltk_data/user_data/')\ntry:\n os.makedirs(user_dir)\nexcept FileExistsError:\n pass\nbow_path = os.path.join(user_dir, 'bow_lda_gensim.mm')\n\n%time gensim.corpora.MmCorpus.serialize(bow_path, tlg_corpus)\n\nmm_corpus = gensim.corpora.MmCorpus(bow_path)\nprint(mm_corpus)\n\nprint(next(iter(mm_corpus)))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
jmschrei/pomegranate
tutorials/B_Model_Tutorial_6_Markov_Chain.ipynb
mit
[ "Markov Chains\nauthor: Jacob Schreiber <br>\ncontact: jmschreiber91@gmail.com\nMarkov Chains are a simple model based on conditional probability, where a sequence is modelled as the product of conditional probabilities. A n-th order Markov chain looks back n emissions to base its conditional probability on. For example, a 3rd order Markov chain models $P(X_{t} | X_{t-1}, X_{t-2}, X_{t-3})$.\nHowever, a full Markov model needs to model the first observations, and the first n-1 observations. The first observation can't really be modelled well using $P(X_{t} | X_{t-1}, X_{t-2}, X_{t-3})$, but can be modelled by $P(X_{t})$. The second observation has to be modelled by $P(X_{t} | X_{t-1} )$. This means that these distributions have to be passed into the Markov chain as well. \nWe can initialize a Markov chain easily enough by passing in a list of the distributions.", "%matplotlib inline\nimport time\nimport pandas\nimport random\nimport numpy\nimport matplotlib.pyplot as plt\nimport seaborn; seaborn.set_style('whitegrid')\nimport itertools\n\nfrom pomegranate import *\n\nrandom.seed(0)\nnumpy.random.seed(0)\nnumpy.set_printoptions(suppress=True)\n\n%load_ext watermark\n%watermark -m -n -p numpy,scipy,pomegranate\n\nfrom pomegranate import *\n%pylab inline\n\nd1 = DiscreteDistribution({'A': 0.10, 'C': 0.40, 'G': 0.40, 'T': 0.10})\nd2 = ConditionalProbabilityTable([['A', 'A', 0.10],\n ['A', 'C', 0.50],\n ['A', 'G', 0.30],\n ['A', 'T', 0.10],\n ['C', 'A', 0.10],\n ['C', 'C', 0.40],\n ['C', 'T', 0.40],\n ['C', 'G', 0.10],\n ['G', 'A', 0.05],\n ['G', 'C', 0.45],\n ['G', 'G', 0.45],\n ['G', 'T', 0.05],\n ['T', 'A', 0.20],\n ['T', 'C', 0.30],\n ['T', 'G', 0.30],\n ['T', 'T', 0.20]], [d1])\n\nclf = MarkovChain([d1, d2])", "Markov chains have log probability, fit, summarize, and from summaries methods implemented. They do not have classification capabilities by themselves, but when combined with a Naive Bayes classifier can be used to do discrimination between multiple models (see the Naive Bayes tutorial notebook).\nLets see the log probability of some data.", "clf.log_probability( list('CAGCATCAGT') ) \n\nclf.log_probability( list('C') )\n\nclf.log_probability( list('CACATCACGACTAATGATAAT') )", "We can fit the model to sequences which we pass in, and as expected, get better performance on sequences which we train on.", "clf.fit(list(map(list,('CAGCATCAGT', 'C', 'ATATAGAGATAAGCT', 'GCGCAAGT', 'GCATTGC', 'CACATCACGACTAATGATAAT'))))\nprint(clf.log_probability( list('CAGCATCAGT') ) )\nprint(clf.log_probability( list('C') ))\nprint(clf.log_probability( list('CACATCACGACTAATGATAAT') ))\n\nprint(clf.distributions[0])\n\nprint(clf.distributions[1])" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
ljo/collatex-tutorial
unit5/CollateX and XML, Part 3.ipynb
gpl-3.0
[ "CollateX and XML, Part 3\nDavid J. Birnbaum (&#100;&#106;&#98;&#112;&#105;&#116;&#116;&#64;&#103;&#109;&#97;&#105;&#108;&#46;&#99;&#111;&#109;, http://www.obdurodon.org), Last modified 2015-0y-07 \nThis example collates ten full witnesses of Partonopeus de Blois (the files are available at the Oxford Text Archive; quasi-TEI XML files are in 2499/data/xml subdirectory of the zip file). \nIn Part 1 of this tutorial we collated just a single line from just four witnesses, spelling out the details step by step in a way that would not be used in a real project, but that made it easy to see how each step moves toward the final result. In Part 2 we employed three classes (WitnessSet, Line, Word) to make the code more extensible and adaptable. In Part 3 we enhance the processing by:\n\nprocessing the full text from all ten witnesses\nreading the input from files, instead of from strings within the Python code itself, and\nletting our Python script tell us which elements to flatten, so that we don’t have to identify them manually in advance.\n\nThe markup in the input files is similar in some respects to TEI, but the root element is &lt;part&gt;, obligatory TEI elements like &lt;teiHeader&gt; and &lt;text&gt; are not present, and the documents are in no namespace. Lines are tagged as &lt;l&gt;, and each line has @id and @n attributes. The value of the @n attribute refers to the order of the line within the individual witness, which is not relevant for collation. The @id attribute, on the other hand, represents the line number in a synopsis of all witnesses, which means that, for example, the &lt;l id='34'&gt; lines from all witnesses should be collated together, and similarly for other @id values. This makes it easy to identify the segments to be treated as separate collation sets; we can collate all versions of line #1 against one another, and then, separately, collate all version of line #2 against one another, etc., ultimately concatenating the results. There are two peculiarities of the @id values that are relevant here:\n\nNot every line occurs in every witness. This means that when we iterate over the @id numbers, we need to accommodate gaps in the data.\nThe @id values are not only consecutive integers. Some values have appended letters, so that, for example, in witness G line 4008 is followed by 4008a and then 4009. This means that if we want to iterate over the @id values in order, we cannot rely on either purely numeric or purely string order.\n\nAdditionally, in Part 1 and Part 2 of this tutorial:\n\nWe didn’t worry about the order of the witnesses in the output. Now that we are dealing with multiple segments, we probably want to ensure that the witnesses are rendered in the same order in all of the segments, which means that have to sort them. For this tutorial the witness identifiers are all single upper-case Latin letters (A, B, C, F, G, L, P, T, V, W), and we’ll sort them in alphabetical order. (Alternatively, it is also possible to order them explicitly, perhaps in order to group them by hypearchetype.)\nThe witness siglum was attached to the &lt;l&gt; element. Now that we are dealing with full witnesses that contain multiple lines, we have to locate the siglum elsewhere. \nThe input \"document\" was a single &lt;l&gt; element, and we ignored the rest of the documents whence those single lines had been extracted manually. 
Now that we are dealing with complete TEI-based documents, we have to decide what to do with the rest of the content, that is, with the elements that are not just lines.\n\nIn this tutorial we ignore the other elements of the input documents except for the siglum. In Real-Life collation tasks with complete TEI documents, developers would probably want to incorporate at least some metadata from the &lt;teiheader&gt; components of the sources.\nLoad libraries. In addition to the libraries used in Part 2, we also load the os library because we will be reading input from the file system and the itertools library to help concatenate lists efficiently.", "from collatex import *\nfrom lxml import etree\nimport json,re,os,itertools", "split(id)\nWe create our own sort function, for which we define linenoRegex, which includes two capture groups, both of which are strings by default. The first captures all digits from the beginning of the line number (@id) value. The second captures anything after the numbers. The regex splits the input into a tuple that contains the two values as strings, and we convert the first value to an integer before we return it. For example, the input value '4008a' will return (4008,'a'), where the '4008' is an integer and the 'a' is a string. We can then specify that our @id values should be sorted according to the results of processing them with this function. This overcomes the limitation of our being unable to sort them numerically (because some of them contain letters) or alphabetically (because '10' would sort before '9' alphabetically).", "def splitId(id):\n \"\"\"Splits @id value like 4008a into parts, for sorting\"\"\"\n linenoRegex = re.compile('(\\d+)(.*)')\n results = linenoRegex.match(id).groups()\n return (int(results[0]),results[1])", "The WitnessSet class\nThe WitnessSet class represents all of the witnesses being collated. \nall_line_ids()\nUnlike in Parts 1 and 2, where each witness contained just one line (&lt;l&gt; element), the witnesses now contain multiple lines. We segment the witnesses by @id value, so that each segment (set of readings to be collated) consists of lines that share an @id value. To iterate over those values, we need to get a complete list of them, and to ensure that the output is in the correct order, we need to sort them. Lines will be processed individually, segmenting the collation task into subtasks that collate just one line at a time. 
The all_line_ids() method returns a list of line identifiers (@id values) from all witnesses in the correct order.\ngenerate_json(input()\nThe generate_json_input() method returns a JSON object that is suitable for input into CollateX.", "class WitnessSet:\n def __init__(self,witnessList):\n self.witnessList = witnessList\n def all_witnesses(self):\n \"\"\"List of tuples consisting of siglum and contents\"\"\"\n return [Witness(witness) for witness in self.witnessList]\n def all_ids(self):\n \"\"\"Sorted deduplicated list of all ids in corpus\"\"\"\n return sorted(set(itertools.chain.from_iterable([witness.XML().xpath('//l/@id') for witness in self.all_witnesses()])),key=splitId)\n def get_lines_by_id(self,id):\n \"\"\"List of tuples of siglum plus <l> element from each witness that corresponds to a certain line\"\"\"\n witnesses_with_line = []\n for witness in self.all_witnesses():\n try:\n witnesses_with_line.append((witness.siglum,witness.XML().xpath('//l[@id = ' + id + ']')[0]))\n except:\n pass\n return witnesses_with_line\n def generate_json_input(self, lineId):\n \"\"\"JSON input to CollateX for an <l> segment\"\"\"\n json_input = {}\n witnesses = []\n for witness in self.get_lines_by_id(lineId):\n currentWitness = {}\n currentWitness['id'] = witness[0]\n currentWitness['tokens'] = Line(witness[1]).tokens()\n witnesses.append(currentWitness)\n json_input['witnesses'] = witnesses\n return json_input", "The Witness class\nEach witness in the witness set is an instance of class Witness. witness.siglum is a string and witness.contents is an XML tree.", "class Witness:\n \"\"\"Each witness in the witness set is an instance of class Witness\"\"\"\n def __init__(self,witness):\n self.witness = witness\n self.siglum = self.witness[0]\n self.contents = self.witness[1]\n def XML(self):\n return etree.XML(self.contents)", "The Line class\nThe Line class contains methods applied to individual lines. The XSLT stylesheets and the functions to use them have been moved into the Line class, since they apply to individual lines. The siglum for the line is retrieved from the witness that contains it, and is part of the Witness class. 
The line.tokens() method returns a list of JSON objects, one for each word token.", "class Line:\n \"\"\"An instance of Line is a line in a witness, expressed as an <l> element\"\"\"\n addWMilestones = etree.XML(\"\"\"\n <xsl:stylesheet version=\"1.0\" xmlns:xsl=\"http://www.w3.org/1999/XSL/Transform\">\n <xsl:output method=\"xml\" indent=\"no\" encoding=\"UTF-8\" omit-xml-declaration=\"yes\"/>\n <xsl:template match=\"*|@*\">\n <xsl:copy>\n <xsl:apply-templates select=\"node() | @*\"/>\n </xsl:copy>\n </xsl:template>\n <xsl:template match=\"/*\">\n <xsl:copy>\n <xsl:apply-templates select=\"@*\"/>\n <!-- insert a <w/> milestone before the first word -->\n <w/>\n <xsl:apply-templates/>\n </xsl:copy>\n </xsl:template>\n <!-- convert <add>, <sic>, and <crease> to milestones (and leave them that way)\n CUSTOMIZE HERE: add other elements that may span multiple word tokens\n -->\n <xsl:template match=\"add | sic | crease \">\n <xsl:element name=\"{name()}\">\n <xsl:attribute name=\"n\">start</xsl:attribute>\n </xsl:element>\n <xsl:apply-templates/>\n <xsl:element name=\"{name()}\">\n <xsl:attribute name=\"n\">end</xsl:attribute>\n </xsl:element>\n </xsl:template>\n <xsl:template match=\"note\"/>\n <xsl:template match=\"text()\">\n <xsl:call-template name=\"whiteSpace\">\n <xsl:with-param name=\"input\" select=\"translate(.,'&#x0a;',' ')\"/>\n </xsl:call-template>\n </xsl:template>\n <xsl:template name=\"whiteSpace\">\n <xsl:param name=\"input\"/>\n <xsl:choose>\n <xsl:when test=\"not(contains($input, ' '))\">\n <xsl:value-of select=\"$input\"/>\n </xsl:when>\n <xsl:when test=\"starts-with($input,' ')\">\n <xsl:call-template name=\"whiteSpace\">\n <xsl:with-param name=\"input\" select=\"substring($input,2)\"/>\n </xsl:call-template>\n </xsl:when>\n <xsl:otherwise>\n <xsl:value-of select=\"substring-before($input, ' ')\"/>\n <w/>\n <xsl:call-template name=\"whiteSpace\">\n <xsl:with-param name=\"input\" select=\"substring-after($input,' ')\"/>\n </xsl:call-template>\n </xsl:otherwise>\n </xsl:choose>\n </xsl:template>\n </xsl:stylesheet>\n \"\"\")\n transformAddW = etree.XSLT(addWMilestones)\n xsltWrapW = etree.XML('''\n <xsl:stylesheet xmlns:xsl=\"http://www.w3.org/1999/XSL/Transform\" version=\"1.0\">\n <xsl:output method=\"xml\" indent=\"no\" omit-xml-declaration=\"yes\"/>\n <xsl:template match=\"/*\">\n <xsl:copy>\n <xsl:apply-templates select=\"w\"/>\n </xsl:copy>\n </xsl:template>\n <xsl:template match=\"w\">\n <!-- faking <xsl:for-each-group> as well as the \"<<\" and except\" operators -->\n <xsl:variable name=\"tooFar\" select=\"following-sibling::w[1] | following-sibling::w[1]/following::node()\"/>\n <w>\n <xsl:copy-of select=\"following-sibling::node()[count(. | $tooFar) != count($tooFar)]\"/>\n </w>\n </xsl:template>\n </xsl:stylesheet>\n ''')\n transformWrapW = etree.XSLT(xsltWrapW)\n def __init__(self,line):\n self.line = line\n def tokens(self):\n return [Word(token).createToken() for token in Line.transformWrapW(Line.transformAddW(self.line)).xpath('//w')]\n ", "The Word class contains methods that apply to individual words. 
unwrap() and normalize() are private; they are used by createToken() to return a JSON object with the \"t\" and \"n\" properties for a word token.", "class Word:\n unwrapRegex = re.compile('<w>(.*)</w>')\n stripTagsRegex = re.compile('<.*?>')\n def __init__(self,word):\n self.word = word\n def unwrap(self):\n return Word.unwrapRegex.match(etree.tostring(self.word,encoding='unicode')).group(1)\n def normalize(self):\n return Word.stripTagsRegex.sub('',self.unwrap().lower())\n def createToken(self):\n token = {}\n token['t'] = self.unwrap()\n token['n'] = self.normalize()\n return token", "Create XML data and assign to a witnessSet variable\nOur witnesses are XML files in the 'partonopeus' subdirectory of our current location. Verify that the files are there by listing them.", "os.listdir('partonopeus')", "Create a two-member tuple for each file, consisting of two strings: the one-letter identifier (the filename with the '.xml' extension removed) and the contents of the files. Assemble these into a list of tuples and use it to create an instance of the WitnessSet class, assigned to the variable witnessSet. We use the lxml library to parse the XML and a file that contains Unicode data must be opened in raw (bytes) mode.", "witnessSet = WitnessSet([(inputFile[0],open('partonopeus/' + inputFile,'rb').read()) for inputFile in os.listdir('partonopeus')])", "Generate sample JSON from a random line of data and examine it", "json_input = witnessSet.generate_json_input('10')\nprint(json_input)", "Collate and output the results of the sample as a plain-text alignment table, as JSON, and as colored HTML", "collationText = collate_pretokenized_json(json_input,output='table')\nprint(collationText)\ncollationJSON = collate_pretokenized_json(json_input,output='json')\nprint(collationJSON)\ncollationHTML2 = collate_pretokenized_json(json_input,output='html2')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ReactiveX/RxPY
notebooks/Getting Started.ipynb
mit
[ "Getting Started with RxPY\nReactiveX, or Rx for short, is an API for programming with observable event streams. RxPY is a port of ReactiveX to Python. Learning Rx with Python is particularly interesting since Python removes much of the clutter that comes with statically typed languages. RxPY works with both Python 2 and Python 3 but all examples in this tutorial uses Python 3.4.\nRx is about processing streams of events. With Rx you:\n\nTell what you want to process (Observable)\nHow you want to process it (A composition of operators)\nWhat you want to do with the result (Observer)\n\nIt's important to understand that with Rx you describe what you want to do with events if and when they arrive. It's all a declarative composition of operators that will do some processing the events when they arrive. If nothing happens, then nothing is processed.\nThus the pattern is that you subscribe to an Observable using an Observer:\npython\nsubscription = Observable.subscribe(observer)\nNOTE: Observables are not active in themselves. They need to be subscribed to make something happen. Simply having an Observable lying around doesn't make anything happen.\nInstall\nUse pip to install RxPY:", "%%bash\npip install reactivex", "Importing the Rx module", "import reactivex\nfrom reactivex import operators as ops\nfrom reactivex import Observer", "Generating a sequence\nThere are many ways to generate a sequence of events. The easiest way to get started is to use the from_iterable() operator that is also called just from_. Other operators you may use to generate a sequence such as just, generate, create and range.", "class MyObserver(Observer[int]):\n def on_next(self, value: int):\n print(\"Got: %s\" % value)\n\n def on_error(self, error: Exception):\n print(\"Got error: %s\" % error)\n\n def on_completed(self):\n print(\"Sequence completed\")\n\nxs = reactivex.from_iterable(range(10))\nd = xs.subscribe(MyObserver())\n\nxs = reactivex.from_(range(10))\nd = xs.subscribe(print)", "NOTE: The subscribe method takes an observer, or one to three callbacks for handing on_next(), on_error(), and on_completed(). This is why we can use print directly as the observer in the example above, since it becomes the on_next() handler for an anonymous observer. \nFiltering a sequence", "xs = reactivex.from_(range(10))\nd = xs.pipe(\n ops.filter(\n lambda x: x % 2\n )).subscribe(print)", "Transforming a sequence", "xs = reactivex.from_(range(10))\nd = xs.pipe(\n ops.map(\n lambda x: x * 2\n )).subscribe(print)", "NOTE: You can also take an index as the second parameter to the mapper function:", "xs = reactivex.from_(range(10, 20, 2))\nd = xs.pipe(\n ops.map_indexed(\n lambda x, i: \"%s: %s\" % (i, x * 2)\n )).subscribe(print)", "Merge\nMerging two observable sequences into a single observable sequence using the merge operator:", "xs = reactivex.range(1, 5)\nys = reactivex.from_(\"abcde\")\nzs = xs.pipe(ops.merge(ys)).subscribe(print)", "The Spacetime of Rx\nIn the examples above all the events happen at the same moment in time. The events are only separated by ordering. This confuses many newcomers to Rx since the result of the merge operation above may have several valid results such as:\na1b2c3d4e5\n1a2b3c4d5e\nab12cd34e5\nabcde12345\n\nThe only guarantee you have is that 1 will be before 2 in xs, but 1 in xs can be before or after a in ys. It's up the the sort stability of the scheduler to decide which event should go first. 
For real time data streams this will not be a problem since the events will be separated by actual time. To make sure you get the results you \"expect\", it's always a good idea to add some time between the events when playing with Rx.\nMarbles and Marble Diagrams\nAs we saw in the previous section it's nice to add some time when playing with Rx and RxPY. A great way to explore RxPY is to use the marbles test module that enables us to play with marble diagrams. The marbles module adds two new function to. The methods are from_marbles() and to_marbles().\nExamples:\n1. res = reactivex.from_marbles(\"1-2-3-|\")\n2. res = reactivex.from_marbles(\"1-2-3-x\", rx.Scheduler.timeout)\nThe marble string consists of some special characters:\n- = Timespan of 100 ms\n x = on_error()\n | = on_completed()\nAll other characters are treated as an on_next() event at the given moment they are found on the string. If you need to represent multi character values, then you can group then with brackets such as \"1-(42)-3\". \nLets try it out:", "xs = reactivex.from_marbles(\"a-b-c-|\")\nxs.pipe(ops.to_list()).run()", "It's now easy to also add errors into the even stream by inserting x into the marble string:", "xs = reactivex.from_marbles(\"1-2-3-#-\")\nys = reactivex.from_marbles(\"1-2-3-4-5\")\nxs.pipe(ops.merge(ys)).subscribe(on_error=print)", "Subjects and Streams\nA simple way to create an observable stream is to use a subject. It's probably called a subject after the Subject-Observer pattern described in the Design Patterns book by the gang of four (GOF).\nAnyway, a Subject is both an Observable and an Observer, so you can both subscribe to it and on_next it with events. This makes it an obvious candidate if need to publish values into an observable stream for processing:", "from reactivex.subject import Subject\n\nstream = Subject[int]()\nstream.on_next(41)\n\nd = stream.subscribe(lambda x: print(\"Got: %s\" % x))\n\nstream.on_next(42)\n\nd.dispose()\nstream.on_next(43)", "That's all for now" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ES-DOC/esdoc-jupyterhub
notebooks/inm/cmip6/models/inm-cm5-0/land.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Land\nMIP Era: CMIP6\nInstitute: INM\nSource ID: INM-CM5-0\nTopic: Land\nSub-Topics: Soil, Snow, Vegetation, Energy Balance, Carbon Cycle, Nitrogen Cycle, River Routing, Lakes. \nProperties: 154 (96 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:54:04\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'inm', 'inm-cm5-0', 'land')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties\n2. Key Properties --&gt; Conservation Properties\n3. Key Properties --&gt; Timestepping Framework\n4. Key Properties --&gt; Software Properties\n5. Grid\n6. Grid --&gt; Horizontal\n7. Grid --&gt; Vertical\n8. Soil\n9. Soil --&gt; Soil Map\n10. Soil --&gt; Snow Free Albedo\n11. Soil --&gt; Hydrology\n12. Soil --&gt; Hydrology --&gt; Freezing\n13. Soil --&gt; Hydrology --&gt; Drainage\n14. Soil --&gt; Heat Treatment\n15. Snow\n16. Snow --&gt; Snow Albedo\n17. Vegetation\n18. Energy Balance\n19. Carbon Cycle\n20. Carbon Cycle --&gt; Vegetation\n21. Carbon Cycle --&gt; Vegetation --&gt; Photosynthesis\n22. Carbon Cycle --&gt; Vegetation --&gt; Autotrophic Respiration\n23. Carbon Cycle --&gt; Vegetation --&gt; Allocation\n24. Carbon Cycle --&gt; Vegetation --&gt; Phenology\n25. Carbon Cycle --&gt; Vegetation --&gt; Mortality\n26. Carbon Cycle --&gt; Litter\n27. Carbon Cycle --&gt; Soil\n28. Carbon Cycle --&gt; Permafrost Carbon\n29. Nitrogen Cycle\n30. River Routing\n31. River Routing --&gt; Oceanic Discharge\n32. Lakes\n33. Lakes --&gt; Method\n34. Lakes --&gt; Wetlands \n1. Key Properties\nLand surface key properties\n1.1. Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of land surface model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of land surface model code (e.g. MOSES2.2)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.3. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral description of the processes modelled (e.g. dymanic vegation, prognostic albedo, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.4. Land Atmosphere Flux Exchanges\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nFluxes exchanged with the atmopshere.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"water\" \n# \"energy\" \n# \"carbon\" \n# \"nitrogen\" \n# \"phospherous\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.5. Atmospheric Coupling Treatment\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the treatment of land surface coupling with the Atmosphere model component, which may be different for different quantities (e.g. dust: semi-implicit, water vapour: explicit)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.6. Land Cover\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nTypes of land cover defined in the land surface model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.land_cover') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"bare soil\" \n# \"urban\" \n# \"lake\" \n# \"land ice\" \n# \"lake ice\" \n# \"vegetated\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.7. Land Cover Change\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe how land cover change is managed (e.g. the use of net or gross transitions)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.land_cover_change') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.8. Tiling\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the general tiling procedure used in the land surface (if any). Include treatment of physiography, land/sea, (dynamic) vegetation coverage and orography/roughness", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Conservation Properties\nTODO\n2.1. Energy\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how energy is conserved globally and to what level (e.g. within X [units]/year)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.conservation_properties.energy') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.2. Water\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how water is conserved globally and to what level (e.g. within X [units]/year)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.conservation_properties.water') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.3. Carbon\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how carbon is conserved globally and to what level (e.g. within X [units]/year)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.conservation_properties.carbon') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3. 
Key Properties --&gt; Timestepping Framework\nTODO\n3.1. Timestep Dependent On Atmosphere\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs a time step dependent on the frequency of atmosphere coupling?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "3.2. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverall timestep of land surface model (i.e. time between calls)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.3. Timestepping Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral description of time stepping method and associated time step(s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Software Properties\nSoftware properties of land surface code\n4.1. Repository\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nLocation of code for this component.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.2. Code Version\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCode version identifier.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.3. Code Languages\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nCode language(s).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5. Grid\nLand surface grid\n5.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of the grid in the land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.grid.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6. Grid --&gt; Horizontal\nThe horizontal grid in the land surface\n6.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the general structure of the horizontal grid (not including any tiling)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.grid.horizontal.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.2. Matches Atmosphere Grid\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the horizontal grid match the atmosphere?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "7. Grid --&gt; Vertical\nThe vertical grid in the soil\n7.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the general structure of the vertical grid in the soil (not including any tiling)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.grid.vertical.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. Total Depth\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe total depth of the soil (in metres)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.grid.vertical.total_depth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "8. Soil\nLand surface soil\n8.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of soil in the land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Heat Water Coupling\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the coupling between heat and water in the soil", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.heat_water_coupling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.3. Number Of Soil layers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe number of soil layers", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.number_of_soil layers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "8.4. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the soil scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9. Soil --&gt; Soil Map\nKey properties of the land surface soil map\n9.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral description of soil map", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.2. Structure\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil structure map", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.structure') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.3. Texture\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil texture map", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.texture') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.4. 
Organic Matter\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil organic matter map", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.organic_matter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.5. Albedo\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil albedo map", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.albedo') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.6. Water Table\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil water table map, if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.water_table') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.7. Continuously Varying Soil Depth\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDo the soil properties vary continuously with depth?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "9.8. Soil Depth\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil depth map", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.soil_depth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Soil --&gt; Snow Free Albedo\nTODO\n10.1. Prognostic\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs snow free albedo prognostic?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "10.2. Functions\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nIf prognostic, describe the dependencies of the snow free albedo calculations", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.snow_free_albedo.functions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"vegetation type\" \n# \"soil humidity\" \n# \"vegetation state\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "10.3. Direct Diffuse\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf prognostic, describe the distinction between direct and diffuse albedo", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"distinction between direct and diffuse albedo\" \n# \"no distinction between direct and diffuse albedo\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "10.4. Number Of Wavelength Bands\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf prognostic, enter the number of wavelength bands used", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11. Soil --&gt; Hydrology\nKey properties of the land surface soil hydrology\n11.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral description of the soil hydrological model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.2. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of river soil hydrology in seconds", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.3. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil hydrology tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.4. Vertical Discretisation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the typical vertical discretisation", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.5. Number Of Ground Water Layers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe number of soil layers that may contain water", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.6. Lateral Connectivity\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nDescribe the lateral connectivity between tiles", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"perfect connectivity\" \n# \"Darcian flow\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "11.7. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe hydrological dynamics scheme in the land surface model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Bucket\" \n# \"Force-restore\" \n# \"Choisnel\" \n# \"Explicit diffusion\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "12. Soil --&gt; Hydrology --&gt; Freezing\nTODO\n12.1. Number Of Ground Ice Layers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow many soil layers may contain ground ice", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "12.2. 
Ice Storage Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method of ice storage", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12.3. Permafrost\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the treatment of permafrost, if any, within the land surface scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13. Soil --&gt; Hydrology --&gt; Drainage\nTODO\n13.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral describe how drainage is included in the land surface scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.drainage.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13.2. Types\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nDifferent types of runoff represented by the land surface model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.drainage.types') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Gravity drainage\" \n# \"Horton mechanism\" \n# \"topmodel-based\" \n# \"Dunne mechanism\" \n# \"Lateral subsurface flow\" \n# \"Baseflow from groundwater\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14. Soil --&gt; Heat Treatment\nTODO\n14.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral description of how heat treatment properties are defined", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.heat_treatment.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14.2. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of soil heat scheme in seconds", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.heat_treatment.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "14.3. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil heat treatment tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.heat_treatment.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14.4. Vertical Discretisation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the typical vertical discretisation", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14.5. Heat Storage\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSpecify the method of heat storage", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.soil.heat_treatment.heat_storage') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Force-restore\" \n# \"Explicit diffusion\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.6. Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nDescribe processes included in the treatment of soil heat", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.heat_treatment.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"soil moisture freeze-thaw\" \n# \"coupling with snow temperature\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15. Snow\nLand surface snow\n15.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of snow in the land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.2. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the snow tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.3. Number Of Snow Layers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe number of snow levels used in the land surface scheme/model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.number_of_snow_layers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "15.4. Density\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescription of the treatment of snow density", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.density') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"constant\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.5. Water Equivalent\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescription of the treatment of the snow water equivalent", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.water_equivalent') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.6. Heat Content\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescription of the treatment of the heat content of snow", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.heat_content') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.7. Temperature\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescription of the treatment of snow temperature", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.snow.temperature') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.8. Liquid Water Content\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescription of the treatment of snow liquid water", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.liquid_water_content') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.9. Snow Cover Fractions\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSpecify cover fractions used in the surface snow scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.snow_cover_fractions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"ground snow fraction\" \n# \"vegetation snow fraction\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.10. Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSnow related processes in the land surface scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"snow interception\" \n# \"snow melting\" \n# \"snow freezing\" \n# \"blowing snow\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.11. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the snow scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "16. Snow --&gt; Snow Albedo\nTODO\n16.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the treatment of snow-covered land albedo", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.snow_albedo.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"prescribed\" \n# \"constant\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.2. Functions\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\n*If prognostic, *", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.snow_albedo.functions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"vegetation type\" \n# \"snow age\" \n# \"snow density\" \n# \"snow grain type\" \n# \"aerosol deposition\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17. Vegetation\nLand surface vegetation\n17.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of vegetation in the land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.2. 
Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of vegetation scheme in seconds", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "17.3. Dynamic Vegetation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there dynamic evolution of vegetation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.dynamic_vegetation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "17.4. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the vegetation tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.5. Vegetation Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nVegetation classification used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.vegetation_representation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"vegetation types\" \n# \"biome types\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.6. Vegetation Types\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList of vegetation types in the classification, if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.vegetation_types') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"broadleaf tree\" \n# \"needleleaf tree\" \n# \"C3 grass\" \n# \"C4 grass\" \n# \"vegetated\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.7. Biome Types\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList of biome types in the classification, if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.biome_types') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"evergreen needleleaf forest\" \n# \"evergreen broadleaf forest\" \n# \"deciduous needleleaf forest\" \n# \"deciduous broadleaf forest\" \n# \"mixed forest\" \n# \"woodland\" \n# \"wooded grassland\" \n# \"closed shrubland\" \n# \"opne shrubland\" \n# \"grassland\" \n# \"cropland\" \n# \"wetlands\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.8. Vegetation Time Variation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow the vegetation fractions in each tile are varying with time", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.vegetation_time_variation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"fixed (not varying)\" \n# \"prescribed (varying from files)\" \n# \"dynamical (varying from simulation)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.9. 
Vegetation Map\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf vegetation fractions are not dynamically updated , describe the vegetation map used (common name and reference, if possible)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.vegetation_map') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.10. Interception\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs vegetation interception of rainwater represented?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.interception') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "17.11. Phenology\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTreatment of vegetation phenology", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.phenology') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic (vegetation map)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.12. Phenology Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nGeneral description of the treatment of vegetation phenology", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.phenology_description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.13. Leaf Area Index\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTreatment of vegetation leaf area index", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.leaf_area_index') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prescribed\" \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.14. Leaf Area Index Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nGeneral description of the treatment of leaf area index", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.leaf_area_index_description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.15. Biomass\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\n*Treatment of vegetation biomass *", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.biomass') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.16. Biomass Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nGeneral description of the treatment of vegetation biomass", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.biomass_description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.17. Biogeography\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTreatment of vegetation biogeography", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.vegetation.biogeography') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.18. Biogeography Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nGeneral description of the treatment of vegetation biogeography", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.biogeography_description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.19. Stomatal Resistance\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSpecify what the vegetation stomatal resistance depends on", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.stomatal_resistance') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"light\" \n# \"temperature\" \n# \"water availability\" \n# \"CO2\" \n# \"O3\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.20. Stomatal Resistance Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nGeneral description of the treatment of vegetation stomatal resistance", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.stomatal_resistance_description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.21. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the vegetation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18. Energy Balance\nLand surface energy balance\n18.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of energy balance in land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.energy_balance.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18.2. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the energy balance tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.energy_balance.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18.3. Number Of Surface Temperatures\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe maximum number of distinct surface temperatures in a grid cell (for example, each subgrid tile may have its own temperature)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "18.4. Evaporation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSpecify the formulation method for land surface evaporation, from soil and vegetation", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.energy_balance.evaporation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"alpha\" \n# \"beta\" \n# \"combined\" \n# \"Monteith potential evaporation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "18.5. Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nDescribe which processes are included in the energy balance scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.energy_balance.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"transpiration\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "19. Carbon Cycle\nLand surface carbon cycle\n19.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of carbon cycle in land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "19.2. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the carbon cycle tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "19.3. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of carbon cycle in seconds", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "19.4. Anthropogenic Carbon\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nDescribe the treament of the anthropogenic carbon pool", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"grand slam protocol\" \n# \"residence time\" \n# \"decay time\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "19.5. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the carbon scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "20. Carbon Cycle --&gt; Vegetation\nTODO\n20.1. Number Of Carbon Pools\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEnter the number of carbon pools used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "20.2. Carbon Pools\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the carbon pools used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "20.3. 
Forest Stand Dynamics\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the treatment of forest stand dynamics", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "21. Carbon Cycle --&gt; Vegetation --&gt; Photosynthesis\nTODO\n21.1. Method\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the general method used for photosynthesis (e.g. type of photosynthesis, distinction between C3 and C4 grasses, Nitrogen dependence, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22. Carbon Cycle --&gt; Vegetation --&gt; Autotrophic Respiration\nTODO\n22.1. Maintainance Respiration\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the general method used for maintenance respiration", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.2. Growth Respiration\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the general method used for growth respiration", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "23. Carbon Cycle --&gt; Vegetation --&gt; Allocation\nTODO\n23.1. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the general principle behind the allocation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "23.2. Allocation Bins\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSpecify distinct carbon bins used in allocation", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"leaves + stems + roots\" \n# \"leaves + stems + roots (leafy + woody)\" \n# \"leaves + fine roots + coarse roots + stems\" \n# \"whole plant (no distinction)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23.3. Allocation Fractions\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how the fractions of allocation are calculated", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"fixed\" \n# \"function of vegetation type\" \n# \"function of plant allometry\" \n# \"explicitly calculated\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "24. Carbon Cycle --&gt; Vegetation --&gt; Phenology\nTODO\n24.1. 
Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the general principle behind the phenology scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "25. Carbon Cycle --&gt; Vegetation --&gt; Mortality\nTODO\n25.1. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the general principle behind the mortality scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "26. Carbon Cycle --&gt; Litter\nTODO\n26.1. Number Of Carbon Pools\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEnter the number of carbon pools used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "26.2. Carbon Pools\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the carbon pools used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "26.3. Decomposition\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the decomposition methods used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.litter.decomposition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "26.4. Method\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the general method used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.litter.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "27. Carbon Cycle --&gt; Soil\nTODO\n27.1. Number Of Carbon Pools\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEnter the number of carbon pools used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "27.2. Carbon Pools\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the carbon pools used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "27.3. Decomposition\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the decomposition methods used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.soil.decomposition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "27.4. Method\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the general method used", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.carbon_cycle.soil.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "28. Carbon Cycle --&gt; Permafrost Carbon\nTODO\n28.1. Is Permafrost Included\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs permafrost included?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "28.2. Emitted Greenhouse Gases\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the GHGs emitted", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "28.3. Decomposition\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the decomposition methods used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "28.4. Impact On Soil Properties\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the impact of permafrost on soil properties", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "29. Nitrogen Cycle\nLand surface nitrogen cycle\n29.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of the nitrogen cycle in the land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.nitrogen_cycle.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "29.2. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the notrogen cycle tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.nitrogen_cycle.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "29.3. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of nitrogen cycle in seconds", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.nitrogen_cycle.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "29.4. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the nitrogen scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "30. River Routing\nLand surface river routing\n30.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of river routing in the land surface", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.river_routing.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "30.2. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the river routing, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "30.3. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of river routing scheme in seconds", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "30.4. Grid Inherited From Land Surface\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the grid inherited from land surface?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "30.5. Grid Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nGeneral description of grid, if not inherited from land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.grid_description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "30.6. Number Of Reservoirs\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEnter the number of reservoirs", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.number_of_reservoirs') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "30.7. Water Re Evaporation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nTODO", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.water_re_evaporation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"flood plains\" \n# \"irrigation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "30.8. Coupled To Atmosphere\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIs river routing coupled to the atmosphere model component?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "30.9. Coupled To Land\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the coupling between land and rivers", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.coupled_to_land') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "30.10. Quantities Exchanged With Atmosphere\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nIf couple to atmosphere, which quantities are exchanged between river routing and the atmosphere model components?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"heat\" \n# \"water\" \n# \"tracers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "30.11. Basin Flow Direction Map\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat type of basin flow direction map is being used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.basin_flow_direction_map') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"present day\" \n# \"adapted for other periods\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "30.12. Flooding\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the representation of flooding, if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.flooding') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "30.13. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the river routing", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "31. River Routing --&gt; Oceanic Discharge\nTODO\n31.1. Discharge Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSpecify how rivers are discharged to the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"direct (large rivers)\" \n# \"diffuse\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "31.2. Quantities Transported\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nQuantities that are exchanged from river-routing to the ocean model component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"heat\" \n# \"water\" \n# \"tracers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "32. Lakes\nLand surface lakes\n32.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of lakes in the land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "32.2. Coupling With Rivers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nAre lakes coupled to the river routing model component?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.coupling_with_rivers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "32.3. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of lake scheme in seconds", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.lakes.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "32.4. Quantities Exchanged With Rivers\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nIf coupling with rivers, which quantities are exchanged between the lakes and rivers", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"heat\" \n# \"water\" \n# \"tracers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "32.5. Vertical Grid\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the vertical grid of lakes", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.vertical_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "32.6. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the lake scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "33. Lakes --&gt; Method\nTODO\n33.1. Ice Treatment\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs lake ice included?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.method.ice_treatment') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "33.2. Albedo\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the treatment of lake albedo", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.method.albedo') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "33.3. Dynamics\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhich dynamics of lakes are treated? horizontal, vertical, etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.method.dynamics') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"No lake dynamics\" \n# \"vertical\" \n# \"horizontal\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "33.4. Dynamic Lake Extent\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs a dynamic lake extent scheme included?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "33.5. Endorheic Basins\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBasins not flowing to ocean included?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.method.endorheic_basins') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "34. Lakes --&gt; Wetlands\nTODO\n34.1. 
Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the treatment of wetlands, if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.wetlands.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
thesby/CaffeAssistant
tutorial/ipynb/01-learning-lenet.ipynb
mit
[ "Solving in Python with LeNet\nIn this example, we'll explore learning with Caffe in Python, using the fully-exposed Solver interface.\n1. Setup\n\nSet up the Python environment: we'll use the pylab import for numpy and plot inline.", "from pylab import *\n%matplotlib inline", "Import caffe, adding it to sys.path if needed. Make sure you've built pycaffe.", "caffe_root = '../' # this file should be run from {caffe_root}/examples (otherwise change this line)\n\nimport sys\nsys.path.insert(0, caffe_root + 'python')\nimport caffe", "We'll be using the provided LeNet example data and networks (make sure you've downloaded the data and created the databases, as below).", "# run scripts from caffe root\nimport os\nos.chdir(caffe_root)\n# Download data\n!data/mnist/get_mnist.sh\n# Prepare data\n!examples/mnist/create_mnist.sh\n# back to examples\nos.chdir('examples')", "2. Creating the net\nNow let's make a variant of LeNet, the classic 1989 convnet architecture.\nWe'll need two external files to help out:\n* the net prototxt, defining the architecture and pointing to the train/test data\n* the solver prototxt, defining the learning parameters\nWe start by creating the net. We'll write the net in a succinct and natural way as Python code that serializes to Caffe's protobuf model format.\nThis network expects to read from pregenerated LMDBs, but reading directly from ndarrays is also possible using MemoryDataLayer.", "from caffe import layers as L, params as P\n\ndef lenet(lmdb, batch_size):\n # our version of LeNet: a series of linear and simple nonlinear transformations\n n = caffe.NetSpec()\n \n n.data, n.label = L.Data(batch_size=batch_size, backend=P.Data.LMDB, source=lmdb,\n transform_param=dict(scale=1./255), ntop=2)\n \n n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=20, weight_filler=dict(type='xavier'))\n n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)\n n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=50, weight_filler=dict(type='xavier'))\n n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)\n n.fc1 = L.InnerProduct(n.pool2, num_output=500, weight_filler=dict(type='xavier'))\n n.relu1 = L.ReLU(n.fc1, in_place=True)\n n.score = L.InnerProduct(n.relu1, num_output=10, weight_filler=dict(type='xavier'))\n n.loss = L.SoftmaxWithLoss(n.score, n.label)\n \n return n.to_proto()\n \nwith open('mnist/lenet_auto_train.prototxt', 'w') as f:\n f.write(str(lenet('mnist/mnist_train_lmdb', 64)))\n \nwith open('mnist/lenet_auto_test.prototxt', 'w') as f:\n f.write(str(lenet('mnist/mnist_test_lmdb', 100)))", "The net has been written to disk in a more verbose but human-readable serialization format using Google's protobuf library. You can read, write, and modify this description directly. Let's take a look at the train net.", "!cat mnist/lenet_auto_train.prototxt", "Now let's see the learning parameters, which are also written as a prototxt file (already provided on disk). We're using SGD with momentum, weight decay, and a specific learning rate schedule.", "!cat mnist/lenet_auto_solver.prototxt", "3. Loading and checking the solver\n\nLet's pick a device and load the solver. 
We'll use SGD (with momentum), but other methods (such as Adagrad and Nesterov's accelerated gradient) are also available.", "caffe.set_device(0)\ncaffe.set_mode_gpu()\n\n### load the solver and create train and test nets\nsolver = None # ignore this workaround for lmdb data (can't instantiate two solvers on the same data)\nsolver = caffe.SGDSolver('mnist/lenet_auto_solver.prototxt')", "To get an idea of the architecture of our net, we can check the dimensions of the intermediate features (blobs) and parameters (these will also be useful to refer to when manipulating data later).", "# each output is (batch size, feature dim, spatial dim)\n[(k, v.data.shape) for k, v in solver.net.blobs.items()]\n\n# just print the weight sizes (we'll omit the biases)\n[(k, v[0].data.shape) for k, v in solver.net.params.items()]", "Before taking off, let's check that everything is loaded as we expect. We'll run a forward pass on the train and test nets and check that they contain our data.", "solver.net.forward() # train net\nsolver.test_nets[0].forward() # test net (there can be more than one)\n\n# we use a little trick to tile the first eight images\nimshow(solver.net.blobs['data'].data[:8, 0].transpose(1, 0, 2).reshape(28, 8*28), cmap='gray'); axis('off')\nprint 'train labels:', solver.net.blobs['label'].data[:8]\n\nimshow(solver.test_nets[0].blobs['data'].data[:8, 0].transpose(1, 0, 2).reshape(28, 8*28), cmap='gray'); axis('off')\nprint 'test labels:', solver.test_nets[0].blobs['label'].data[:8]", "4. Stepping the solver\nBoth train and test nets seem to be loading data, and to have correct labels.\n\nLet's take one step of (minibatch) SGD and see what happens.", "solver.step(1)", "Do we have gradients propagating through our filters? Let's see the updates to the first layer, shown here as a $4 \\times 5$ grid of $5 \\times 5$ filters.", "imshow(solver.net.params['conv1'][0].diff[:, 0].reshape(4, 5, 5, 5)\n .transpose(0, 2, 1, 3).reshape(4*5, 5*5), cmap='gray'); axis('off')", "5. Writing a custom training loop\nSomething is happening. Let's run the net for a while, keeping track of a few things as it goes.\nNote that this process will be the same as if training through the caffe binary. In particular:\n* logging will continue to happen as normal\n* snapshots will be taken at the interval specified in the solver prototxt (here, every 5000 iterations)\n* testing will happen at the interval specified (here, every 500 iterations)\nSince we have control of the loop in Python, we're free to compute additional things as we go, as we show below. 
We can do many other things as well, for example:\n* write a custom stopping criterion\n* change the solving process by updating the net in the loop", "%%time\nniter = 200\ntest_interval = 25\n# losses will also be stored in the log\ntrain_loss = zeros(niter)\ntest_acc = zeros(int(np.ceil(niter / test_interval)))\noutput = zeros((niter, 8, 10))\n\n# the main solver loop\nfor it in range(niter):\n solver.step(1) # SGD by Caffe\n \n # store the train loss\n train_loss[it] = solver.net.blobs['loss'].data\n \n # store the output on the first test batch\n # (start the forward pass at conv1 to avoid loading new data)\n solver.test_nets[0].forward(start='conv1')\n output[it] = solver.test_nets[0].blobs['score'].data[:8]\n \n # run a full test every so often\n # (Caffe can also do this for us and write to a log, but we show here\n # how to do it directly in Python, where more complicated things are easier.)\n if it % test_interval == 0:\n print 'Iteration', it, 'testing...'\n correct = 0\n for test_it in range(100):\n solver.test_nets[0].forward()\n correct += sum(solver.test_nets[0].blobs['score'].data.argmax(1)\n == solver.test_nets[0].blobs['label'].data)\n test_acc[it // test_interval] = correct / 1e4", "Let's plot the train loss and test accuracy.", "_, ax1 = subplots()\nax2 = ax1.twinx()\nax1.plot(arange(niter), train_loss)\nax2.plot(test_interval * arange(len(test_acc)), test_acc, 'r')\nax1.set_xlabel('iteration')\nax1.set_ylabel('train loss')\nax2.set_ylabel('test accuracy')\nax2.set_title('Test Accuracy: {:.2f}'.format(test_acc[-1]))", "The loss seems to have dropped quickly and coverged (except for stochasticity), while the accuracy rose correspondingly. Hooray!\n\nSince we saved the results on the first test batch, we can watch how our prediction scores evolved. We'll plot time on the $x$ axis and each possible label on the $y$, with lightness indicating confidence.", "for i in range(8):\n figure(figsize=(2, 2))\n imshow(solver.test_nets[0].blobs['data'].data[i, 0], cmap='gray')\n figure(figsize=(10, 2))\n imshow(output[:50, i].T, interpolation='nearest', cmap='gray')\n xlabel('iteration')\n ylabel('label')", "We started with little idea about any of these digits, and ended up with correct classifications for each. If you've been following along, you'll see the last digit is the most difficult, a slanted \"9\" that's (understandably) most confused with \"4\".\n\nNote that these are the \"raw\" output scores rather than the softmax-computed probability vectors. The latter, shown below, make it easier to see the confidence of our net (but harder to see the scores for less likely digits).", "for i in range(8):\n figure(figsize=(2, 2))\n imshow(solver.test_nets[0].blobs['data'].data[i, 0], cmap='gray')\n figure(figsize=(10, 2))\n imshow(exp(output[:50, i].T) / exp(output[:50, i].T).sum(0), interpolation='nearest', cmap='gray')\n xlabel('iteration')\n ylabel('label')", "6. 
Experiment with architecture and optimization\nNow that we've defined, trained, and tested LeNet there are many possible next steps:\n\nDefine new architectures for comparison\nTune optimization by setting base_lr and the like or simply training longer\nSwitching the solver type from SGD to an adaptive method like AdaDelta or Adam\n\nFeel free to explore these directions by editing the all-in-one example that follows.\nLook for \"EDIT HERE\" comments for suggested choice points.\nBy default this defines a simple linear classifier as a baseline.\nIn case your coffee hasn't kicked in and you'd like inspiration, try out\n\nSwitch the nonlinearity from ReLU to ELU or a saturing nonlinearity like Sigmoid\nStack more fully connected and nonlinear layers\nSearch over learning rate 10x at a time (trying 0.1 and 0.001)\nSwitch the solver type to Adam (this adaptive solver type should be less sensitive to hyperparameters, but no guarantees...)\nSolve for longer by setting niter higher (to 500 or 1,000 for instance) to better show training differences", "train_net_path = 'mnist/custom_auto_train.prototxt'\ntest_net_path = 'mnist/custom_auto_test.prototxt'\nsolver_config_path = 'mnist/custom_auto_solver.prototxt'\n\n### define net\ndef custom_net(lmdb, batch_size):\n # define your own net!\n n = caffe.NetSpec()\n \n # keep this data layer for all networks\n n.data, n.label = L.Data(batch_size=batch_size, backend=P.Data.LMDB, source=lmdb,\n transform_param=dict(scale=1./255), ntop=2)\n \n # EDIT HERE to try different networks\n # this single layer defines a simple linear classifier\n # (in particular this defines a multiway logistic regression)\n n.score = L.InnerProduct(n.data, num_output=10, weight_filler=dict(type='xavier'))\n \n # EDIT HERE this is the LeNet variant we have already tried\n # n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=20, weight_filler=dict(type='xavier'))\n # n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)\n # n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=50, weight_filler=dict(type='xavier'))\n # n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)\n # n.fc1 = L.InnerProduct(n.pool2, num_output=500, weight_filler=dict(type='xavier'))\n # EDIT HERE consider L.ELU or L.Sigmoid for the nonlinearity\n # n.relu1 = L.ReLU(n.fc1, in_place=True)\n # n.score = L.InnerProduct(n.fc1, num_output=10, weight_filler=dict(type='xavier'))\n \n # keep this loss layer for all networks\n n.loss = L.SoftmaxWithLoss(n.score, n.label)\n \n return n.to_proto()\n\nwith open(train_net_path, 'w') as f:\n f.write(str(custom_net('mnist/mnist_train_lmdb', 64))) \nwith open(test_net_path, 'w') as f:\n f.write(str(custom_net('mnist/mnist_test_lmdb', 100)))\n\n### define solver\nfrom caffe.proto import caffe_pb2\ns = caffe_pb2.SolverParameter()\n\n# Set a seed for reproducible experiments:\n# this controls for randomization in training.\ns.random_seed = 0xCAFFE\n\n# Specify locations of the train and (maybe) test networks.\ns.train_net = train_net_path\ns.test_net.append(test_net_path)\ns.test_interval = 500 # Test after every 500 training iterations.\ns.test_iter.append(100) # Test on 100 batches each time we test.\n\ns.max_iter = 10000 # no. 
of times to update the net (training iterations)\n \n# EDIT HERE to try different solvers\n# solver types include \"SGD\", \"Adam\", and \"Nesterov\" among others.\ns.type = \"SGD\"\n\n# Set the initial learning rate for SGD.\ns.base_lr = 0.01 # EDIT HERE to try different learning rates\n# Set momentum to accelerate learning by\n# taking weighted average of current and previous updates.\ns.momentum = 0.9\n# Set weight decay to regularize and prevent overfitting\ns.weight_decay = 5e-4\n\n# Set `lr_policy` to define how the learning rate changes during training.\n# This is the same policy as our default LeNet.\ns.lr_policy = 'inv'\ns.gamma = 0.0001\ns.power = 0.75\n# EDIT HERE to try the fixed rate (and compare with adaptive solvers)\n# `fixed` is the simplest policy that keeps the learning rate constant.\n# s.lr_policy = 'fixed'\n\n# Display the current training loss and accuracy every 1000 iterations.\ns.display = 1000\n\n# Snapshots are files used to store networks we've trained.\n# We'll snapshot every 5K iterations -- twice during training.\ns.snapshot = 5000\ns.snapshot_prefix = 'mnist/custom_net'\n\n# Train on the GPU\ns.solver_mode = caffe_pb2.SolverParameter.GPU\n\n# Write the solver to a temporary file and return its filename.\nwith open(solver_config_path, 'w') as f:\n f.write(str(s))\n\n### load the solver and create train and test nets\nsolver = None # ignore this workaround for lmdb data (can't instantiate two solvers on the same data)\nsolver = caffe.get_solver(solver_config_path)\n\n### solve\nniter = 250 # EDIT HERE increase to train for longer\ntest_interval = niter / 10\n# losses will also be stored in the log\ntrain_loss = zeros(niter)\ntest_acc = zeros(int(np.ceil(niter / test_interval)))\n\n# the main solver loop\nfor it in range(niter):\n solver.step(1) # SGD by Caffe\n \n # store the train loss\n train_loss[it] = solver.net.blobs['loss'].data\n \n # run a full test every so often\n # (Caffe can also do this for us and write to a log, but we show here\n # how to do it directly in Python, where more complicated things are easier.)\n if it % test_interval == 0:\n print 'Iteration', it, 'testing...'\n correct = 0\n for test_it in range(100):\n solver.test_nets[0].forward()\n correct += sum(solver.test_nets[0].blobs['score'].data.argmax(1)\n == solver.test_nets[0].blobs['label'].data)\n test_acc[it // test_interval] = correct / 1e4\n\n_, ax1 = subplots()\nax2 = ax1.twinx()\nax1.plot(arange(niter), train_loss)\nax2.plot(test_interval * arange(len(test_acc)), test_acc, 'r')\nax1.set_xlabel('iteration')\nax1.set_ylabel('train loss')\nax2.set_ylabel('test accuracy')\nax2.set_title('Custom Test Accuracy: {:.2f}'.format(test_acc[-1]))" ]
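Section 6 above suggests switching the solver type to Adam; the following is a minimal, hypothetical sketch of how the all-in-one SolverParameter could be adjusted for that. The fields `momentum2` and `delta` (Adam's beta2 and epsilon) are assumed from caffe.proto, and the solver filename and snapshot prefix are made up for illustration, so verify them against your Caffe build before relying on this.

```python
# Hedged sketch: an Adam variant of the custom solver defined above.
# Assumes train_net_path, test_net_path, caffe and caffe_pb2 from the preceding
# cells; momentum2/delta field names are assumptions taken from caffe.proto.
s_adam = caffe_pb2.SolverParameter()
s_adam.random_seed = 0xCAFFE
s_adam.train_net = train_net_path
s_adam.test_net.append(test_net_path)
s_adam.test_interval = 500
s_adam.test_iter.append(100)
s_adam.max_iter = 10000

s_adam.type = "Adam"          # adaptive solver suggested in the text above
s_adam.base_lr = 0.001        # Adam usually wants a smaller base_lr than SGD
s_adam.momentum = 0.9         # beta1
s_adam.momentum2 = 0.999      # beta2 (assumed field name)
s_adam.delta = 1e-8           # epsilon (assumed field name)
s_adam.lr_policy = 'fixed'    # keep the rate constant; Adam adapts per parameter

s_adam.display = 1000
s_adam.snapshot = 5000
s_adam.snapshot_prefix = 'mnist/custom_net_adam'                # hypothetical prefix
s_adam.solver_mode = caffe_pb2.SolverParameter.GPU

adam_solver_path = 'mnist/custom_auto_solver_adam.prototxt'     # hypothetical filename
with open(adam_solver_path, 'w') as f:
    f.write(str(s_adam))
solver = caffe.get_solver(adam_solver_path)   # then reuse the training loop above
```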
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
SJSlavin/phys202-2015-work
assignments/assignment09/IntegrationEx01.ipynb
mit
[ "Integration Exercise 1\nImports", "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import integrate", "Trapezoidal rule\nThe trapezoidal rule generates a numerical approximation to the 1d integral:\n$$ I(a,b) = \\int_a^b f(x) dx $$\nby dividing the interval $[a,b]$ into $N$ subdivisions of length $h$:\n$$ h = (b-a)/N $$\nNote that this means the function will be evaluated at $N+1$ points on $[a,b]$. The main idea of the trapezoidal rule is that the function is approximated by a straight line between each of these points.\nWrite a function trapz(f, a, b, N) that performs the trapezoidal rule on the function f over the interval $[a,b]$ with N subdivisions (N+1 points).", "def trapz(f, a, b, N):\n \"\"\"Integrate the function f(x) over the range [a,b] with N subdivisions.\"\"\"\n h = (b-a)/N\n xvals = np.linspace(a, b, N+1)\n yvals = f(xvals)\n \n return 0.5 * np.sum((h*yvals[0], h*yvals[-1], 2*h*np.sum(yvals[1:-1])))\n\nf = lambda x: x**2\ng = lambda x: np.sin(x)\n\nI = trapz(f, 0, 1, 1000)\nassert np.allclose(I, 0.33333349999999995)\nJ = trapz(g, 0, np.pi, 1000)\nassert np.allclose(J, 1.9999983550656628)", "Now use scipy.integrate.quad to integrate the f and g functions and see how the result compares with your trapz function. Print the results and errors.", "# YOUR CODE HERE \niq_f, err_f = integrate.quad(f, 0, 1)\ntr_f = trapz(f, 0, 1, 1000)\nprint(iq_f, err_f)\nprint(tr_f)\nprint()\n\niq_g, err_g = integrate.quad(g, 0, np.pi)\ntr_g = trapz(g, 0, np.pi, 1000)\nprint(iq_g, err_g)\nprint(tr_g)\n\n\nassert True # leave this cell to grade the previous one" ]
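As a sanity check on the hand-written trapz above (an addition, not part of the original assignment), the same composite trapezoidal rule is available as numpy.trapz, so the two implementations can be compared on the test functions used in the assertions:

```python
# Cross-check of trapz() against numpy's built-in composite trapezoidal rule.
# Sampling with N+1 = 1001 points reproduces the N = 1000 subdivisions used above.
x_f = np.linspace(0, 1, 1001)
x_g = np.linspace(0, np.pi, 1001)

print(np.trapz(x_f**2, x_f))       # ~0.3333335, matches trapz(f, 0, 1, 1000)
print(np.trapz(np.sin(x_g), x_g))  # ~1.9999984, matches trapz(g, 0, np.pi, 1000)
```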
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
bocklund/notebooks
pycalphad/Plot activity.ipynb
mit
[ "Calculate activity with pycalphad\nSet up the database", "from pycalphad import Database, equilibrium, variables as v\nimport numpy as np\n\n# Database from Zhou, C., Guo, C., Li, C. & Du, Z. Thermodynamic assessment of the phase equilibria and prediction of glass-forming ability of the Al–Cu–Zr system. J. Non. Cryst. Solids 461, 47–60 (2017).\ndbf = Database('Al-Cu-Zr_Zhou.TDB') \n\ncomps = ['CU', 'ZR', 'VA']\n# There are lots of phases in Al-Cu-Zr, but we only want the ones that describe Cu-Zr.\n# Filter out the phases that don't have any of the comps in a sublattice\nphases = [phase for phase in dbf.phases.keys() \n if all(len(set(comps).intersection(subl)) > 0 for subl in dbf.phases[phase].constituents)] ", "Activity data\nActivities are digitized from Fig. 5 in Zaitsev, A. I., Zaitseva, N. E., Alexeeva, J. P., Dunaev, S. F. & Nechaev, Y. S. Thermodynamics and amorphization of the copper–zirconium alloys. Phys. Chem. Chem. Phys. 5, 4185–4196 (2003).\nThe measurements are at 1623 K and the reference is the pure components at that temperature.", "temperature = 1623\nexp_acr_cu = [1.00000, 0.94445795, 0.8853034, 0.81050104, 0.76030916, 0.71867985, 0.6638515, 0.6107617, 0.5542213, 0.50262356, 0.44120756, 0.3798061, 0.32600257, 0.26583955, 0.20161846, 0.15157665, 0.119181104, 0.09424159, 0.066696145, 0.045932073, 0.044422694, 0.044210993, 0.04480312, 0.044530697, 0.02343831, 0]\nexp_x_zr = [0, 0.050927155, 0.10347285, 0.14980622, 0.18914688, 0.21043298, 0.24472165, 0.27006668, 0.30789167, 0.33137769, 0.36974367, 0.40512633, 0.44650444, 0.48860443, 0.53740174, 0.58942246, 0.62883025, 0.65987545, 0.70396316, 0.74770355, 0.7531053, 0.7965503, 0.8489486, 0.94833225, 0.97249275, 1.0]", "Calculate the reference state\nYou could also do this in the normal equilibrium calculation and select out the point you want (as long as it is in the calculation), but it might be clearer to be more explicit.", "ref_eq = equilibrium(dbf, comps, phases, {v.P: 101325, v.T: temperature, v.X('ZR'): 0})", "Calculate the equilibria\nDo the calculation over the composition range", "eq = equilibrium(dbf, comps, phases, {v.P: 101325, v.T: temperature, v.X('ZR'): (0, 1, 0.005)})", "Get the chemical potentials\nWe need to select the chemical potentials from the xarray", "chempot_ref = ref_eq.MU.sel(component='CU').squeeze()\nchempot = eq.MU.sel(component='CU').squeeze()", "Calculate activity using\n$ \\mu_i = \\mu_i^0 + RT\\ln a_i$\nrearranged to \n$ a_i = \\exp \\left(\\frac{\\mu_i - \\mu_i^0}{RT}\\right) $", "acr_cu = np.exp((chempot - chempot_ref)/(8.315*temperature))", "Plot the result", "%matplotlib inline\nimport matplotlib.pyplot as plt\nplt.plot(eq.X.sel(component='ZR', vertex=0).squeeze(), acr_cu)\nplt.scatter(exp_x_zr, exp_acr_cu)\nplt.xlabel('X(ZR)')\nplt.ylabel('a(CU)')\nplt.title('Activity of Cu at 1623 K')" ]
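As an optional extension (an addition made here, not part of the original notebook), the computed activity can be converted into an activity coefficient, gamma_Cu = a_Cu / x_Cu, which makes the deviation from ideal Raoultian behaviour easier to see. It reuses only quantities defined above and the same xarray selection pattern; the point at the pure-Zr edge is 0/0 and is best ignored.

```python
# Hedged sketch: activity coefficient of Cu from the activity computed above.
x_cu = eq.X.sel(component='CU', vertex=0).squeeze()
gamma_cu = acr_cu / x_cu

plt.plot(eq.X.sel(component='ZR', vertex=0).squeeze(), gamma_cu)
plt.xlabel('X(ZR)')
plt.ylabel(r'$\gamma$(CU)')
plt.title('Activity coefficient of Cu at 1623 K')
```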
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
rhiever/scipy_2015_sklearn_tutorial
notebooks/04.2 Model Complexity and GridSearchCV.ipynb
cc0-1.0
[ "%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt", "Parameter selection, Validation & Testing\nMost models have parameters that influence how complex a model they can learn. Remember using KNeighborsRegressor.\nIf we change the number of neighbors we consider, we get a smoother and smoother prediction:", "from figures import plot_kneighbors_regularization\nplot_kneighbors_regularization()", "In the above figure, we see fits for three different values of n_neighbors.\nFor n_neighbors=2, the data is overfit, the model is too flexible and can adjust too much to the noise in the training data. For n_neighbors=20, the model is not flexible enough, and cannot model the variation in the data appropriately.\nIn the middle, for n_neighbors = 5, we have found a good mid-point. It fits\nthe data fairly well, and does not suffer from the overfit or underfit\nproblems seen in the figures on either side. What we would like is a\nway to quantitatively identify overfit and underfit, and optimize the\nhyperparameters (in this case, the number of neighbors n_neighbors) in order to\ndetermine the best algorithm.\nWe trade off remembering too much about the particularities and noise of the training data vs. not modeling enough of the variability. This is a trade-off that needs to be made in basically every machine learning application and is a central concept, called bias-variance-tradeoff or \"overfitting vs underfitting\".\n<img src=\"figures/overfitting_underfitting_cartoon.svg\" width=\"100%\">\nHyperparameters, Over-fitting, and Under-fitting\nUnfortunately, there is no general rule for how to find the sweet spot, and so machine learning practitioners have to find the best trade-off of model-complexity and generalization by trying several parameter settings.\nMost commonly this is done using a brute force search, for example over multiple values of n_neighbors:", "from sklearn.cross_validation import cross_val_score, KFold\nfrom sklearn.neighbors import KNeighborsRegressor\n# generate toy dataset:\nx = np.linspace(-3, 3, 100)\nrng = np.random.RandomState(42)\ny = np.sin(4 * x) + x + rng.normal(size=len(x))\nX = x[:, np.newaxis]\n\ncv = KFold(n=len(x), shuffle=True)\n\n# for each parameter setting do cross_validation:\nfor n_neighbors in [1, 3, 5, 10, 20]:\n scores = cross_val_score(KNeighborsRegressor(n_neighbors=n_neighbors), X, y, cv=cv)\n print(\"n_neighbors: %d, average score: %f\" % (n_neighbors, np.mean(scores)))", "There is a function in scikit-learn, called validation_curve, to reproduce the cartoon figure above. 
It plots one parameter, such as the number of neighbors, against training and validation error (using cross-validation):", "from sklearn.learning_curve import validation_curve\nn_neighbors = [1, 3, 5, 10, 20, 50]\ntrain_errors, test_errors = validation_curve(KNeighborsRegressor(), X, y, param_name=\"n_neighbors\",\n param_range=n_neighbors, cv=cv)\nplt.plot(n_neighbors, train_errors.mean(axis=1), label=\"train error\")\nplt.plot(n_neighbors, test_errors.mean(axis=1), label=\"test error\")\nplt.legend(loc=\"best\")", "Note that many neighbors mean a \"smooth\" or \"simple\" model, so the plot is the mirror image of the diagram above.\nIf multiple parameters are important, like the parameters C and gamma in an SVM (more about that later), all possible combinations are tried:", "from sklearn.cross_validation import cross_val_score, KFold\nfrom sklearn.svm import SVR\n\n# for each parameter setting do cross_validation:\nfor C in [0.001, 0.01, 0.1, 1, 10]:\n for gamma in [0.001, 0.01, 0.1, 1]:\n scores = cross_val_score(SVR(C=C, gamma=gamma), X, y, cv=cv)\n print(\"C: %f, gamma: %f, average score: %f\" % (C, gamma, np.mean(scores)))", "As this is such a common pattern, there is a built-in class for this in scikit-learn, GridSearchCV. GridSearchCV takes a dictionary that describes the parameters that should be tried and a model to train.\nThe grid of parameters is defined as a dictionary, where the keys are the parameters and the values are the settings to be tested.", "from sklearn.grid_search import GridSearchCV\nparam_grid = {'C': [0.001, 0.01, 0.1, 1, 10], 'gamma': [0.001, 0.01, 0.1, 1]}\n\ngrid = GridSearchCV(SVR(), param_grid=param_grid, cv=cv, verbose=3)", "One of the great things about GridSearchCV is that it is a meta-estimator. It takes an estimator like SVR above, and creates a new estimator that behaves exactly the same - in this case, like a regressor.\nSo we can call fit on it, to train it:", "grid.fit(X, y)", "What fit does is a bit more involved than what we did above. First, it runs the same loop with cross-validation, to find the best parameter combination.\nOnce it has the best combination, it runs fit again on all data passed to fit (without cross-validation), to build a single new model using the best parameter setting.\nThen, as with all models, we can use predict or score:", "grid.predict(X)", "You can inspect the best parameters found by GridSearchCV in the best_params_ attribute, and the best score in the best_score_ attribute:", "print(grid.best_score_)\n\nprint(grid.best_params_)", "There is a problem with using this score for evaluation, however. You might be making what is called a multiple hypothesis testing error. If you try very many parameter settings, some of them will work better just by chance, and the score that you obtained might not reflect how your model would perform on new unseen data.\nTherefore, it is good to split off a separate test-set before performing grid-search. 
This pattern can be seen as a training-validation-test split, and is common in machine learning:\n<img src=\"figures/grid_search_cross_validation.svg\" width=\"100%\">\nWe can do this very easily by splitting off some test data using train_test_split, training GridSearchCV on the training set, and applying the score method to the test set:", "from sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)\n\nparam_grid = {'C': [0.001, 0.01, 0.1, 1, 10], 'gamma': [0.001, 0.01, 0.1, 1]}\ncv = KFold(n=len(X_train), n_folds=10, shuffle=True)\n\ngrid = GridSearchCV(SVR(), param_grid=param_grid, cv=cv)\n\ngrid.fit(X_train, y_train)\ngrid.score(X_test, y_test)", "Some practitioners go for an easier scheme, splitting the data simply into three parts, training, validation and testing. This is a possible alternative if your training set is very large, or it is infeasible to train many models using cross-validation because training a model takes a very long time.\nYou can do this with scikit-learn for example by splitting off a test-set and then applying GridSearchCV with ShuffleSplit cross-validation with a single iteration:\n<img src=\"figures/train_validation_test2.svg\" width=\"100%\">", "from sklearn.cross_validation import train_test_split, ShuffleSplit\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)\n\nparam_grid = {'C': [0.001, 0.01, 0.1, 1, 10], 'gamma': [0.001, 0.01, 0.1, 1]}\nsingle_split_cv = ShuffleSplit(len(X_train), 1)\n\ngrid = GridSearchCV(SVR(), param_grid=param_grid, cv=single_split_cv, verbose=3)\n\ngrid.fit(X_train, y_train)\ngrid.score(X_test, y_test)", "This is much faster, but will likely result in worse hyperparameters and therefore worse results.", "clf = GridSearchCV(SVR(), param_grid=param_grid)\ncross_val_score(clf, X, y)" ]
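The last cell above already nests GridSearchCV inside cross_val_score; as a small addition (not in the original tutorial), the sketch below makes that nested cross-validation explicit by printing the outer-fold scores, which estimate the performance of the whole tuning procedure rather than of one fixed parameter setting:

```python
# Hedged sketch: explicit nested cross-validation using the objects defined above.
clf = GridSearchCV(SVR(), param_grid=param_grid)
nested_scores = cross_val_score(clf, X, y, cv=5)   # each outer fold re-runs the grid search
print("outer-fold scores:", nested_scores)
print("mean: %.3f +/- %.3f" % (nested_scores.mean(), nested_scores.std()))
```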
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
hmenke/espresso
doc/tutorials/02-charged_system/02-charged_system-1.ipynb
gpl-3.0
[ "Tutorial 2: A Simple Charged System, Part 1\n1 Introduction\nThis tutorial introduces some of the basic features of ESPResSo for charged systems by constructing a simulation script for a simple salt crystal. In the subsequent task, we use a more realistic force-field for a NaCl crystal. Finally, we introduce constraints and 2D-Electrostatics to simulate a molten salt in a parallel plate capacitor. We assume that the reader is familiar with the basic concepts of Python and MD simulations. Compile espresso with the following features in your myconfig.hpp to be set throughout the whole tutorial:\n```\ndefine EXTERNAL_FORCES\ndefine MASS\ndefine ELECTROSTATICS\ndefine LENNARD_JONES\n```\n2 Basic Set Up\nThe script for the tutorial can be found in your build directory at <tt>/doc/tutorials/02-charged_system/scripts/nacl.py</tt>.\nWe start with importing numpy, pyplot, and the espressomd features and setting up all\nthe relevant simulation parameters in one place:", "from __future__ import print_function\nfrom espressomd import System, electrostatics, features\nimport espressomd\nimport numpy\nimport matplotlib.pyplot as plt\nplt.ion()\n\n# Print enabled features\nrequired_features = [\"EXTERNAL_FORCES\", \"MASS\", \"ELECTROSTATICS\", \"LENNARD_JONES\"]\nespressomd.assert_features(required_features)\nprint(espressomd.features())\n\n# System Parameters\nn_part = 200\nn_ionpairs = n_part/2\ndensity = 0.5\ntime_step = 0.01\ntemp = 1.0\ngamma = 1.0\nl_bjerrum = 7.0\n\nnum_steps_equilibration = 1000\nnum_configs = 500\ninteg_steps_per_config = 1000\n\n# Particle Parameters\ntypes = {\"Anion\": 0, \"Cation\": 1}\nnumbers = {\"Anion\": n_ionpairs, \"Cation\": n_ionpairs}\ncharges = {\"Anion\": -1.0, \"Cation\": 1.0}\nlj_sigmas = {\"Anion\": 1.0, \"Cation\": 1.0}\nlj_epsilons = {\"Anion\": 1.0, \"Cation\": 1.0}\n\nWCA_cut = 2.**(1. / 6.)\nlj_cuts = {\"Anion\": WCA_cut * lj_sigmas[\"Anion\"], \n \"Cation\": WCA_cut * lj_sigmas[\"Cation\"]}\n", "These variables do not change anything in the simulation engine, but\nare just standard Python variables. They are used to increase the\nreadability and flexibility of the script. The box length is not a\nparameter of this simulation, it is calculated from the number of\nparticles and the system density. This allows to change the parameters\nlater easily, e.g. to simulate a bigger system.\nWe use dictionaries for all particle related parameters, which is less error-prone and\nreadable as we will see later when we actually need the values. The parameters here define a purely repulsive, \nequally sized, monovalent salt.\nThe simulation engine itself is modified by changing the\nespressomd.System() properties. We create an instance <tt>system</tt> and\nset the box length, periodicity and time step. The skin depth <tt>skin</tt> \nis a parameter for the link--cell system which tunes its\nperformance, but shall not be discussed here. Further, we activate the Langevin thermostat\nfor our NVT ensemble with temperature <tt>temp</tt> and friction coefficient <tt>gamma</tt>.", "# Setup System\nbox_l = (n_part / density)**(1. 
/ 3.)\nsystem = System(box_l = [box_l, box_l, box_l])\nsystem.seed=42\nsystem.periodicity = [1, 1, 1]\nsystem.time_step = time_step\nsystem.cell_system.skin = 0.3\nsystem.thermostat.set_langevin(kT=temp, gamma=gamma)", "We now fill this simulation box with particles at random positions, using type and charge from our dictionaries.\nUsing the length of the particle list <tt>system.part</tt> for the id, we make sure that our particles are numbered consecutively.\nThe particle type is used to link non-bonded interactions to a certain group of particles.", "for i in range(int(n_ionpairs)):\n system.part.add(\n id=len(system.part), \n type=types[\"Anion\"], \n pos=numpy.random.random(3) * box_l, \n q=charges[\"Anion\"])\nfor i in range(int(n_ionpairs)):\n system.part.add(\n id=len(system.part), \n type=types[\"Cation\"], \n pos=numpy.random.random(3) * box_l, \n q=charges[\"Cation\"])", "Before we can really start the simulation, we have to specify the\ninteractions between our particles. We already defined the Lennard-Jones parameters at the beginning,\nwhat is left is to specify the combination rule and to iterate over particle type pairs. For simplicity, \nwe implement only the Lorentz-Berthelot rules. \nWe pass our interaction pair to <tt>system.non_bonded_inter[*,*]</tt> and set the \npre-calculated LJ parameters <tt>epsilon</tt>, <tt>sigma</tt> and <tt>cutoff</tt>. With <tt>shift=\"auto\"</tt>,\nwe shift the interaction potential to the cutoff so that $U_\\mathrm{LJ}(r_\\mathrm{cutoff})=0$.", "def combination_rule_epsilon(rule, eps1, eps2):\n if rule==\"Lorentz\":\n return (eps1*eps2)**0.5\n else:\n return ValueError(\"No combination rule defined\")\n\ndef combination_rule_sigma(rule, sig1, sig2):\n if rule==\"Berthelot\":\n return (sig1+sig2)*0.5\n else:\n return ValueError(\"No combination rule defined\")\n\n# Lennard-Jones interactions parameters \nfor s in [[\"Anion\", \"Cation\"], [\"Anion\", \"Anion\"], [\"Cation\", \"Cation\"]]:\n lj_sig = combination_rule_sigma(\"Berthelot\",lj_sigmas[s[0]], lj_sigmas[s[1]])\n lj_cut = combination_rule_sigma(\"Berthelot\", lj_cuts[s[0]], lj_cuts[s[1]])\n lj_eps = combination_rule_epsilon(\"Lorentz\", lj_epsilons[s[0]],lj_epsilons[s[1]])\n\n system.non_bonded_inter[types[s[0]], types[s[1]]].lennard_jones.set_params(\n epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, shift=\"auto\")", "3 Equilibration\nWith randomly positioned particles, we most likely have huge overlap and the strong repulsion will\ncause the simulation to crash. The next step in our script therefore is a suitable LJ equilibration.\nThis is known to be a tricky part of a simulation and several approaches exist to reduce the particle overlap.\nHere, we use a highly damped system (large gamma in the thermostat) and cap the forces of the LJ interaction.\nWe use <tt>system.analysis.mindist</tt> to get the minimal distance between all particles pairs. This value\nis used to progressively increase the force capping. This results in a slow increase of the force capping at\nstrong overlap. At the end, we reset our thermostat to the target values and deactivate the force cap by setting \nit to zero.", "# Lennard Jones Equilibration\nmax_sigma = max(lj_sigmas.values())\nmin_dist = 0.0\ncap = 10.0\n# Warmup Helper: Cold, highly damped system\nsystem.thermostat.set_langevin(kT=temp*0.1, gamma=gamma*50.0)\n\nwhile min_dist < max_sigma:\n #Warmup Helper: Cap max. 
force, increase slowly for overlapping particles\n min_dist = system.analysis.min_dist([types[\"Anion\"],types[\"Cation\"]],[types[\"Anion\"],types[\"Cation\"]])\n cap += min_dist\n#print min_dist, cap\n system.force_cap=cap\n system.integrator.run(10)\n\n# Don't forget to reset thermostat, timestep and force cap\nsystem.thermostat.set_langevin(kT=temp, gamma=gamma)\nsystem.force_cap=0 ", "ESPResSo uses so-called <tt>actors</tt> for electrostatics, magnetostatics and hydrodynamics. This ensures that unphysical combinations of algorithms are\navoided, for example simultaneous usage of two electrostatic interactions.\nAdding an actor to the system also activates the method and calls necessary\ninitialization routines. Here, we define a P$^3$M object with parameters Bjerrum\nlength and rms force error . This automatically starts a\ntuning function which tries to find optimal parameters for P$^3$M and prints them\nto the screen:", "p3m = electrostatics.P3M(prefactor=l_bjerrum*temp, \n accuracy=1e-3)\nsystem.actors.add(p3m)", "Before the production part of the simulation, we do a quick temperature \nequilibration. For the output, we gather all energies with <tt>system.analysis.energy()</tt>, calculate the \"current\" temperature from the ideal part and print it to the screen along with the total and Coulomb energies. Note that for the ideal gas the temperature is given via $1/2 m \\sqrt{\\langle v^2 \\rangle}=3/2 k_BT$, where $\\langle \\cdot \\rangle$ denotes the ensemble average. Calculating some kind of \"current temperature\" via $T_\\text{cur}=\\frac{m}{3 k_B} \\sqrt{ v^2 }$ you do not obtain the temperature in the system. Only when averaging the squared velocities first one would obtain the temperature for the ideal gas. $T$ is a fixed quantity and does not fluctuate in the canonical ensemble.\nWe integrate for a certain amount of steps with <tt>system.integrator.run(100)</tt>.", "# Temperature Equilibration\nsystem.time = 0.0\nfor i in range(int(num_steps_equilibration/50)):\n energy = system.analysis.energy()\n temp_measured = energy['kinetic'] / ((3.0 / 2.0) * n_part)\n print(\"t={0:.1f}, E_total={1:.2f}, E_coulomb={2:.2f},T={3:.4f}\".format(system.time, energy['total'], \n energy['coulomb'], temp_measured), end='\\r')\n system.integrator.run(200)", "<figure>\n<img src='figures/salt.png' alt='missing' style=\"width: 300px;\"/>\n<center>\n<figcaption>Figure 1: VMD Snapshot of the Salt System</figcaption>\n</figure>\n\n4 Running the Simulation\nNow we can integrate the particle trajectories for a couple of time\nsteps. 
Our integration loop basically looks like the equilibration:", "# Integration\nsystem.time = 0.0\nfor i in range(num_configs):\n energy = system.analysis.energy()\n temp_measured = energy['kinetic'] / ((3.0 / 2.0) * n_part)\n print(\"t={0:.1f}, E_total={1:.2f}, E_coulomb={2:.2f}, T={3:.4f}\".format(system.time, energy['total'],\n energy['coulomb'], temp_measured), end='\\r')\n system.integrator.run(integ_steps_per_config)\n\n # Internally append particle configuration\n system.analysis.append()", "Additionally, we append all particle configurations in the core with <tt>system.analysis.append()</tt> for a very convenient analysis later on.\n5 Analysis\nNow, we want to calculate the averaged radial distribution functions\n$g_{++}(r)$ and $g_{+-}(r)$ with the <tt>rdf()</tt> command from <tt>system.analysis</tt>:", "# Analysis\n# Calculate the averaged rdfs\nrdf_bins = 100\nr_min = 0.0\nr_max = system.box_l[0]/2.0\nr,rdf_00 = system.analysis.rdf(rdf_type='<rdf>', \n type_list_a=[types[\"Anion\"]],\n type_list_b=[types[\"Anion\"]], \n r_min=r_min,\n r_max=r_max, \n r_bins=rdf_bins)\nr,rdf_01 = system.analysis.rdf(rdf_type='<rdf>',\n type_list_a=[types[\"Anion\"]],\n type_list_b=[types[\"Cation\"]], \n r_min=r_min, r_max=r_max, r_bins=rdf_bins)", "The shown <tt>rdf()</tt> commands return the radial distribution functions for\nequally and oppositely charged particles for specified radii and number of bins. \nIn this case, we calculate the averaged rdf of the stored\nconfigurations, denoted by the chevrons in <tt>rdf_type='$<\\mathrm{rdf}>$'</tt>. Using <tt>rdf_type='rdf'</tt> would simply calculate the rdf of the current particle\nconfiguration. The results are two NumPy arrays containing the $r$ and $g(r)$\nvalues. We can then write the data into a file with standard python output routines.", "with open('rdf.data', 'w') as rdf_fp:\n for i in range(rdf_bins):\n rdf_fp.write(\"%1.5e %1.5e %1.5e\\n\" % \n (r[i], rdf_00[i], rdf_01[i]))", "Finally we can plot the two radial distribution functions using pyplot.", "# Plot the distribution functions\nplt.figure(figsize=(10,6), dpi=80)\nplt.plot(r[:],rdf_00[:], label='$g(r)_{++}$')\nplt.plot(r[:],rdf_01[:], label='$g(r)_{+-}$')\nplt.xlabel('$r$', fontsize=20)\nplt.ylabel('$g(r)$', fontsize=20)\nplt.legend(fontsize=20)\nplt.show()", "6 Task - Real Units\nSo far, the system has arbitrary units and is not connected to any real physical system.\nSimulate a proper NaCl crystal with the force field parameter taken from:\nR. Fuentes-Azcatl and M. Barbosa, Sodium Chloride, NaCl/$\\epsilon$ : New Force Field, J. Phys. Chem. B, 2016, 120(9), pp 2460-2470\n| Ion | $q/\\mathrm{e}$ | $\\sigma/\\mathrm{\\mathring{A}}$ | $(\\epsilon/\\mathrm{k_B})/\\mathrm{K}$ | $m/\\mathrm{u}$ |\n| ------------- | ------------- |------------- |------------- |------------- |\n| Na | +1 | 2.52 | 17.44 | 22.99 |\n| Cl | -1 | 3.85 | 192.45 | 35.453 |\nUse the following system parameters:\n| Parameter | Value |\n| --- | --- |\n| Temperature | $298\\ \\mathrm{K}$ | \n| Fiction Coeff. | $ 10\\ \\mathrm{ps}^{-1}$ | \n| Density | $1.5736\\ \\mathrm{ \\mu \\mathring{A}}^{-3}$ |\n| Bjerrum Length (298 K) | $439.2\\ \\mathrm{\\mathring{A}}$ | \n| Time Step | $2\\ \\mathrm{fs}$ |\nTo make your life more easy, don't try to equilibrate randomly positioned particles,\nbut set them up in a crystal structure close to equilibrium. If you do it right,\nyou don't even need the Lennard-Jones equilibration. 
\nTo speed things up, don't go further than 1000 particles and use a P$^3$M accuracy of $10^{-2}$.\nYour RDF should look like the plot in figure 2. When you get stuck,\nyou can look at the solution script <tt>/doc/tutorials/02-charged_system/scripts/nacl_units.py</tt> (or <tt>nacl_units_vis.py</tt> with visualization).\n<figure>\n <img src='figures/nacl_units.jpg' alt='missing' style=\"width: 600px;\"/>\n <center>\n <figcaption>Figure 2: Snapshot and RDF of the parameterized NaCl crystal.</figcaption>\n</figure>" ]
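For the NaCl task above, one possible way to start from a crystal-like configuration (a sketch under assumptions, not the reference solution in nacl_units.py) is to place the ions on an alternating simple-cubic lattice so that hardly any Lennard-Jones equilibration is needed. It assumes that box_l, types and charges have already been redefined for the real-units force field, and that the ionic masses are set separately via the mass argument of part.add (the MASS feature is compiled in):

```python
# Hedged sketch: rock-salt-like initial configuration on a simple cubic lattice.
# Assumes box_l, types, charges (and masses) follow the real-units table above.
import itertools

n_side = 10                       # 10**3 = 1000 sites -> 500 ion pairs
a = box_l / n_side                # lattice spacing from the box length
for i, (ix, iy, iz) in enumerate(itertools.product(range(n_side), repeat=3)):
    species = "Cation" if (ix + iy + iz) % 2 == 0 else "Anion"
    system.part.add(id=i, type=types[species], q=charges[species],
                    pos=[ix * a, iy * a, iz * a])
```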
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
krischer/pyadjoint
doc/example_dataset.ipynb
bsd-3-clause
[ "Example Data Set used in Pyadjoint\nThis document illustrates where the example data used in Pyadjoint originates from. It uses a set of 3D synthetics from the Shakemovie project and the same event extraced from a 2 second Instaseis database with the AK135 Earth model. Thus we effectively compare the results of a 3D simulation including topography, ellipticity, ... versus a simulation on a 1D background model with a spherical Earth. We will compare data in a period band from 20 to 100 seconds.\nTo establish a more practical terminology, the Shakemovie seismograms will serve as our observed data, whereas the ones from Instaseis will be considered synthetics.\nSource and Receiver\nWe use an event from the GCMT catalog:\n```\nEvent name: 201411150231A\nCMT origin time: 2014-11-15T02:31:50.260000Z\nAssumed half duration: 8.2\nMw = 7.0 Scalar Moment = 4.71e+19\nLatitude: 1.97\nLongitude: 126.42\nDepth in km: 37.3\nExponent for moment tensor: 19 units: N-m\n Mrr Mtt Mpp Mrt Mrp Mtp\nCMT 3.970 -0.760 -3.210 0.173 -2.220 -1.970\n```\nrecorded at station SY.DBO (SY denotes the synthetic data network from the Shakemovie project):\nLatitude: 43.12, Longitude: -123.24, Elevation: 984.0 m\nSetup Variables\nSets up some values we'll need throughout this document.", "import obspy\nimport numpy as np\n\nevent_longitude = 126.42\nevent_latitude = 1.97\nevent_depth_in_km = 37.3\n\nstation_longitude = -123.24\nstation_latitude = 43.12\n\nmax_period = 100.0\nmin_period = 20.0\n\ncmt_time = obspy.UTCDateTime(2014, 11, 15, 2, 31, 50.26)\n\n# Desired properties after the data processing.\nsampling_rate = 1.0\nnpts = 3600", "Map of Source and Receiver", "import matplotlib.pyplot as plt\nplt.style.use(\"ggplot\")\nfrom mpl_toolkits.basemap import Basemap\n\nplt.figure(figsize=(12, 6))\n\n# Equal area mollweide projection.\nm = Basemap(projection=\"moll\", lon_0=180.0, resolution=\"c\")\nm.drawmapboundary(fill_color=\"#cccccc\")\nm.fillcontinents(color=\"white\", lake_color=\"#cccccc\", zorder=0)\n\nm.drawgreatcircle(event_longitude, event_latitude, station_longitude,\n station_latitude, lw=2, color=\"green\")\nm.scatter(event_longitude, event_latitude, color=\"red\", s=500, marker=\"*\",\n latlon=True, zorder=5)\nm.scatter(station_longitude, station_latitude, color=\"blue\", s=400, marker=\"v\",\n latlon=True, zorder=5)\nplt.show()", "Data\n\"Raw\" data.", "shakemovie_data = obspy.read(\"../src/pyadjoint/example_data/shakemovie_data.mseed\")\ninstaseis_data = obspy.read(\"../src/pyadjoint/example_data/instaseis_data.mseed\")\n\nprint(shakemovie_data)\nprint(instaseis_data)", "Data Processing\nBoth data and synthetics are processed to have similar spectral content and to ensure they are sampled at the same points in time. The processing applied is similar to the typical preprocessing workflow applied to data in full waveform inversions using adjoint techniques. 
This example lacks instrument removal as both data samples are synthetics.", "from obspy.signal.invsim import c_sac_taper\nfrom obspy.core.util.geodetics import gps2DistAzimuth\n\nf2 = 1.0 / max_period\nf3 = 1.0 / min_period\nf1 = 0.8 * f2\nf4 = 1.2 * f3\npre_filt = (f1, f2, f3, f4)\n\ndef process_function(st):\n st.detrend(\"linear\")\n st.detrend(\"demean\")\n st.taper(max_percentage=0.05, type=\"hann\")\n\n # Perform a frequency domain taper like during the response removal\n # just without an actual response...\n for tr in st:\n data = tr.data.astype(np.float64)\n\n # smart calculation of nfft dodging large primes\n from obspy.signal.util import _npts2nfft\n nfft = _npts2nfft(len(data))\n\n fy = 1.0 / (tr.stats.delta * 2.0)\n freqs = np.linspace(0, fy, nfft // 2 + 1)\n\n # Transform data to Frequency domain\n data = np.fft.rfft(data, n=nfft)\n data *= c_sac_taper(freqs, flimit=pre_filt)\n data[-1] = abs(data[-1]) + 0.0j\n # transform data back into the time domain\n data = np.fft.irfft(data)[0:len(data)]\n # assign processed data and store processing information\n tr.data = data\n\n st.detrend(\"linear\")\n st.detrend(\"demean\")\n st.taper(max_percentage=0.05, type=\"hann\")\n\n st.interpolate(sampling_rate=sampling_rate, starttime=cmt_time,\n npts=npts)\n\n _, baz, _ = gps2DistAzimuth(station_latitude, station_longitude,\n event_latitude, event_longitude)\n\n components = [tr.stats.channel[-1] for tr in st]\n if \"N\" in components and \"E\" in components:\n st.rotate(method=\"NE->RT\", back_azimuth=baz)\n\n return st\n\n# From now one we will refer to them as observed data and synthetics.\nobserved = process_function(shakemovie_data.copy())\nsynthetic = process_function(instaseis_data.copy())\n\nprint(observed)\nprint(synthetic)", "Data Plots\nWe first define a function to plot both data sets.", "from obspy.core.util import geodetics\nfrom obspy.taup import getTravelTimes\n\ndef plot_data(start=0, end=1.0 / sampling_rate * npts, show_tts=False):\n start, end = int(start), int(end)\n plt.figure(figsize=(12, 6))\n plt.subplot(311)\n\n obs_z = observed.select(component=\"Z\")[0]\n syn_z = synthetic.select(component=\"Z\")[0]\n obs_r = observed.select(component=\"R\")[0]\n syn_r = synthetic.select(component=\"R\")[0]\n obs_t = observed.select(component=\"T\")[0]\n syn_t = synthetic.select(component=\"T\")[0]\n \n y_range = [obs_z.data[start: end].min(), obs_z.data[start: end].max(),\n syn_z.data[start: end].min(), syn_z.data[start: end].max(),\n obs_r.data[start: end].min(), obs_r.data[start: end].max(),\n syn_r.data[start: end].min(), syn_r.data[start: end].max(),\n obs_t.data[start: end].min(), obs_t.data[start: end].max(),\n syn_t.data[start: end].min(), syn_t.data[start: end].max()]\n y_range = max(map(abs, y_range))\n y_range *= 1.1\n \n dist_in_deg = geodetics.locations2degrees(\n station_latitude, station_longitude,\n event_latitude, event_longitude)\n tts = getTravelTimes(dist_in_deg, event_depth_in_km, model=\"ak135\")\n x_range = end - start\n tts = [_i for _i in tts\n if (start + 0.05 * x_range) < _i[\"time\"] < (end - 0.05 * x_range)]\n \n def plot_tts():\n for _i, tt in enumerate(tts):\n f = 1 if _i % 2 else -1\n va = \"top\" if f is 1 else \"bottom\"\n plt.text(tt[\"time\"], f * y_range * 0.96, tt[\"phase_name\"],\n color=\"0.2\", ha=\"center\", va=va, weight=\"900\",\n fontsize=8)\n \n plt.plot(obs_z.times(), obs_z.data, color=\"black\", label=\"observed\")\n plt.plot(syn_z.times(), syn_z.data, color=\"red\", label=\"synthetic\")\n plt.legend(loc=\"lower left\")\n if 
show_tts:\n plot_tts()\n plt.xlim(start, end)\n plt.ylim(-y_range, y_range)\n plt.ylabel(\"Displacement in m\")\n plt.title(\"Vertical component\")\n\n\n plt.subplot(312)\n plt.plot(obs_r.times(), obs_r.data, color=\"black\", label=\"observed\")\n plt.plot(syn_r.times(), syn_r.data, color=\"red\", label=\"synthetic\")\n plt.legend(loc=\"lower left\")\n if show_tts:\n plot_tts()\n plt.xlim(start, end)\n plt.ylim(-y_range, y_range)\n plt.ylabel(\"Displacement in m\")\n plt.title(\"Radial component\")\n\n plt.subplot(313)\n\n plt.plot(obs_t.times(), obs_t.data, color=\"black\", label=\"observed\")\n plt.plot(syn_t.times(), syn_t.data, color=\"red\", label=\"synthetic\")\n plt.legend(loc=\"lower left\")\n if show_tts:\n plot_tts()\n plt.ylabel(\"Displacement in m\")\n plt.xlim(start, end)\n plt.ylim(-y_range, y_range)\n plt.xlabel(\"Seconds since event\")\n plt.title(\"Transverse component\")\n\n plt.tight_layout()\n\n plt.show();", "Plot of All Data", "plot_data()", "Plot of First Arrivals", "plot_data(700, 1200, show_tts=True)", "Plot of Some Later Arrivals", "plot_data(1400, 1900, show_tts=True)\n\nplot_data(2000, 3000, show_tts=True)" ]
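As a small quantitative complement to the waveform plots (an addition to the original document), a cross-correlation of the vertical components over the first-arrival window gives a rough estimate of the time shift between the 3D "observed" data and the 1D synthetics:

```python
# Hedged sketch: cross-correlation time shift between observed and synthetic Z
# over the same 700-1200 s window used in the "first arrivals" plot above.
obs_z = observed.select(component="Z")[0].data
syn_z = synthetic.select(component="Z")[0].data
window = slice(700, 1200)                      # samples equal seconds at 1 Hz sampling

cc = np.correlate(obs_z[window], syn_z[window], mode="full")
shift_samples = cc.argmax() - (len(obs_z[window]) - 1)
print("Estimated time shift: %.1f s" % (shift_samples / sampling_rate))
```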
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
jdnz/qml-rg
Tutorials/Python_for_Science.ipynb
gpl-3.0
[ "1. Introduction\nThe unification of numerical and linear algebra packages under the umbrella of numpy/scipy about a decade ago spawned a slew of scientific packages for Python. Ever since, the Python scientific ecosystem has been evolving rapidly and things often break backwards compatibility. We will use Matplotlib 2.0, Scikit-learn 0.18, and QuTiP 4.1 in this tutorial. Import everything except Seaborn.", "import matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport sklearn\nimport qutip\nfrom skimage import io\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler\nfrom qutip import destroy, basis, steadystate, expect, mcsolve, mesolve, \\\n thermal_dm, plot_fock_distribution, matrix_histogram, hinton, tensor\n%matplotlib inline\nprint(\"Matplotlib:\", matplotlib.__version__,\n \"\\nScikit-learn:\", sklearn.__version__,\n \"\\nQuTiP:\", qutip.__version__)", "Notice the line that starts with %. This is a 'magic command' specific to Jupyter. It ensures that images will be plotted inline, instead of popping up in a window. You can look at all magic commands by entering %quickref. Some are useful, although most of them are not. The magic commands are not part of Python, so calling them in a script will throw an error. Keep this in mind when you copy code from a notebook.\n2. Plotting\nMaking nice plots is an involved and painful task. A few paradigms emerged over the decades, each with its own advantages and disadvantages. We restict our attention to plotting functions and statistical data. R as a language for statistical processing is popular in no small part due to its plotting module, ggplot2, which implements a school of plotting called grammar of graphics. This is a declarative approach, subsequent statements added to the plot define the final looks. Python's main plotting module, a behemoth called Matplotlib, is object-oriented: the figure is made up of a hierarchy of classes, and the properties of these will decide the look of the final image. Matplotlib is complex and you will often get the impression that it was designed to make plotting simple things difficult, and its default settings do not produce the most beautiful plots on the planet. The module Seaborn wraps around Matplotlib, and for a restricted set of plot types, it makes your task very easy. It also changes the horrific defaults of Matplotlib, so importing will change the look of all of your plots, whether they were done with Matplotlib calls or with Seaborn. This is why we did not import it in the first cell of this notebook. Seaborn also makes plotting statistical data easy and it interoperates marvellously with the statistical package Pandas. Python, of course, also has a declarative plotting module; try Altair, and see this write-up why it is interesting in the first place. Here we will discuss only the object-oriented approach.\n2.1 Matplotlib\n2.1.1 The basics\nThis is the bare minimum you need to plot a function:", "x = np.linspace(0, 5, 10)\nplt.plot(x, x**2);", "We imported the module matplotlib.plot as plt, and we call a function of it called plot to plot the square function. You always plot discrete points: x is a numpy array containing ten points as a linear approximation between zero and five. On closer inspection, the curve is not smooth: this is because ten points are not enough for the illusion of smoothness. 
Let us add some more points, labels for the axes, and a title for the figure:", "x = np.linspace(0, 5, 100)\ny = x**2\nplt.plot(x, y)\nplt.xlabel('x')\nplt.ylabel('y')\nplt.title('The most exciting function ever, full stop.');", "The order in which you add the decorations to your figure does not matter. The figure is not actually created until you execute the cell. Actually, the execution of the cell just triggers the call of the function plt.show(), which instructs Matplotlib to draw the figure and display it. In a Python script, you would always call plt.show() manually. Let us plot the cube function too, and call plt.show() manually:", "x = np.linspace(0, 5, 100)\ny1 = x**2\ny2 = x**3\nplt.plot(x, y1)\nplt.plot(x, y2)\nplt.xlabel('x')\nplt.ylabel('y')\nplt.show()", "Notice the difference with this case:", "plt.plot(x, y1)\nplt.xlabel('x')\nplt.ylabel('y')\nplt.show()\nplt.xlabel('x')\nplt.ylabel('y')\nplt.plot(x, y2)\nplt.show()", "The plt.show() resets all settings, so for the second figure, you must set the axes again.\nInstead of showing the plot on the screen, you can write them to a file, which will also trigger Matplotlib to draw the figure. If you export it to PDF, it will be as scale-invariant as it can possibly be, so you can readily insert them in your LaTeX manuscripts.", "plt.plot(x, y1)\nplt.xlabel('x')\nplt.ylabel('y')\nplt.savefig(\"whatever.pdf\")\nplt.close()", "2.1.2 Object-oriented paradigm\nThe stuff that you see displayed is composed of a hierarchical structure of components. On the top level, it is an instance of the Figure class. This is what plt.plot() creates for you, with all the other underlying structures within; this function is for your convenience to avoid dealing with classes if you want a simple plot. The structures in the hierarchy include the area where you draw, which is technically called the Axes class. You may have more than one Axes if you have subplots or embedded plots. Axes than have x and y axes, which in turn have a scale, ticks, labels, and so on. If you have a single Axes class instantiated, like in the examples below, you can access and change most parts of the hierarchy like you did above with the x and y labels and the figure title. If you want to do anything non-trivial, you have to compose the figure and its components yourself. The examples in this section are mainly from this tutorial. Let us instantiate an object of the figure class, get an area to draw on, and plot the same thing:", "fig = plt.figure()\nax = fig.add_subplot(111)\nax.plot(x, y)\nax.set_xlabel('x')\nax.set_ylabel('y')\nplt.show()", "Armed with this knowledge, we can do inserts:", "fig = plt.figure()\naxes1 = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # main axes\naxes2 = fig.add_axes([0.2, 0.5, 0.4, 0.3]) # insert axes\n\n# main figure\naxes1.plot(x, y1, 'r')\naxes1.set_xlabel('x')\naxes1.set_ylabel('y')\naxes1.set_title('Square function in red')\n\n# insert\naxes2.plot(x, y2, 'b')\naxes2.set_xlabel('x')\naxes2.set_ylabel('y')\naxes2.set_title('Cube function in blue')\nplt.show()", "You can also do aribtrary grids of subplots. 
The function plt.subplots conveniently creates you the figure object and returns it to you along with the axes:", "fig, axes = plt.subplots(ncols=2)\ny = [y1, y2]\nlabels = [\"Square function\", \"Cube function\"]\ncolors = ['r', 'b']\nfor i, ax in enumerate(axes):\n ax.plot(x, y[i], colors[i])\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_title(labels[i]) \nfig.tight_layout()", "Matplotlib handles LaTeX reasonably well, just put things between $ signs. For instance, we can a fancy legend:", "fig, ax = plt.subplots()\nax.plot(x, y1, label=r\"$y = x^2$\")\nax.plot(x, y2, label=r\"$y = x^3$\")\nax.legend(loc=2) # upper left corner\nax.set_xlabel(r'$x$')\nax.set_ylabel(r'$y$')\nplt.show()", "You need the leading r in the strings to avoid some nastiness with backslashes.\nThe rest is all about exploring the parameter space. Here we manually create a grid (this is necessary if we mix 2D, 3D or polar coordinates), and plot a bunch of things that Matplotlib can do. For more examples, refer to the gallery.", "# Some new data will be necessary\nn = np.random.randn(100000)\nt = np.linspace(0, 2 * np.pi, 100)\nX, Y = np.meshgrid(t, t)\nZ = (2.7 - 2 * np.cos(Y) * np.cos(X) - 0.7 * np.cos(np.pi - 2*Y)).T\n\n# The actual plot\nfig = plt.figure(figsize=(12, 6))\naxes = [[],[]]\naxes[0].append(fig.add_subplot(2, 4, 1))\naxes[0][0].scatter(x, x + 0.25*np.random.randn(len(x)))\naxes[0][0].set_title(\"Scatter\")\naxes[0].append(fig.add_subplot(2, 4, 2))\naxes[0][1].step(x, y1, lw=2)\naxes[0][1].set_title(\"Step\")\naxes[0].append(fig.add_subplot(2, 4, 3))\naxes[0][2].bar(x, y1, align=\"center\", width=0.5, alpha=0.5)\naxes[0][2].set_title(\"Bar\")\naxes[0].append(fig.add_subplot(2, 4, 4))\naxes[0][3].fill_between(x, y1, y2, color=\"green\", alpha=0.5);\naxes[0][3].set_title(\"Fill between\");\naxes[1].append(fig.add_subplot(2, 4, 5))\naxes[1][0].hist(n, bins=100)\naxes[1][0].set_title(\"Histogram\")\naxes[1][0].set_xlim((min(n), max(n)))\naxes[1].append(fig.add_subplot(2, 4, 6))\np = axes[1][1].pcolor(X/(2*np.pi), Y/(2*np.pi), Z, cmap=matplotlib.cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max())\naxes[1][1].set_title(\"Color map\")\nfig.colorbar(p, ax=axes[1][1])\naxes[1].append(fig.add_subplot(2, 4, 7, projection='3d'))\naxes[1][2].plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=matplotlib.cm.coolwarm, linewidth=0, antialiased=False)\naxes[1][2].set_title(\"Surface plot\")\naxes[1].append(fig.add_subplot(2, 4, 8, polar=True))\naxes[1][3].plot(t, t, color='blue', lw=3);\naxes[1][3].set_title(\"Polar coordinates\")\nfig.tight_layout()\nplt.show()", "Exercise 1. Create a three by three grid. Put Lluis Torner in the center. Surround him with aesthetically pleasing functions in the remaining subplots. Hint: io.imread(\"http://www.icfo.eu/images/static/director_LluisTorner.jpg\") will get you a photo of him, and the method imshow will plot it.\n2.2 Seaborn and Pandas\nSeaborn is primarily meant for statistical plotting, but it also improves the defaults of all Matplotlib figures.\n2.2.1 Side effect of importing it.\nWitness the magic of it:", "plt.plot(x, x**2)\nplt.show()\nimport seaborn as sns\nplt.plot(x, x**2)\nplt.show()", "Yes, importing a package should not have severe side effects. On the other hand, this is Python, not Haskell, so let us rejoice at this sheer opportunism.\n2.2.2 Add Pandas\nPandas turns Python into a competitor to R. It allows you to do a wide-scale of statistical operations, but even more importantly, it makes low-level data processing chores easy. 
Here we load the standard Iris dataset via Scikit-learn and convert it to a Pandas dataframe, which is the key data structure of the package.", "iris = load_iris()\niris = pd.DataFrame(data=np.c_[iris['data'], iris['target']],\n columns= iris['feature_names'] + ['target'])\niris.head()", "You can access individual columns by indexing with the name of the column:", "iris[\"sepal length (cm)\"].head()", "We will use seaborn for some basic visualization", "sns.jointplot(x=\"sepal length (cm)\", y=\"sepal width (cm)\", data=iris, size=5);", "Let us define an array with all the names of the features and plot their correlations.", "features = iris.columns.values[:-1]\nsns.pairplot(iris, vars=features, hue=\"target\", size=3);", "Exercise 2. Plot the histogram of all four features. First, instantiate a Matplotlib figure in a one by four grid, and then pass the matching axes to Seaborn's distplot function that draws the histograms. A figsize=(14, 4) is a recommended parameter to plt.subplots, otherwise the figure will be too squished. Use zip to iterate over the axes and the features simultaneously.\n3. Modelling\nPython is a comfortable environment for prototyping ideas, and since it also acts as a glue language, you will probably have most of the scaffolding in Python for your actual calculations too. Many libraries that are primarily meant for prototyping are actually efficient enough to cover decent real-world use cases, and your prototype can be your final model. This is especially the case if your numpy/scipy packages were compiled with an efficient BLAS/LAPACK implementation, which is exactly what Anaconda gives you with MKL built in. For machine learning prototypes, Scikit-learn is great: it covers most of the important algorithms of the non-deep learning type. For quantum physics, QuTiP is highly recommended: it is still actively developed, its efficiency is improving day by day, and it covers a wide range of problems, including functions related to quantum info and open quantum systems.\n3.1 Machine learning prototypes\nThis section is based on the examples in the book Python Machine Learning. \nAt the end of the day, most machine learning algorithms will output a function that we can use for any data instance. To characterize the learning algorithm, we can plot the decision boundaries that this function produces. 
The following helper function plots this decision function along the first two dimensions in the data set:", "def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):\n\n # setup marker generator and color map\n markers = ('s', 'x', 'o', '^', 'v')\n colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\n cmap = matplotlib.colors.ListedColormap(colors[:len(np.unique(y))])\n\n # plot the decision surface\n x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n np.arange(x2_min, x2_max, resolution))\n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n Z = Z.reshape(xx1.shape)\n plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)\n plt.xlim(xx1.min(), xx1.max())\n plt.ylim(xx2.min(), xx2.max())\n\n for idx, cl in enumerate(np.unique(y)):\n plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8, \n c=cmap(idx), marker=markers[idx], label=cl)\n\n # highlight test samples\n if test_idx:\n # plot all samples\n X_test, y_test = X[test_idx, :], y[test_idx]\n\n plt.scatter(X_test[:, 0], X_test[:, 1], c='', alpha=1.0,\n linewidths=1, marker='o', s=55, label='test set')", "We reload the Iris data set:", "iris = load_iris()\nX = iris.data[:, [2, 3]]\ny = iris.target\nprint('Class labels:', np.unique(y))", "To avoid overfitting, we split the data set in a training and validation part. This is a static random split, not something you would use in 10x random cross-validation.", "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)", "We standardize the distribution of the features of the data set. Some kind of normalization or standardization is usually a good idea. Certain learning models work well with data vectors of norm 1, for instance. Here we choose standardization because the physical size parameters of the iris species actually follows a normal distribution.", "sc = StandardScaler()\nsc.fit(X_train)\nX_train_std = sc.transform(X_train)\nX_test_std = sc.transform(X_test)\nX_combined_std = np.vstack((X_train_std, X_test_std))\ny_combined = np.hstack((y_train, y_test))", "The dumbest model we can possibly train is a neural network of a single neuron, trained by stochastic gradient descent. Even this simple model misses only four instances:", "lr = LogisticRegression(C=1000.0, random_state=0)\nlr.fit(X_train_std, y_train)\nplot_decision_regions(X_combined_std, y_combined,\n classifier=lr, test_idx=range(105, 150))\nplt.xlabel('petal length [standardized]')\nplt.ylabel('petal width [standardized]')\nplt.legend(loc='upper left')\nplt.tight_layout()\nplt.show()", "The decision boundary is linear. \nNotice the C parameter in the instantiation of the logistic regression class. This is not a parameter, but a hyperparameter: the training algorithm (the same stochastic gradient descent as in the perceptron) does not optimize over it. It is our task to find a good value for it. The objective function we are optimizing for logistic regression is\n$$J(\\mathbf{w}) = \\sum_{i=1}^N\\left[-y_i \\log(\\phi(z_i))-(1-y_i)\\log(1-\\phi(z_i))\\right] + \\frac{1}{2C}||w||^2$$,\nwhere $z = w^\\top x$. By increasing the regularization term $\\frac{1}{2C}$, we get a sparser model. In our case, it means that fewer features will be factored in. 
We can plot this as follows:", "weights, params = [], []\nfor c in np.arange(-5, 5):\n lr = LogisticRegression(C=10.0**c, random_state=0)\n lr.fit(X_train_std, y_train)\n weights.append(lr.coef_[1])\n params.append(10.0**c)\nweights = np.array(weights)\nplt.plot(params, weights[:, 0], label='petal length')\nplt.plot(params, weights[:, 1], linestyle='--', label='petal width')\nplt.ylabel('weight coefficient')\nplt.xlabel('C')\nplt.legend(loc='upper left')\nplt.xscale('log')\nplt.show()", "This regularization term makes machine learning very different from statistics, at least as far as structural risk minimization goes. In general, sparser model will have better generalization properties, that is, they are less prone to overfitting. Since there is no explicit way to optimize over the hyperparameter, you typically do something like grid search.\nExercise 3. Study the decision boundary for the above ten choices of $C$.\n3.2 Quantum simulations\nWe take an example from the QuTiP documentation (Section 3.6.5). It is a system that reaches a steady state is a harmonic oscillator coupled to a thermal environment. The initial state is the $|10\\rangle$ number state, and it is weakly coupled to a thermal environment characterized by an average particle expectation value of $\\langle n\\rangle = 2$.", "N = 20\na = destroy(N)\nH = a.dag() * a\npsi0 = basis(N, 10) # initial state\nkappa = 0.1 # coupling to oscillator", "Next we define the collapse operators:", "n_th_a = 2 # temperature with average of 2 excitations\nrate = kappa * (1 + n_th_a)\nc_op_list = [np.sqrt(rate) * a] # decay operators\nrate = kappa * n_th_a\nc_op_list.append(np.sqrt(rate) * a.dag()) # excitation operators", "We calculate the steady state and the particle number in the steady state:", "final_state = steadystate(H, c_op_list)\nfexpt = expect(a.dag() * a, final_state)", "We calculate the time evolution over a hundred points with two methods: by the Monte Carlo method and by solving the master equation:", "tlist = np.linspace(0, 50, 100)\n# monte-carlo\nmcdata = mcsolve(H, psi0, tlist, c_op_list, [a.dag() * a], ntraj=100)\n# master eq.\nmedata = mesolve(H, psi0, tlist, c_op_list, [a.dag() * a])\n\nplt.plot(tlist, mcdata.expect[0], tlist, medata.expect[0], lw=2)\nplt.axhline(y=fexpt, color='r', lw=1.5)\nplt.ylim([0, 10])\nplt.xlabel('Time', fontsize=14)\nplt.ylabel('Number of excitations', fontsize=14)\nplt.legend(('Monte Carlo', 'Master Equation', 'Steady State'))\nplt.title('Decay of Fock state $\\left|10\\\\rangle\\\\right.$' + ' in a thermal environment with $\\langle n\\\\rangle=2$')\nplt.show()", "Exercise 4. Improve the Monte Carlo simulation to approximate the master equation closer. Typing mcsolve? will give you a detailed help on the parametrization of the solver.\nQuTiP has built-in functions to work with thermal states. Let us consider a state that is on average occupied by two photons:", "rho_thermal = thermal_dm(N, 2)\n\nfig, axes = plt.subplots(1, 3, figsize=(12,3))\naxes[0].matshow(rho_thermal.data.toarray().real)\naxes[0].set_title(\"Matrix plot\")\naxes[1].bar(np.arange(0, N)-.5, rho_thermal.diag())\naxes[1].set_xlim([-.5, N])\naxes[1].set_title(\"Diagonal\")\nplot_fock_distribution(rho_thermal, fig=fig, ax=axes[2])\naxes[2].set_title(\"Fock number distribution\")\nplt.show()", "Exercise 5. Create and study the maximally mixed state of dimension $N$. Here are three possible ways to do it:\n1. The Cheap Way: import the function maximally_mixed_dm from QuTiP.\n2. 
The Way of the Boson: use the previous function thermal_dm and increase the average particle number until you converge to the maximally mixed state.\n3. The Church of Nonlocality Way: trace out half of a maximally entangled state. For qubits, you could create a Bell pair with (tensor(basis(2, 0), basis(2, 0)) + tensor(basis(2, 1), basis(2, 1))).unit() and then trace out either party. If you generalize this to $N$ dimensions, you get the solution. Bonus points are awarded for simulating the Unruh effect.\n4. Writing good code\nThe ideal scientific code is written in good style, it is correct, and it is fast. Correctness is up to you to verify. Here we will mainly focus on style, with a few words on efficiency. Unfortunately, Jupyter does not give you tools to cultivate good habits in writing Python. For that, you have to go to Spyder (or to another advanced editor). \nFirst and foremost, you should follow the PEP8 recommendations. They are, of course, not easy to keep in mind. Open Spyder and go to Tools->Preferences. Then, under Editor, choose the tab Code Introspection/Analysis. Tick the Real-time code style analysis box. By enabling this, you will see yellow warnings every time you violate PEP8 in a line, and mousing over will tell you what the problem is.\nThe next stage is linting. Linting is a form of static code analysis that is able to point out potential problems and errors, as well as giving you further style advice. It static in the sense that it does not run your code to find errors. In Spyder, press F8 to run the analysis. It will give you a numerical score of how good your code is, ranging from $-\\infty$ to 10. The problems are sorted in categories. You should pay special attention to errors and warnings.\nExercise 6. Pick a homework solution and start fixing it up. Make five-six improvements. Run Pylint before and after.\nIf your code is pretty and you are sure that it is correct, then and only then you can start improving its run time. As Donald Knuth said, \"premature optimization is the root of all evil\", so do not waste any effort on speeding up your code before you know that the results are what to be expected (and your code is pretty).\nPython has a reputation for being slow, but it is actually not too bad, and the problem can often be circumvented. As a matter of fact, it has a built-in routine for timing your operations:", "a = [i for i in range(1000)]\nb = np.array(a)\n%timeit sum(a)\n%timeit sum(b)\n%timeit np.sum(a)\n%timeit np.sum(b)", "When you try to improve the speed of your code, you must ensure two things:\n\n\nYour code remains correct. Write some tests that verify the results. The more tests you have the better. For non-trivial projects, please make use of Python's excellent unit testing framework. Unit tests are your best friends, yet scientist seldom ever use them. In industry, you cannot possibly work on anything without providing unit tests.\n\n\nYou focus on the time-critical part. There is no point in improving the speed of a chunk of code that is executed once, taking up a second, as opposed to improving a function that gets called a hundred thousand times, taking a second each time. Finding the time critical parts is called profiling. Note that profiling is dynamic, that is, you must execute your code.\n\n\nSpyder has a built-in profiler. To run it on the current file, press F10. Make sure it is a file that executes fast. Then it gives you a hierarchical call structure telling you what was called and how often, and how long it took.\nExercise 7. 
Copy this to an empty file, profile it, and improve its run time.", "import numpy as np\n\n\ndef dummy_test_for_slow_function():\n \"\"\"Super-stupid test for the function\"\"\"\n np.testing.assert_almost_equal(slow_function(), 157687.67990470183)\n\n\ndef slow_function():\n \"\"\"This function could see a fair bit of improvement\"\"\"\n total = 0\n for i in range(1000):\n a = np.array([np.sqrt(j) for j in range(1000)])\n b = a/(i+1)\n total += sum(b)\n return total\n\n# We run the test\ndummy_test_for_slow_function()", "5. Open science\nOpen science is an umbrella term to label a set of initiatives that are meant to liberate science from the shackles of academic publishers and software licences, to provide access to research results to the very taxpayers who paid for it, to encourage networking and to do credible research with contemporary tools. Open access publishing, academic social networks, open source code repositories, and open access data all belong here, but here we limit ourselves to code and data.\nThings you want to achieve by making your code and data available:\n\nReproduceability of results\n\nAt all. Most numerical results are barely reproduceable, but usually they are not. Not even to the original authors.\n\n\nWith ease. If you have to struggle to compile something for a week just to start a calculation, that is not particularly attractive.\n\n\nDiscoverability.\n\n\nMore citations, which is the ultimate goal of being a scientist today. Points 1) and 2) should yield this.\n\n\nStages:\n\nALWAYS choose a license. Without a license, nobody will know what they can legally do or expect from your code. Technically speaking, without a license, code dumped online is not open source. When you create a new repository on GitHub, you are automatically offered a handful licenses. Unfortunately, this is only optional, but please choose one for every repo you create.\n\nWhen in doubt: GNU Public License Version 3 (GPLv3). This is a 'contagious' license: if somebody builds on your code, it must also be released and it must be released under GPL. This is a bad choice of license If you want your code to be used by commercial entities. In this case, a BSD or Apache-style license works better. On a related note, text is also always released under some form of a license. For instance, when you upload a manuscript to arXiv, you are forced to use a license. Open access publishers typically choose a Creative Commons license, which is as author-friendly as it gets. For texts that you publish online that are not manuscripts, you should also consider a Creative Commons license. Data is also typically released under a Creative Commons license.\n\n\nProvide decent documentation. Raw code without any code comments is not especially helpful. Not for others, and not for your future self.\n\n\nIf it is a one-off piece of code that does some calculation, spend an hour writing it up in a notebook, if you developed the code in a notebook-friendly language. If the helper functions are too long, you can collect those in a separate file that you import in the notebook. If a notebook is too much, write a gist that is decently commented. If your code is complex, includes many files, it is an entire module or package, or generally it is not notebook-friendly, then create a repository and add a readme file to tell what it does and how. Use Markdown format for the readme to have an easy way of formatting code within the document. 
The text parts of notebooks also use Markdown.\n\n\nWrite documentation for functions that are user-facing. Often a one-liner is enough, but you can make the extra effort of writing up the documentation for all arguments. In Julia, defining the types of the arguments and providing the documentation go hand-in-hand, establishing a solid best practice. In Python, you can embed tests in the function docstrings, which is insanely handy.\n\n\nIf the thing that you developed is a library, package, or module, your documentation would probably longer. Awesome tools like Sphinx for Python code and Doxygen for C++ help you write beautiful documentation that yields a PDF manual or HTML pages. There are $N+1$ places to host the result, e.g. ReadTheDocs, which also integrates neatly with GitHub: every time you release a new version, ReadTheDocs will automatically update the documentation to the latest variant. GitHub itself offers a way to host documentation via GitHub pages: create a branch in your repository called gh-pages, and the HTML files you put there will appear at https://yourusername.github.io/reponame.\n\n\nChoose a place of sharing. You want to ensure that your work is visible.\n\n\nIn the case of code, the go-to place is GitHub, so this is pretty straightforward. You can't get more visible than that, plus GitHub integrates with everything else under the sun, including things we already mentioned (e.g. ReadTheDocs) and things we did not (like continuous integration, project management, code quality, cloud deployment, and a hundred others; check GitHub Integrations). There is one distinct disadvantage that we should keep in mind, though: GitHub is a private entity. They are out to make money, and not to do charity to academics. They are based in the US, so they have no oversight on how much metadata they can harvest about you and how they use it. Furthermore, they can go bankrupt or change their business model, which might render your repos inaccessible. However, they cannot 'steal' your code, since it is released under a license of your choice.\n\n\nIn the case of data, the options are more scattered. My recommendation goes for Zenodo. It is funded by CERN and OpenAIRE, and it is hosted in CERN itself on the same infrastructure where the 25PBytes/year of data goes from LHC. The longevity of your data there is ensured, and your metadata stays in Europe.\n\n\nGet a DOI for code and data. Zenodo provides a DOI for everything you upload there. It also integrates with GitHub, so getting a DOI for code hosted on GitHub is $\\epsilon$ effort. If your code is a library, package, or module, perhaps you want to write up a paper on it and get a DOI for that. In my (PW) experience, it is not worth it. Your publication outlets are limited. In physics, you can go for the Journal of Computational Physics or Computer Physics Communications. Both have short review cycles and decent Impact Factors, but they are owned by the Beast (Elsevier). In machine learning, you have two open access outlets that have pretty good indicators, plus they are free to publish in. If you can make it in the Open Source Software track of the Journal of Machine Learning Research, that means a lot. The other option is the Journal of Statistical Software, where it took two years to review a manuscript, and in the year that passed since the acceptance decision, the page-proofs still did not arrive. 
For mathematical stuff, there is the ACM Transactions on Mathematical Software, which takes about a year to review a paper and is run by the ACM, an organization that moves even more slowly than the APS. An additional problem with all of these options is that you cannot really modify the library or the documentation you provided in the paper, whereas your library will probably evolve. So you might as well save yourself the trouble, get a DOI from Zenodo, and let the documentation and the links update automatically with each release on GitHub.\n\n\nExercise 8. For the next paper you write that includes even a single line of code for numerical or symbolic calculations, make a computational appendix on GitHub as a gist, as a notebook, or as a repository. You might as well include a link here by sending a pull request: since the number of incoming links to your code will increase, it will rank higher in search engines, and your work will be a notch easier to discover." ]
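As a possible follow-up to Exercise 7 above (a sketch of one way to attack it, not the intended solution), the loop in `slow_function` can be collapsed into pure NumPy: the array of square roots never depends on `i`, so it can be computed once, and dividing by `i + 1` and summing amounts to multiplying by a partial harmonic sum. `np.isclose` is used instead of an exact equality test because the summation order differs from the original.

```python
import numpy as np

def fast_function():
    """Vectorized rewrite of slow_function (sketch; check it against the original test value)."""
    sqrt_sum = np.sqrt(np.arange(1000)).sum()       # sum of sqrt(j) for j in range(1000), computed once
    harmonic = (1.0 / np.arange(1, 1001)).sum()     # sum of 1/(i + 1) for i in range(1000)
    return sqrt_sum * harmonic

print(fast_function())
print(np.isclose(fast_function(), 157687.67990470183))  # value from dummy_test_for_slow_function
```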
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
jsgreenwell/teaching-python
tutorial_files/presentations/list_comp_example.ipynb
mit
[ "Example of performing Vector mathmatical function using Python List structures\n\nVector methods to be created:\n* Sum vectors\n * Add vector elements of same sized vectors\n * Return resulting vector\n* Subtract vectors\n * Subtract vector elements of same sized vectors\n * Return resulting vector\n* Product of vectors\n * Product of components of vectors\n * Return resulting vector\n* Product of vector and scalar\n * Return scalar product of each element of vector\n* Mean of vectors\n * Sum Vector method / number of elements for each element (or 1/len scalar multiply)\n* Dot Product\n * Sum of component wise products\n * Multiply vectors\n * Sum vectors\n * Return resulting vector\n\nTeaching notes delete when finished\nRemember to explain that in the real world numpy and other libraries would be used to do this\n\nFor teaching list methods\nParticuliarly allows for a number of list comprehensions to be explained\nBasic Class definition and issues\nStart with just calling a definition directly (which will Error with a not found)\nShow how adding self.function_name() works and explain\nMove into using decorators\nStart with a vector with a small number of elements\nSo students can do calculations in their heads and follow along", "class vector_math:\n '''\n This is the base class for vector math - which allows for initialization with two vectors.\n '''\n \n def __init__(self, vectors = [[1,2,2],[3,4,3]]):\n self.vect1 = vectors[0]\n self.vect2 = vectors[1]\n \n def set_vects(self, vectors):\n self.vect1 = vectors[0]\n self.vect2 = vectors[1]\n \n def sum_vects(self):\n return [x + y for x, y in zip(self.vect1, self.vect2)]\n \n def sub_vects(self):\n # default should be [-2,-2,-1]\n return [x - y for x, y in zip(self.vect1, self.vect2)]\n # Can expand out to for x, y in zip: ... 
to show what it and sum do\n \n def multi_vects(self):\n #default should be [3,8,6]\n return [x * y for x, y in zip(self.vect1, self.vect2)]\n \n def multi_scalar(self, scalar, vect):\n return [e * scalar for e in vect]\n # Show difference between just element * number and using tuple from zip()\n \n def multi_scalar_l(self, scalar, vect):\n return lambda e: e * scalar, vect\n \n def mean_vects(self):\n mean_vect = self.sum_vects()\n return self.multi_scalar(1/len(mean_vect), mean_vect)\n \n def dot_product(self):\n return sum(self.multi_vects())\n \nvect = vector_math()\n\nsum_vect = vect.sum_vects()\nprint(\"Sum of vectors = {}\".format(sum_vect))\n\nprint(\"Subtraction of vectors = {}\".format(vect.sub_vects()))\nprint(\"Product of vectors = {}\".format(vect.multi_vects()))\nprint(\"Product of Sum of vectors and 2 = {}\\n\".format(vect.multi_scalar(2, sum_vect)))\n# Yep can still use character returns and others in format\n\nprint(\"Average of vectors = {}\".format([\"{:.2f}\".format(e) for e in vect.mean_vects()]))\n# Now there are other ways to reduce the decimal places but this was just to show a nested format call\n\n# TODO: Consider adding timeit to show difference between calling multi_scalar directly and calling mean_vect:\n#print(\"Average of vectors through calling scalar = {}\".format(\n# [\"{:.2f}\".format(e) for e in vect.multi_scalar(1/len(sum_vect), sum_vect)]))\n\nprint(\"The Dot Product is {}\".format(vect.dot_product()))", "Other vector operations that could be done", "from math import sqrt\n\n# Using the vect variables showing without functions\nsum_of_squares = sum([x * y for x, y in zip(vect.vect1, vect.vect1)])\nmagnitude = sqrt(sum_of_squares)\ndistance = sqrt(sum([(x - y) ** 2 for x, y in zip(vect.vect1, vect.vect2)]))\n\nprint(\"Sum of Squares is {}\".format(sum_of_squares))\nprint(\"Magnitude is {:.2f}\".format(magnitude))\nprint(\"Distance is {}\".format(distance))", "List Comprehensions are Powerful tools in Python\nExpect to see them throughout code one has to maintain but also understand they are not always the optimal solution\nWhen an iteration is needed to build a composite value, list comprehensions are considered the most readable or understandable way to achieve this. Loops may be used instead if one wants the \"side effect\" of an interation while functional tools may be used if optimization and code speed is important.\nFor instance, the above examples could also have been performed with an annoymous lambda or reduce, like:\ndef multi_scalar(self, vect, scalar):\n return lambda e: e * scalar, vect\n\nIn this case, the lambda would be faster by a minimal amount and actually have one less function call - which are expensive in Python. This is not always true as the need for an increasing amount of functional methods can change both the speed and amount of function call required. code example is below", "import dis\nimport time\n\n# For instruction - shows disassemble of methods and performs quick time check\n\nvect = [2,3,3,3,4,5,6,6,4,3,2,1,3,4,5,6,4,3,2,1,3,4,5,6,4,3,2]\n\nt1 = time.time()\n\nprint(\"list comp\")\ndis.dis(compile(\"[e * 2 for e in vect]\", '<stdin>', 'exec'))\n\nd_l = time.time() - t1\nprint(d_l)\n\nt2 = time.time()\n\nprint(\"\\n\\n\\nlambda\")\ndis.dis(compile(\"lambda e: e * 2, vect\", '<stdin>', 'exec'))\n\nd_lam = time.time() - t2\nprint(d_lam)" ]
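The timing in the cell above wraps dis.dis(compile(...)), so the measured intervals mostly reflect compilation and disassembly rather than execution. As a complementary, purely illustrative sketch (the absolute numbers depend on the interpreter and machine), the actual execution of the two approaches can be timed with the timeit module:

```python
import timeit

vect = [2, 3, 3, 3, 4, 5, 6, 6, 4, 3, 2, 1, 3, 4, 5, 6, 4, 3, 2, 1, 3, 4, 5, 6, 4, 3, 2]

# Time the list comprehension against building the same list with map + lambda.
t_comp = timeit.timeit("[e * 2 for e in vect]", globals={"vect": vect}, number=100000)
t_map = timeit.timeit("list(map(lambda e: e * 2, vect))", globals={"vect": vect}, number=100000)

print("list comprehension: {:.4f} s".format(t_comp))
print("map + lambda: {:.4f} s".format(t_map))
```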
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
VVard0g/ThreatHunter-Playbook
docs/notebooks/windows/05_defense_evasion/WIN-180719170510.ipynb
mit
[ "DLL Injection via CreateRemoteThread and LoadLibrary\nMetadata\n| Metadata | Value |\n|:------------------|:---|\n| collaborators | ['@Cyb3rWard0g', '@Cyb3rPandaH'] |\n| creation date | 2018/07/19 |\n| modification date | 2020/09/20 |\n| playbook related | [] |\nHypothesis\nAdversaries might be injecting a dll to another process to execute code via CreateRemoteThread and LoadLibrary functions.\nTechnical Context\nGet Handle to Target Processs\nThe malware first needs to target a process for injection (e.g. svchost.exe).\nThis is usually done by searching through processes by calling a trio of Application Program Interfaces (APIs) > CreateToolhelp32Snapshot, Process32First, and Process32Next.\nAfter finding the target process, the malware gets the handle of the target process by calling OpenProcess.\nThere are two processes involved in this attack > your DLLInjector process (Process A), and the remote process you want to inject with a DLL (Process B).\nTo interact with the remote process, Process A must call OpenProcess() while passing the remote process process ID as an argument. OpenProcess will then return to Process A a Handle to Process B.\nHaving a Handle to the remote process allows Process A to interact with it in powerful ways. Process A can allocate memory, write memory, and create an execution thread in Process B by calling functions like VirtualAllocEx, WriteProcessMemory, and CreateRemoteThread and passing the Handle to Process B as an argument to those functions.\nGet address of the LoadLibraryA function\nKernel32.dll is loaded into every Windows process, and within it is a useful function called LoadLibrary.\nWhen LoadLibrary is called in a certain process, it maps a DLL into that process.\nLoadLibrary needs to know what DLL to load, so you need to provide it the path to the DLL on your system.\nLoadLibrary will then find the DLL at that path and load that DLL into memory for you.\nNote > LoadLibraryA is the function name. \"A\" means you provide the DLL path as an ASCII string.\nAllocate Memory for DLL\nWhy do we write the DLL path to Process B using VirtualAllocEx and then WriteRemoteMemory? This is because LoadLibrary needs to know what DLL you want to inject.\nThe string it accepts as a parameter needs to be present in Process B memory.\nThe malware calls VirtualAllocEx to have a space to write the path to its DLL.\nThe malware then calls WriteProcessMemory to write the path in the allocated memory.\nExecute Code\nFinally, to have the code executed in another process, the malware calls APIs such as CreateRemoteThread, NtCreateThreadEx, or RtlCreateUserThread.\nThe latter two are undocumented. 
However, the general idea is to pass the address of LoadLibrary to one of these APIs so that a remote process has to execute the DLL on behalf of the malware.\nThe CreateRemoteThread function creates a thread in the virtual address space of an arbitrary process.\nUse CreateRemoteThread to create a remote thread starting at the memory address (which means this will execute LoadLibrary in the remote process).\nBesides the memory address of the remote function you want to call, CreateRemoteThread also allows you to provide an argument for the function if it requires one.\nLoadLibrary wants the memory address of where you wrote that DLL path from earlier, so provide CreateRemoteThread that address as well.\nOffensive Tradecraft\nThis technique is one of the most common techniques used to inject malware into another process.\nThe malware writes the path to its malicious dynamic-link library (DLL) in the virtual address space of another process, and ensures the remote process loads it by creating a remote thread in the target process.\nSecurity Datasets\n| Metadata | Value |\n|:----------|:----------|\n| docs | https://securitydatasets.com/notebooks/atomic/windows/defense_evasion/SDWIN-190518221344.html |\n| link | https://raw.githubusercontent.com/OTRF/Security-Datasets/master/datasets/atomic/windows/defense_evasion/host/empire_dllinjection_LoadLibrary_CreateRemoteThread.zip |\nAnalytics\nInitialize Analytics Engine", "from openhunt.mordorutils import *\nspark = get_spark()", "Download & Process Security Dataset", "sd_file = \"https://raw.githubusercontent.com/OTRF/Security-Datasets/master/datasets/atomic/windows/defense_evasion/host/empire_dllinjection_LoadLibrary_CreateRemoteThread.zip\"\nregisterMordorSQLTable(spark, sd_file, \"sdTable\")", "Analytic I\nLook for any use of the CreateRemoteThread function to create a remote thread starting at the memory address (which means this will execute LoadLibrary in the remote process)\n| Data source | Event Provider | Relationship | Event |\n|:------------|:---------------|--------------|-------|\n| Process | Microsoft-Windows-Sysmon/Operational | Process wrote to Process | 8 |", "df = spark.sql(\n'''\nSELECT `@timestamp`, Hostname, SourceImage, TargetImage\nFROM sdTable\nWHERE Channel = \"Microsoft-Windows-Sysmon/Operational\"\n AND EventID = 8\n AND lower(StartModule) LIKE \"%kernel32.dll\"\n AND StartFunction = \"LoadLibraryA\"\n'''\n)\ndf.show(10,False)", "Analytic II\nYou can look for the same file being created and loaded. 
The process that creates the file and loads the file are not the same.\n| Data source | Event Provider | Relationship | Event |\n|:------------|:---------------|--------------|-------|\n| Module | Microsoft-Windows-Sysmon/Operational | Process loaded Dll | 7 |\n| File | Microsoft-Windows-Sysmon/Operational | Process created File | 11 |", "df = spark.sql(\n'''\nSELECT f.`@timestamp` AS file_date, m.`@timestamp` AS module_date, f.Hostname, f.Image AS file_image, m.Image AS module_image, m.ImageLoaded, f.TargetFilename\nFROM sdTable f\nINNER JOIN (\n SELECT `@timestamp`,Hostname,Image,ImageLoaded,TargetLogonId,IpAddress\n FROM sdTable\n WHERE Channel = \"Microsoft-Windows-Sysmon/Operational\"\n AND EventID = 7\n ) m\nON f.TargetFilename = m.ImageLoaded\nWHERE f.Channel = \"Microsoft-Windows-Sysmon/Operational\"\n AND f.EventID = 11\n AND f.Hostname = m.Hostname\n'''\n)\ndf.show(10,False)", "Known Bypasses\n| Idea | Playbook |\n|:-----|:---------|\n| Instead of passing the address of the LoadLibrary, adversaries can copy the malicious code into an existing open process and cause it to execute (either via a small shellcode, or by calling CreateRemoteThread) via a technique known as PE injection.\nThe advantage of this is that the adversary does not have to drop a malicious DLL on the disk.\nSimilar to the basic dll injection technique, the malware allocates memory in a host process (e.g. VirtualAllocEx), and instead of writing a \"DLL path\" it writes its malicious code by calling WriteProcessMemory. | None |\nFalse Positives\nNone\nHunter Notes\n\nLooking for CreateRemoteThread APIs with LoadLibrary functions might return several entries in your environment. I recommend to stack the values of the source and target processes or user to baseline your environmennt.\nLook for processes loading files that have just been created on disk (i.e 1min time window). Stack the values of the processes and files involved. You can tag the files as signed or unsigned depending on the information provided in the security events.\n\nHunt Output\n| Type | Link |\n| :----| :----|\n| Sigma Rule | https://github.com/SigmaHQ/sigma/blob/master/rules/windows/create_remote_thread/sysmon_createremotethread_loadlibrary.yml |\nReferences\n\nhttps://www.endgame.com/blog/technical-blog/ten-process-injection-techniques-technical-survey-common-and-trending-process\nhttps://resources.infosecinstitute.com/using-createremotethread-for-dll-injection-on-windows/\nhttps://arvanaghi.com/blog/dll-injection-using-loadlibrary-in-C/\nhttps://github.com/EmpireProject/Empire/blob/master/data/module_source/code_execution/Invoke-DllInjection.ps1#L249\nhttps://github.com/EmpireProject/Empire/blob/master/data/module_source/code_execution/Invoke-DllInjection.ps1#L291\nhttps://github.com/EmpireProject/Empire/blob/master/data/module_source/code_execution/Invoke-DllInjection.ps1#L295\nhttps://github.com/EmpireProject/Empire/blob/master/data/module_source/code_execution/Invoke-DllInjection.ps1#L303\nhttps://github.com/EmpireProject/Empire/blob/master/data/module_source/code_execution/Invoke-DllInjection.ps1#L307\nhttps://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibrarya" ]
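If you prefer the DataFrame API over raw SQL, Analytic I could also be expressed roughly as follows. This is only a sketch: it assumes the same sdTable view registered above and the standard pyspark.sql.functions import.

```python
from pyspark.sql import functions as F

analytic1 = (
    spark.table("sdTable")
    .where(
        (F.col("Channel") == "Microsoft-Windows-Sysmon/Operational")
        & (F.col("EventID") == 8)
        & (F.lower(F.col("StartModule")).like("%kernel32.dll"))
        & (F.col("StartFunction") == "LoadLibraryA")
    )
    .select("@timestamp", "Hostname", "SourceImage", "TargetImage")
)
analytic1.show(10, False)
```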
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
PyDataMadrid2016/Conference-Info
workshops_materials/20160408_1100_Pandas_for_beginners/tutorial/EN - Tutorial 05 - Combining-grouping-aggregating.ipynb
mit
[ "Again, read some wind data", "# First, the imports\nimport os\nimport datetime as dt\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom IPython.display import display\n\nnp.random.seed(19760812)\n%matplotlib inline", "We read a file of wind data", "# read file 'mast.txt'\nipath = os.path.join('Datos', 'mast.txt')\n\ndef dateparse(date, time):\n YY = 2000 + int(date[:2])\n MM = int(date[2:4])\n DD = int(date[4:])\n hh = int(time[:2])\n mm = int(time[2:])\n \n return dt.datetime(YY, MM, DD, hh, mm, 0)\n \n\ncols = ['Date', 'time', 'wspd', 'wspd_max', 'wdir',\n 'x1', 'x2', 'x3', 'x4', 'x5', \n 'wspd_std']\nwind = pd.read_csv(ipath, sep = \"\\s*\", names = cols, \n parse_dates = {'Timestamp': [0, 1]}, index_col = 0,\n date_parser = dateparse)", "We read a second file of simulated environmental data", "# read file 'model.txt'\nipath = os.path.join('Datos', 'model.txt')\n\nmodel = pd.read_csv(ipath, sep = \"\\s*\", skiprows = 3,\n parse_dates = {'Timestamp': [0, 1]}, index_col = 'Timestamp')\n\nfor c in ['x1','x2','x3','x4','x5']: # remove unnecesary columns\n _ = wind.pop(c)\nwind.head(3)\n\nmodel.head(3)\n\nwind['Timestamp'] = wind.index\nprint(wind['Timestamp'].diff().min())\ndel wind['Timestamp']\n\nmodel['Timestamp'] = model.index\nprint(model['Timestamp'].diff().min())\ndel model['Timestamp']", "We have data with a time frequency of 10 minutes (wind) vs the second file data (model) with a time frequency of 1 hour.\nParenthetical remark: axis 101\nIn some occasions we will find a keyword called axis. Let's see in a moment how it works in pandas to try to avoid some issues:\nPosibilities\n\naxis = 0 (acts over rows)\naxis = 1 (acts over columns)\n<span style=\"color:#888\">axis = 2 (only for Panel)</span>\n\n\n(source: http://stackoverflow.com/a/25774395/5216568).\n<br>\n<div class=\"alert alert-info\">\n<p><b>Flashcard:</b></p> \n<p><a href=\"https://www.reddit.com/r/pystats/comments/2z0xbc/pandas_axis0_or_axis1_not_intuitive_for_you_use/cpev7x9\">Easiest way to remember is that \"1\" looks like a column!</a></p>\n<p><b>Other options:</b></p> \n<p>One option would be to use `axis = 'index'` (similar to `axis = 0`) or `axis = 'columns'` (similar to `axis = 1`) for `DataFrame`s. In the case of `Panel`s we would have `items`, `minor`, `major` (similar to options 0, 1 or 2).</p>.\n<p>For a `DataFrame`s you could also use `index = 'rows'`, I think it is more evident than `'index'` but I don't recommend it as it is not documented.</p>\n<p>Also, using `'index'`, `'rows'`, `'columns'`,..., can be confuse as in a lot of places we will find keywordscalled like this.</p>.\n</div>\n\nBut, what is the meaning of 'acts over rows/columns'. 
Let's see some simple examples to check if it is clear:", "df = pd.DataFrame(np.array([[1, 10], [2, 20], [3,30]]), columns = ['A', 'B'])\ndf", "If we don't use the axis keyword explicitly, by default, operations are over rows (axis = 0), i.e., it uses all the elements of a column:", "df.sum()\n\n# The previous example would be similar to\ndf.sum(axis = 0)", "If we want to obtain the result on each row, i.e., all the elements of all columns in a row, we should add axis = 1:", "df.sum(axis = 1)", "Another example:", "df < 10\n\n(df < 10).all()\n\n(df < 10).all(axis = 'columns') # instead of axis = 1 we use axis = 'columns'\n\n# test operations of a DatFrame using axis = 0, 1, 'index', rows', columns'\n", "I hope you have a clear idea now about how it works.\nMerging/combining pandas data structures\nWhat we will see now is not evident and, in some cases, it is convenient to know how relational algebral works to better understand what it is happening.\nCombining data using concat", "new = pd.concat([wind, model], axis = 0, join = 'outer')\n\nnew.head(5)\n\nnew.tail(5)\n\nnew.loc['2014/01/01 00:00':'2014/01/01 02:00']", "", "new = pd.concat([wind, model], axis = 1, join = 'inner')\n\nnew.head(5)\n\nnew.loc['2014/01/01 00:00':'2014/01/01 02:00']", "concat allows us to 'merge' pandas data structures using rows and columns. \nWhat we have seen is not clear!!!??? And you didn't asked!!!???\nLet's see a simpler example:", "df1 = pd.DataFrame(np.random.randn(10,2), \n columns = ['A', 'B'], \n index = np.arange(10))\ndf2 = pd.DataFrame(np.random.randn(4,3), \n columns = ['A', 'B', 'C'], \n index = np.arange(8, 12))\n\ndf1\n\ndf2\n\nnew = pd.concat([df1, df2], axis = 0, join = 'inner')\nnew\n\nnew = pd.concat([df1, df2], axis = 1, join = 'inner')\nnew", "In general, I use this last option with different column names as it is what I want to do...\nConcatenating using the append method\nWe can get something similar to the previous examples using the append method of the data structures:", "wind.append(model)", "In general, this is not what I want to do. What I want to do is a merge with some logics and to do so we could use pd.merge...\nUsing pd.merge as in a SQL database", "pd.merge(wind, model, left_index = True, right_index = True, how = 'inner').head(5)\n\n(pd.merge(wind, model, left_index = True, right_index = True, how = 'inner') == \n pd.concat([wind, model], axis = 1, join = 'inner')).all().all()", "Imagine we want to merge two DataFrames using columns:", "df1 = pd.DataFrame(\n np.array([\n np.arange(1, 11),\n np.random.choice([1,2,3], size = 10),\n np.arange(1, 11) * 10\n ]).T,\n columns = ['A', 'col', 'B']\n)\ndf2 = pd.DataFrame(\n np.array([\n np.arange(11, 21),\n np.random.choice([1,2,3], size = 10),\n np.arange(1, 11) * 100\n ]).T,\n columns = ['A', 'col', 'B']\n)\ndisplay(df1)\ndisplay(df2)\n\npd.merge(df1, df2, on = ['col'])\n\n# Play with it with pd.merge keywords to become more comfortable with it\n", "Combining using the join method\nMore about the same. The join method helps us to combine pandas data structures. Some quick examples:", "wind.join(model).head(10)\n\nmodel.join(wind).head(10)\n\njoinA = wind.join(model, how = 'inner') \njoinB = model.join(wind, how = 'inner').loc[:,joinA.columns]\n(joinA == joinB).all().all()", "Grouping\nWe can group information of our data structures in a simple way using the groupby method. In general, here we follow a strategy of split-apply-combine. 
What we do is, first separate the initial dataset in groups of interest, over each group we apply some calculations and, finally, the results obtained on each group is combined in a new data structure.", "wind['month'] = wind.index.month\nwind.iloc[[0, 1000, 10000, 30000]]\n\nwind.groupby(by = 'month').mean()\n\nwind.groupby(by = [wind.index.year, 'month']).mean()\n\ndel wind['month']\n\n# Play grouping \n# (obtain daily mean wind speed, \n# mean wind speed on Tuesdays when wind direction is between 300º and 360º,...)\n", "Let's see what groupby returns", "grouped = wind.groupby(by=wind.index.month)\n\nimport inspect\ninfo = inspect.getmembers(grouped, predicate=inspect.ismethod)\n\nfor stuff in info:\n print(stuff[0])\n\ngrouped\n\ngrouped.ngroups\n\ngrouped.groups.keys()\n\ngrouped.get_group(2)", "pandas.core.groupby.DataFrameGroupBy is like a dict with superpowers!!!\nReshaping/transforming our data structures\n<div class=\"alert alert-info\">\n<p>Most of this section has been extracted from <a href=\"https://nikolaygrozev.wordpress.com/2015/07/01/reshaping-in-pandas-pivot-pivot-table-stack-and-unstack-explained-with-pictures/\">excellent article</a>\n<em>Reshaping in Pandas – Pivot, Pivot-Table, Stack and Unstack explained with Pictures</em> by <b>Nikolay Grozev</b>.</p>\n<p>Kudos to Nikolay.</p>\n<p>Kudos to me because a followed the <a href=\"https://en.wikipedia.org/wiki/Don't_repeat_yourself\">DRY</a> and <a href=\"https://en.wikipedia.org/wiki/KISS_principle\">KISS</a> principles.</p>\n</div>\n\nReshaping allows us to change our data structure in a new one to perform new analyses on the new recombined data.\nPivot\nWe obtain a new table derived from our initial data table. For instance, imagine I want to obtain monthly mean wind speed on each year.", "wind['year'] = wind.index.year\nwind['month'] = wind.index.month\ntmp = wind.groupby(by = ['year', 'month']).mean()\ndel wind['year']\ndel wind['month']\ntmp\n\ntmp['year'] = tmp.index.get_level_values(0)\ntmp['month'] = tmp.index.get_level_values(1)\ntmp\n\ntmp.pivot(index = 'year', columns = 'month', values='wspd')\n\n# Get the yearly mean wind speed \n# starting from tmp.pivot(index = 'year', columns = 'month', values='wspd')\n", "Pivoting using several columns:", "tmp = wind.groupby(by = [wind.index.year, wind.index.month])\ntmp = tmp.agg({'wspd': np.mean, 'wspd_max': 'max'})\ntmp.reset_index(inplace = True)\ntmp\n\ntmp.pivot(index = 'level_1', columns = 'level_0')\n\ntmp.pivot(index = 'level_1', columns = 'level_0').columns", "What happens if we combine repeated indexes. For instance:", "from collections import OrderedDict\ntable = OrderedDict((\n (\"Item\", ['Item0', 'Item0', 'Item0', 'Item1']),\n ('CType',['Gold', 'Bronze', 'Gold', 'Silver']),\n ('USD', ['1$', '2$', '3$', '4$']),\n ('EU', ['1€', '2€', '3€', '4€'])\n))\ndf = pd.DataFrame(table)\ndf", "(source: https://nikolaygrozev.files.wordpress.com/2015/07/pivoting_simple_error.png)", "pivoted = df.pivot(index='Item', columns='CType', values='USD')", "pivot_table to the rescue to solve the previous error\nThe previous error can be solved using pivot_table that is more flexible than pivot:", "table = OrderedDict((\n (\"Item\", ['Item0', 'Item0', 'Item0', 'Item1']),\n ('CType',['Gold', 'Bronze', 'Gold', 'Silver']),\n ('USD', [1, 2, 3, 4]),\n ('EU', [1.1, 2.2, 3.3, 4.4])\n))\ndf = pd.DataFrame(table)\npivoted = df.pivot_table(index='Item', columns='CType', values='USD', aggfunc=np.min)\npivoted", "Stack and Unstack\nWe will see it briefly to maintain it simple. 
It involves the use of MultiIndex, which I want to avoid today.\n\n(source: https://nikolaygrozev.files.wordpress.com/2015/07/stack-unstack1.png)\nDocs for stack.\nDocs for unstack.\nRecipes for stack/unstack." ]
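Since stack and unstack are only mentioned in passing above, here is a minimal, self-contained illustration on a toy frame (not the wind data) of what they do; see the linked docs and recipes for the full story.

```python
import pandas as pd

df = pd.DataFrame({'Gold': [1, 3], 'Silver': [2, 4]}, index=['Item0', 'Item1'])
print(df)

stacked = df.stack()       # column labels are pushed into an inner index level -> Series with a MultiIndex
print(stacked)

print(stacked.unstack())   # the inverse: the inner index level goes back to being columns
```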
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
boada/planckClusters
analysis_ir/notebooks/04b. Understand LF.ipynb
mit
[ "%matplotlib inline\n#%matplotlib widget\nfrom astropy.cosmology import LambdaCDM\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm \nfrom astropy import constants as const\nimport astropy.units as u\nfrom scipy.integrate import quad\nimport ezgal # BC03 model maker\nimport os", "Setup Cosmology", "cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.7, Tcmb0=2.725)", "Create Stellar Population", "# check to make sure we have defined the bpz filter path\nif not os.getenv('EZGAL_FILTERS'):\n os.environ['EZGAL_FILTERS'] = (f'{os.environ[\"HOME\"]}/Projects/planckClusters/MOSAICpipe/bpz-1.99.3/FILTER/')\n\nmodel = ezgal.model('bc03_ssp_z_0.02_salp.model')\nmodel = model.make_exponential(1)\nmodel.set_cosmology(Om=cosmo.Om0, Ol=cosmo.Ode0, h=cosmo.h, w=cosmo.w(0))\n \nmodel.add_filter('g_MOSAICII.res', name='g')\nmodel.add_filter('r_MOSAICII.res', name='r')\nmodel.add_filter('i_MOSAICII.res', name='i')\nmodel.add_filter('z_MOSAICII.res', name='z')\nmodel.add_filter('K_KittPeak.res', name='K')\n\n# Blanton 2003 Normalization\nMr_star = -20.44 + 5 * np.log10(cosmo.h) # abs mag.\n# set the normalization\nmodel.set_normalization('sloan_r', 0.1, Mr_star, vega=False) ", "Calculate a few things to get going.", "# desired formation redshift\nzf = 6.0\n# fetch an array of redshifts out to given formation redshift\nzs = model.get_zs(zf)\n \n# Calculate some cosmological stuff\nDM = cosmo.distmod(zs)\ndlum = cosmo.luminosity_distance(zs)", "Define the functions that we'll need\nNeed to compute the cluster volume...\n$M_{vir} = 4/3 \\pi r^3_{vir} \\rho_c(r<r_{vir}) = 4/3 \\pi r^3_{vir} \\Delta_c \\rho_c$\nif we let $\\Delta_c = 200$ then \n$M_{200} = 4/3 \\pi r^3_{200} 200 \\rho_c$ with $\\rho_c = \\frac{3H(z)^2}{8\\pi G}$\nor just $M_{200} = V_{200}200\\rho_c$. So we'll make a function to calculate $\\rho_c$. 
And we'll make use of the astropy units package to do all the unit analysis for us.\nDon't forget that $H(z) = H_0E(z)$ \nWe also need to integrate the Schechter luminosity functions..\nThe Schechter Function:\nFor Luminosity:\n$\\Phi(L) = \\phi^\\star \\frac{L}{L_\\star}^\\alpha e^{-\\frac{L}{L_\\star}}$\nFor Magnitudes:\n$\\Phi(M) = \\phi^\\star\\frac{2}{5}log(10) (10^{\\frac{2}{5}(M_\\star - M)})^{\\alpha+1} e^{-10^{\\frac{2}{5}(M_\\star - M)}}$", "def rho_crit(z, cosmo):\n # convert G into better units:\n G = const.G.to(u.km**2 * u.Mpc/(u.M_sun * u.s**2))\n return 3 / (8 * np.pi * G) * cosmo.H0**2 * cosmo.efunc(z)**2 # Mpc^3\n\ndef schechterL(luminosity, phiStar, alpha, LStar): \n \"\"\"Schechter luminosity function.\"\"\" \n LOverLStar = (luminosity/LStar) \n return (phiStar/LStar) * LOverLStar**alpha * np.exp(- LOverLStar) \n\ndef schechterM(magnitude, phiStar, alpha, MStar): \n \"\"\"Schechter luminosity function by magnitudes.\"\"\"\n \n # check to make sure things are all the same size\n if isinstance(phiStar, float) and isinstance(magnitude, np.ndarray):\n phiStar = np.ones_like(magnitude) * phiStar\n if isinstance(alpha , float) and isinstance(magnitude, np.ndarray):\n alpha = np.ones_like(magnitude) * alpha\n if isinstance(MStar, float) and isinstance(magnitude, np.ndarray):\n MStar = np.ones_like(magnitude) * MStar\n \n MStarMinM = 0.4 * (MStar - magnitude)\n return (0.4 * np.log(10) * phiStar * 10.0**(MStarMinM * (alpha + 1.)) * np.exp(-10.**MStarMinM))", "Functions to calculate everything at a specific redshift", "def calc_Maglim(mlim, redshift, filter='i', cosmo=cosmo, model=model):\n return mlim - cosmo.distmod(redshift).value - model.get_kcorrects(zf, filters=filter, zs=redshift)\ndef calc_phistar(redshift, cosmo=cosmo):\n return 3.6 * cosmo.efunc(redshift)**2\ndef calc_alpha(redshift):\n return -1.05 * (1 + redshift)**(-2 / 3)\ndef calc_fr(redshift):\n return 0.8 * (1 + redshift)**(-1 / 2)", "Start Calculating things", "# So now we are going to calculate the volumes as a function of z\n\n#M200 = mass_func(zarr) * u.solMass\n\nM200 = 1e15 * u.solMass\nV200 = M200/ (200 * rho_crit(zs, cosmo))\n\n# Calculate the M_star values\nMstar = model.get_absolute_mags(zf, filters='i', zs=zs)\n\n# calculate the abs mag of our limiting magnitude as a function of z\nmlim = 23.5\n#Mlim = Mstar - 2.5 * np.log10(0.4)\nMlim = mlim - cosmo.distmod(zs).value - model.get_kcorrects(zf, filters='i', zs=zs)\n\n# Here are the Schechter function stuff from Liu et al.\nphi_star = 3.6 * cosmo.efunc(zs)**2\nalpha = -1.05 * (1 + zs)**(-2/3)\nfr = 0.8*(1 + zs)**(-1/2)\n\nLF = []\nfor phi, a, M_star, M_lim in zip(phi_star, alpha, Mstar, Mlim):\n if M_lim < M_star - 2.5 * np.log10(0.4):\n Mlimit = M_lim\n else:\n Mlimit = M_star - 2.5 * np.log10(0.4)\n y, err = quad(schechterM, -30, Mlimit, args=(phi, a, M_star))\n LF.append(y)\n\nplt.figure()\nplt.plot(zs, (LF * V200.value + 1) * fr)\nax = plt.gca()\nax.set_yticks(np.arange(0, 75, 10))\nplt.xlim(0.1, 5)\nplt.ylim(0, 80)\nplt.xlabel('redshift')\nplt.ylabel('N (r < r200)')\nplt.grid()\n\ncalc_Maglim(23.5, 0.2)\n\nmagarr = np.arange(-30, -14, 0.2)\n\nredshifts = np.arange(0.2, 2, 0.2) \n# make some colored lines for the plots\ncm_subset = np.linspace(0.2, 0.8, len(redshifts))\ncolors = [cm.bone(x) for x in cm_subset]", "The figure below is where we let both $\\phi^\\star$ and $\\alpha$ evolve with redshift.\nThe vertical lines show the faint limit to our integral. In all cases we are integrating to $M=-30$ mag. 
So in the function above, we are integrating from the left to the right in this figure.", "plt.figure(figsize=(8,6))\nfor z, c in zip(redshifts, colors):\n plt.plot(magarr, schechterM(magarr, calc_phistar(z), calc_alpha(z), model.get_absolute_mags(zf, filters='i', zs=z)), c=c)\n plt.axvline(calc_Maglim(23.5, z), c=c, label=f'{z:.2}')\nplt.xlabel('Abs Magnitude')\nplt.ylabel('$\\Phi(M)$ [Ngal $Mpc^{-3}$]')\nplt.semilogy()\nplt.ylim(0.01, 20)\nplt.legend(loc='upper left')", "The figure below is where we fix both $\\phi^\\star$ and $\\alpha$ at some low redshift value, and do not allow them to evolve with redshift.\nHere I've fixed them at $z=0.2$.\nIn my opinion, this looks a lot more like what we would expect for a LF. It still doesn't fully capture the evolution of the LF that I'd expect. I'd expect the", "plt.figure(figsize=(8,6))\nfor z, c in zip(redshifts, colors):\n plt.plot(magarr, schechterM(magarr, calc_phistar(0.2), calc_alpha(z), model.get_absolute_mags(zf, filters='i', zs=z)), c=c)\n plt.axvline(calc_Maglim(23.5, z), c=c, label=f'{z:.2}')\nplt.xlabel('Abs Magnitude')\nplt.ylabel('$\\Phi(M)$ [Ngal $Mpc^{-3}$]')\nplt.semilogy()\nplt.ylim(0.01, 10)\nplt.legend(loc='upper left')\n\nLF = []\nfor phi, a, M_star, M_lim in zip(phi_star, alpha, Mstar, Mlim):\n if M_lim < M_star - 2.5 * np.log10(0.4):\n Mlimit = M_lim\n else:\n Mlimit = M_star - 2.5 * np.log10(0.4)\n y, err = quad(schechterM, -30, Mlimit, args=(7, -0.8, M_star))\n #print(M_star - M_lim, y)\n LF.append(y)", "Here is the above figure, with $\\phi^\\star$ and $\\alpha$ fixed to their $z=0.2$ values.", "plt.figure()\nplt.plot(zs, (LF * V200.value + 1) * fr)\nax = plt.gca()\n#ax.set_yticks(np.arange(0, 75, 10))\nplt.xlim(0.1, 2)\n#plt.ylim(0, 80)\nplt.xlabel('redshift')\nplt.ylabel('N (r < r200)')\nplt.axhline(3)\nplt.grid()\n\nLF * V200.value + 1\n\nMpiv = 6e14\nzpiv = 0.46\n\nfr2 = 0.68 * (M200.value / Mpiv)**-0.1 * ((1 + zs)/ (1 + zpiv))** -0.65\n\nplt.plot(fr, fr2)\nplt.plot(fr, fr3)\nplt.plot([0,1], [0,1], c='k')\n\nfrom scipy.special import erf\n\nN200 = LF * V200.value + 1\n\ngz = 0.206 - 0.371 * zs\nhz = -3.6 + 25.8 * zs\n\nfr3 = gz * erf(np.log10(N200) - np.log10(hz)) + 0.69\n\nfr3\n\nfr2\n\ncalc_phistar(0.2)\n\nphi_star" ]
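As a quick sanity check on the quad integrals above, the same Schechter integral can be evaluated on a fixed magnitude grid with Simpson's rule and compared against quad. This is a sketch with made-up illustrative parameters, not the cluster values used above, and scipy.integrate.simpson is called simps in older SciPy releases.

```python
import numpy as np
from scipy.integrate import quad, simpson  # `simpson` is `simps` in older SciPy versions

def schechter_mag(M, phi_star, alpha, M_star):
    x = 0.4 * (M_star - M)
    return 0.4 * np.log(10) * phi_star * 10.0**(x * (alpha + 1.0)) * np.exp(-10.0**x)

phi_star, alpha, M_star, M_lim = 3.6, -1.0, -21.5, -19.0  # illustrative values only

n_quad, _ = quad(schechter_mag, -30, M_lim, args=(phi_star, alpha, M_star))
grid = np.linspace(-30, M_lim, 4001)
n_simpson = simpson(schechter_mag(grid, phi_star, alpha, M_star), x=grid)

print(n_quad, n_simpson)  # the two estimates should agree to several decimal places
```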
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
srnas/barnaba
manuscript_figures/01_figure.ipynb
gpl-3.0
[ "Figure 1\nHere, we plot the eRMSD and RMSD over time as shown in figure 1. We color the different datapoints according to the annotation: blue if the stem is formed and red if the stem is formed and the non-canonical interaction between U6-G9 is present. \nThe first step is to read the pickles:", "import pickle \n\n# read ermds pickle\nfname = \"ermsd.p\"\nprint \"# reading pickle %s\" % fname, \nermsd = pickle.load(open(fname, \"r\"))\nprint \" - shape \", ermsd.shape\n\n# Read rmsd pickle\nfname = \"rmsd.p\"\nprint \"# reading pickle %s\" % fname,\nrmsd = pickle.load(open(fname, \"r\"))\nprint \" - shape \", rmsd.shape\n\n# Read annotation pickle\nfname = \"pairs.p\"\nprint \"# reading pickle %s\" % fname, \npairings,res = pickle.load(open(fname, \"r\"))\nprint \" - shape \", len(pairings)\n\n# Read dotbracket pickle\nfname = \"dotbracket.p\"\nprint \"# reading pickle %s\" % fname,\ndotbr,res = pickle.load(open(fname, \"r\"))\nprint \" - shape \", len(dotbr)\n", "Now we create a list of length n=20000, where n is the lenght of the simulation. The entry is 2 if the stem is formed and SWt present, 1 if stem is formed and 0 otherwise.", "\n# bin structures according to annotation:\n# bins_anno[j] = 2 if stem is formed and SWt present\n# bins_anno[j] = 1 if stem is formed\n# bins_anno[j] = 0 otherwise\nbins_anno = [0]*len(pairings)\nfor j in range(0,len(pairings)):\n # if the stem is fully formed add 1\n if(dotbr[j] == \"(((((....)))))\"):\n bins_anno[j] += 1\n # search through the list if SWt between U6 and G9 is present. \n for p in range(len(pairings[j][0])):\n res1 = res[pairings[j][0][p][0]]\n res2 = res[pairings[j][0][p][1]]\n interaction = pairings[j][1][p]\n if(res1==\"U_6_0\" and res2==\"G_9_0\" and interaction==\"SWt\"):\n bins_anno[j] += 1\n\n", "We are now ready to plot the data: time series and histogram on the right.", "# Import numpy, matbplotlib and seaborn. 
\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_style(\"white\")\n\n# define colors: gray, blue and red \nflatui = [ \"#95a5a6\", \"#3498db\", \"#e74c3c\"]\ncmap = sns.color_palette(flatui)\ncol = [flatui[j] for j in bins_anno]\n\n# define figure size \nplt.figure(figsize=(7,6))\nplt.subplots_adjust(left=0.1, bottom=0.1, right=0.97, top=0.97,\n wspace=None, hspace=None)\nax1 = plt.subplot2grid((2, 3), (1, 0), colspan=2)\n\n# plot eRMSD over time \nxx = np.linspace(0,180,ermsd.shape[0])\nax1.scatter(xx,ermsd,c=col,s=1)\n\n# set limits and labels\nax1.set_xlim(-1,181)\nax1.set_ylabel(\"eRMSD from native\")\nax1.set_ylim(0,2.5)\nax1.set_xlabel(r\"Time ($\\mu$s)\")\n\n# Define new axis and do histograms for the three categories \nax2 = plt.subplot2grid((2, 3), (1, 2))\nbins = np.linspace(0,2.5,100)\nhh1,ee1 = np.histogram([ermsd[e] for e in range(ermsd.shape[0]) if bins_anno[e]==0],bins=bins)\nhh2,ee2 = np.histogram([ermsd[e] for e in range(ermsd.shape[0]) if bins_anno[e]==1],bins=bins)\nhh3,ee3 = np.histogram([ermsd[e] for e in range(ermsd.shape[0]) if bins_anno[e]==2],bins=bins)\n# do horizontal barplot with left padding\nax2.barh(0.5*(ee1[1:]+ee1[:-1]),hh1,color=flatui[0],height=0.026,linewidth=0)\nax2.barh(0.5*(ee1[1:]+ee1[:-1]),hh2,color=flatui[1],height=0.026,left=hh1,linewidth=0)\nax2.barh(0.5*(ee1[1:]+ee1[:-1]),hh3,color=flatui[2],height=0.026,left=hh1+hh2,linewidth=0)\n# set labels, limits\nax2.set_xlabel(\"Count\")\nax2.set_ylim(0,2.5)\n# draw line at eRMSD = 0.7\nax2.axhline(0.7,ls='--',c='k',lw=1)\n\n# now do the same as above, but for RMSD\nax3 = plt.subplot2grid((2, 3), (0, 0), colspan=2)\n# plot time series\nax3.scatter(xx,rmsd,c=col,s=1)\nax3.set_xlim(-1,181)\nax3.set_ylim(0,2.2)\nax3.set_ylabel(\"RMSD from native (nm)\")\n\n# histogram \nax4 = plt.subplot2grid((2, 3), (0, 2))\nbins = np.linspace(0,2.2,100)\nhh1,ee1 = np.histogram([rmsd[e] for e in range(ermsd.shape[0]) if bins_anno[e]==0],bins=bins)\nhh2,ee2 = np.histogram([rmsd[e] for e in range(ermsd.shape[0]) if bins_anno[e]==1],bins=bins)\nhh3,ee3 = np.histogram([rmsd[e] for e in range(ermsd.shape[0]) if bins_anno[e]==2],bins=bins)\nax4.barh(0.5*(ee1[1:]+ee1[:-1]),hh1,color=flatui[0],height=0.025,linewidth=0)\nax4.barh(0.5*(ee1[1:]+ee1[:-1]),hh2,color=flatui[1],height=0.025,left=hh1,linewidth=0)\nax4.barh(0.5*(ee1[1:]+ee1[:-1]),hh3,color=flatui[2],height=0.025,left=hh1+hh2,linewidth=0)\n\n# set limits and draw line at 0.23 nm\nax4.axhline(0.23,ls='--',c='k',lw=1)\nax4.set_ylim(0,2.2)\n\nplt.show()\n#plt.savefig(\"figure1.png\",dpi=600)\n" ]
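A small follow-up sketch, reusing the ermsd, rmsd and bins_anno variables built above (and assuming one entry per frame, as in the plots): for each annotation bin we can count how many frames fall below the native-like cutoffs marked by the dashed lines (eRMSD < 0.7, RMSD < 0.23 nm).

```python
import numpy as np

bins_arr = np.asarray(bins_anno)
ermsd_arr = np.asarray(ermsd).ravel()
rmsd_arr = np.asarray(rmsd).ravel()

for b, label in zip((0, 1, 2), ("other", "stem formed", "stem + U6/G9 SWt")):
    mask = bins_arr == b
    n = mask.sum()
    if n == 0:
        print("%-18s no frames" % label)
        continue
    f_e = 100.0 * np.mean(ermsd_arr[mask] < 0.7)
    f_r = 100.0 * np.mean(rmsd_arr[mask] < 0.23)
    print("%-18s %6d frames, eRMSD<0.7: %5.1f%%, RMSD<0.23 nm: %5.1f%%" % (label, n, f_e, f_r))
```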
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
fastai/fastai
nbs/18_callback.fp16.ipynb
apache-2.0
[ "#|hide\n#|skip\n! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab\n\n#|all_cuda\n\n#|export\nfrom __future__ import annotations\nfrom fastai.basics import *\nfrom fastai.callback.progress import *\n\nfrom torch.cuda.amp import GradScaler,autocast\nfrom torch.cuda.amp.grad_scaler import OptState\n\n#|default_exp callback.fp16\n\n#|hide\nfrom fastai.test_utils import *\nfrom nbdev.showdoc import *", "Mixed precision training\n\nCallback and utility functions to allow mixed precision training \n\nA little bit of theory\nA very nice and clear introduction to mixed precision training is this video from NVIDIA.\nWhat's half precision?\nIn neural nets, all the computations are usually done in single precision, which means all the floats in all the arrays that represent inputs, activations, weights... are 32-bit floats (FP32 in the rest of this post). An idea to reduce memory usage (and avoid those annoying cuda errors) has been to try and do the same thing in half-precision, which means using 16-bits floats (or FP16 in the rest of this post). By definition, they take half the space in RAM, and in theory could allow you to double the size of your model and double your batch size.\nAnother very nice feature is that NVIDIA developed its latest GPUs (the Volta generation) to take fully advantage of half-precision tensors. Basically, if you give half-precision tensors to those, they'll stack them so that each core can do more operations at the same time, and theoretically gives an 8x speed-up (sadly, just in theory).\nSo training at half precision is better for your memory usage, way faster if you have a Volta GPU (still a tiny bit faster if you don't since the computations are easiest). How do we do it? Super easily in pytorch, we just have to put .half() everywhere: on the inputs of our model and all the parameters. Problem is that you usually won't see the same accuracy in the end (so it happens sometimes) because half-precision is... well... not as precise ;).\nProblems with half-precision:\nTo understand the problems with half precision, let's look briefly at what an FP16 looks like (more information here).\n\nThe sign bit gives us +1 or -1, then we have 5 bits to code an exponent between -14 and 15, while the fraction part has the remaining 10 bits. Compared to FP32, we have a smaller range of possible values (2e-14 to 2e15 roughly, compared to 2e-126 to 2e127 for FP32) but also a smaller offset.\nFor instance, between 1 and 2, the FP16 format only represents the number 1, 1+2e-10, 1+22e-10... which means that 1 + 0.0001 = 1 in half precision. That's what will cause a certain numbers of problems, specifically three that can occur and mess up your training.\n1. The weight update is imprecise: inside your optimizer, you basically do w = w - lr * w.grad for each weight of your network. The problem in performing this operation in half precision is that very often, w.grad is several orders of magnitude below w, and the learning rate is also small. The situation where w=1 and lrw.grad is 0.0001 (or lower) is therefore very common, but the update doesn't do anything in those cases.\n2. Your gradients can underflow. In FP16, your gradients can easily be replaced by 0 because they are too low.\n3. Your activations or loss can overflow. 
The opposite problem from the gradients: it's easier to hit nan (or infinity) in FP16 precision, and your training might more easily diverge.\nThe solution: mixed precision training\nTo address those three problems, we don't fully train in FP16 precision. As the name mixed training implies, some of the operations will be done in FP16, others in FP32. This is mainly to take care of the first problem listed above. For the next two there are additional tricks.\nThe main idea is that we want to do the forward pass and the gradient computation in half precision (to go fast) but the update in single precision (to be more precise). It's okay if w and grad are both half floats, but when we do the operation w = w - lr * grad, we need to compute it in FP32. That way our 1 + 0.0001 is going to be 1.0001. \nThis is why we keep a copy of the weights in FP32 (called master model). Then, our training loop will look like:\n1. compute the output with the FP16 model, then the loss\n2. back-propagate the gradients in half-precision.\n3. copy the gradients in FP32 precision\n4. do the update on the master model (in FP32 precision)\n5. copy the master model in the FP16 model.\nNote that we lose precision during step 5, and that the 1.0001 in one of the weights will go back to 1. But if the next update corresponds to add 0.0001 again, since the optimizer step is done on the master model, the 1.0001 will become 1.0002 and if we eventually go like this up to 1.0005, the FP16 model will be able to tell the difference.\nThat takes care of problem 1. For the second problem, we use something called gradient scaling: to avoid the gradients getting zeroed by the FP16 precision, we multiply the loss by a scale factor (scale=512 for instance). That way we can push the gradients to the right in the next figure, and have them not become zero.\n\nOf course we don't want those 512-scaled gradients to be in the weight update, so after converting them into FP32, we can divide them by this scale factor (once they have no risks of becoming 0). This changes the loop to:\n1. compute the output with the FP16 model, then the loss.\n2. multiply the loss by scale then back-propagate the gradients in half-precision.\n3. copy the gradients in FP32 precision then divide them by scale.\n4. do the update on the master model (in FP32 precision).\n5. copy the master model in the FP16 model.\nFor the last problem, the tricks offered by NVIDIA are to leave the batchnorm layers in single precision (they don't have many weights so it's not a big memory challenge) and compute the loss in single precision (which means converting the last output of the model in single precision before passing it to the loss).\n\nDynamic loss scaling\nThe only annoying thing with the previous implementation of mixed precision training is that it introduces one new hyper-parameter to tune, the value of the loss scaling. Fortunately for us, there is a way around this. We want the loss scaling to be as high as possible so that our gradients can use the whole range of representation, so let's first try a really high value. In all likelihood, this will cause our gradients or our loss to overflow, and we will try again with half that big value, and again, until we get to the largest loss scale possible that doesn't make our gradients overflow.\nThis value will be perfectly fitted to our model and can continue to be dynamically adjusted as the training goes, if it's still too high, by just halving it each time we overflow. 
After a while though, training will converge and gradients will start to get smaller, so we al\nso need a mechanism to get this dynamic loss scale larger if it's safe to do so. The strategy used in the Apex library is to multiply the loss scale by 2 each time we had a given number of iterations without overflowing.\nMixedPrecision -", "#|export\n@delegates(GradScaler)\nclass MixedPrecision(Callback):\n \"Mixed precision training using Pytorch's `autocast` and `GradScaler`\"\n order = 10\n def __init__(self, **kwargs): self.kwargs = kwargs\n def before_fit(self): \n self.autocast,self.learn.scaler,self.scales = autocast(),GradScaler(**self.kwargs),L()\n def before_batch(self): self.autocast.__enter__()\n def after_pred(self):\n if next(flatten(self.pred)).dtype==torch.float16: self.learn.pred = to_float(self.pred)\n def after_loss(self): self.autocast.__exit__(None, None, None)\n def before_backward(self): self.learn.loss_grad = self.scaler.scale(self.loss_grad)\n def before_step(self):\n \"Use `self` as a fake optimizer. `self.skipped` will be set to True `after_step` if gradients overflow. \"\n self.skipped=True\n self.scaler.step(self)\n if self.skipped: raise CancelStepException()\n self.scales.append(self.scaler.get_scale())\n def after_step(self): self.learn.scaler.update()\n\n @property \n def param_groups(self): \n \"Pretend to be an optimizer for `GradScaler`\"\n return self.opt.param_groups\n def step(self, *args, **kwargs): \n \"Fake optimizer step to detect whether this batch was skipped from `GradScaler`\"\n self.skipped=False\n def after_fit(self): self.autocast,self.learn.scaler,self.scales = None,None,None\n\nshow_doc(MixedPrecision)\n\n#|export\nclass FP16TestCallback(Callback):\n \"Asserts that predictions are `float16` values\"\n order = 9\n def after_pred(self): assert listify(flatten(self.pred))[0].dtype==torch.float16\n\n#cuda\nset_seed(99, True)\nlearn = synth_learner(cbs=[MixedPrecision,FP16TestCallback], cuda=True)\nlearn.model = nn.Sequential(nn.Linear(1,1), nn.Linear(1,1)).cuda()\nlearn.opt_func = partial(SGD, mom=0.)\nlearn.splitter = lambda m: [list(m[0].parameters()), list(m[1].parameters())]\nlearn.fit(3)\nassert learn.recorder.values[-1][-1]<learn.recorder.values[0][-1]\n\n#|hide\n#cuda\n#Multioutput version\nset_seed(99, True)\nlearn = synth_learner(cbs=[MixedPrecision,FP16TestCallback], cuda=True)\nclass MultiOutputModel(Module):\n def __init__(self): self.linear1, self.linear2 = nn.Linear(1,1) , nn.Linear(1,1)\n def forward(self,x): return self.linear1(x), self.linear2(x)\ndef multioutputloss(pred, val): return ((val-pred[0]).abs() + 0.5 * (val-pred[1]).abs()).sum()\nlearn.model = MultiOutputModel()\nlearn.opt_func = partial(SGD, mom=0.)\nlearn.splitter = lambda m: [list(m.linear1.parameters()), list(m.linear2.parameters())]\nlearn.loss_func=multioutputloss\nlearn.fit(3)\nassert learn.recorder.values[-1][-1]<learn.recorder.values[0][-1]\n\n#|export\n@patch\n@delegates(GradScaler)\ndef to_fp16(self:Learner, **kwargs): return self.add_cb(MixedPrecision(**kwargs))\n\n#|export\n@patch\ndef to_fp32(self:Learner): return self.remove_cb(MixedPrecision)", "Util functions\nBefore going in the main Callback we will need some helper functions. 
We use the ones from the APEX library.", "#|export \nfrom fastai.fp16_utils import convert_network, model_grads_to_master_grads, master_params_to_model_params", "Converting the model to FP16\nWe will need a function to convert all the layers of the model to FP16 precision except the BatchNorm-like layers (since those need to be done in FP32 precision to be stable). In Apex, the function that does this for us is convert_network. We can use it to put the model in FP16 or back to FP32.", "model = nn.Sequential(nn.Linear(10,30), nn.BatchNorm1d(30), nn.Linear(30,2)).cuda()\nmodel = convert_network(model, torch.float16)\n\nfor i,t in enumerate([torch.float16, torch.float32, torch.float16]):\n test_eq(model[i].weight.dtype, t)\n test_eq(model[i].bias.dtype, t)\n \nmodel = nn.Sequential(nn.Linear(10,30), BatchNorm(30, ndim=1), nn.Linear(30,2)).cuda()\nmodel = convert_network(model, torch.float16)\n\nfor i,t in enumerate([torch.float16, torch.float32, torch.float16]):\n test_eq(model[i].weight.dtype, t)\n test_eq(model[i].bias.dtype, t)", "Creating the master copy of the parameters\nFrom our model parameters (mostly in FP16), we'll want to create a copy in FP32 (master parameters) that we will use for the step in the optimizer. Optionally, we concatenate all the parameters to do one flat big tensor, which can make that step a little bit faster.\nWe can't use the FP16 util function here as it doesn't handle multiple parameter groups, which is the thing we use to\n- do transfer learning and freeze some layers\n- apply discriminative learning rates\n- don't apply weight decay to some layers (like BatchNorm) or the bias terms", "#|export\nfrom torch.nn.utils import parameters_to_vector\n\n#|export\ndef get_master(\n opt:Optimizer, # Optimizer from which to retrieve model params\n flat_master:bool=False, # Flatten fp32 params into a vector for better performance\n) -> list: # List of fp16 params, and list of fp32 params\n \"Creates fp16 model params given an initialized `Optimizer`, also returning fp32 model params. 
\"\n model_params = [[param for param in pg if getattr(param, 'requires_grad', False) and hasattr(param, 'data')] for pg in opt.param_lists]\n if flat_master:\n master_params = []\n for pg in model_params:\n mp = parameters_to_vector([param.data.float() for param in pg])\n mp = nn.Parameter(mp, requires_grad=True)\n if mp.grad is None: mp.grad = mp.new(*mp.size())\n master_params.append([mp])\n else:\n master_params = [[nn.Parameter(param.data.clone().float().detach(), requires_grad=True) for param in pg] for pg in model_params]\n return model_params, master_params\n\n#|hide\n#cuda\nlearn = synth_learner()\nlearn.model = convert_network(nn.Sequential(nn.Linear(1,1), nn.Linear(1,1)), torch.float16).cuda()\nlearn.splitter = lambda m: [list(m[0].parameters()), list(m[1].parameters())]\nlearn.opt = learn.opt_func(learn.splitter(learn.model), learn.lr)\nmodel_p,master_p = get_master(learn.opt)\ntest_eq(len(model_p), 2) #2 pqrqm groups\ntest_eq(len(master_p), 2)\nfor pg1,pg2 in zip(model_p,master_p):\n test_eq([p.float() for p in pg1], pg2) #Same values but different types\n for p in pg1: assert p.dtype == torch.float16\n\n#|hide\n#cuda\n#Flattened version\nmodel_pf,master_pf = get_master(learn.opt, flat_master=True)\ntest_eq(len(model_pf), 2) #2 pqrqm groups\ntest_eq(len(master_pf), 2)\nfor pg1,pg2 in zip(model_pf,master_pf):\n test_eq(len(pg2), 1) #One flattened tensor\n test_eq([p.float().squeeze() for p in pg1], [p for p in pg2[0]]) #Same values but different types\n for p in pg1: assert p.dtype == torch.float16", "Copy the gradients from model params to master params\nAfter the backward pass, all gradients must be copied to the master params before the optimizer step can be done in FP32. The corresponding function in the Apex utils is model_grads_to_master_grads but we need to adapt it to work with param groups.", "#|export \ndef to_master_grads( \n model_pgs:list, # Fp16 model parameters to copy gradients from\n master_pgs:list, # Fp32 model parameters to copy gradients to\n flat_master:bool=False, # Whether or not fp32 parameters were previously flattened\n):\n \"Move fp16 model gradients to fp32 master gradients\"\n for (model_params,master_params) in zip(model_pgs,master_pgs):\n model_grads_to_master_grads(model_params, master_params, flat_master=flat_master)\n\n#|hide\n#cuda\nxb,yb = learn.dls.one_batch()\npred = learn.model.cuda()(xb.cuda().half())\nloss = F.mse_loss(pred, yb.cuda().half())\nloss.backward()\nto_master_grads(model_p, master_p)\nto_master_grads(model_pf, master_pf, flat_master=True)\ntest_eq([[p.grad.float() for p in pg] for pg in model_p],\n [[p.grad for p in pg] for pg in master_p])\ntest_eq([[p.grad.float().squeeze() for p in pg] for pg in model_pf], \n [[p for p in pg[0].grad] for pg in master_pf])\nxb.shape", "Copy the master params to the model params\nAfter the step, we need to copy back the master parameters to the model parameters for the next update. The corresponding function in Apex is master_params_to_model_params.", "#|export \ndef to_model_params(\n model_pgs:list, # Fp16 model params to copy to\n master_pgs:list, # Fp32 master params to copy from\n flat_master:bool=False # Whether master_pgs was previously flattened\n)->None:\n \"Copy updated fp32 master params to fp16 model params after gradient step. 
\" \n for (model_params,master_params) in zip(model_pgs,master_pgs):\n master_params_to_model_params(model_params, master_params, flat_master=flat_master)\n\n#|hide\n#cuda\nlearn.opt.params = master_p\nlearn.opt.step()\nto_model_params(model_p, master_p)\ntest_close([p.float() for pg in model_p for p in pg], [p for pg in master_p for p in pg], eps=1e-3)\n\n#|hide\n#cuda\nlearn.opt.params = master_pf\nlearn.opt.step()\nto_model_params(model_pf, master_pf, flat_master=True)\ntest_close([p.float().squeeze() for pg in model_pf for p in pg], [p for pg in master_pf for p in pg[0]], eps=1e-3)", "Checking for overflow\nFor dynamic loss scaling, we need to know when the gradients have gone up to infinity. It's faster to check it on the sum than to do torch.isinf(x).any().", "#|export \ndef test_overflow(x:torch.Tensor):\n \"Tests whether fp16 gradients have overflown.\"\n s = float(x.float().sum())\n return (s == float('inf') or s == float('-inf') or s != s)\n\nx = torch.randn(3,4)\nassert not test_overflow(x)\nx[1,2] = float('inf')\nassert test_overflow(x)", "Then we can use it in the following function that checks for gradient overflow:", "#|export \ndef grad_overflow(pgs:list)->bool: \n \"Tests all fp16 parameters in pgs for gradient overflow\"\n for pg in pgs:\n for p in pg:\n if p.grad is not None and test_overflow(p.grad.data): return True\n return False\n\n#|hide\n#cuda\nassert not grad_overflow(model_p)\nassert not grad_overflow(model_pf)\nmodel_p[1][0].grad.data[0,0] = float('inf')\nmodel_pf[0][1].grad.data[0] = float('inf')\nassert grad_overflow(model_p)\nassert grad_overflow(model_pf)", "NonNativeMixedPrecision -", "#|export\ndef copy_clone(d):\n return {k:(v.detach().clone().float() if isinstance(v,Tensor) else v) for k,v in d.items()}\n\n#|export\ndef _copy_state(opt, pgs1, pgs2):\n opt.param_lists = pgs2\n for pg1,pg2 in zip(pgs1, pgs2):\n for p1,p2 in zip(pg1, pg2): opt.state[p2] = copy_clone(opt.state.pop(p1, {}))\n\n#|export\nclass ModelToHalf(Callback):\n \"Use with NonNativeMixedPrecision callback (but it needs to run at the very beginning)\"\n order=-50\n def before_fit(self): self.learn.model = convert_network(self.model, dtype=torch.float16)\n def after_fit (self): self.learn.model = convert_network(self.model, dtype=torch.float32)\n\n#|export\n@docs\nclass NonNativeMixedPrecision(Callback):\n \"Run training in mixed precision\"\n order=10\n def __init__(self, \n loss_scale:int=512, # Non-dynamic loss scale, used to avoid underflow of gradients. 
\n flat_master:bool=False, # Whether to flatten fp32 parameters for performance\n dynamic:bool=True, # Whether to automatically determine loss scaling\n max_loss_scale:float=2.**24, # Starting value for dynamic loss scaling\n div_factor:float=2., # Divide by this on overflow, multiply by this after scale_wait batches\n scale_wait:int=500, # Number of batches to wait for increasing loss scale\n clip:float=None, # Value to clip gradients at, max_norm, as in `nn.utils.clip_grad_norm_`\n ): \n assert torch.backends.cudnn.enabled, \"Mixed precision training requires cudnn.\"\n self.flat_master,self.dynamic,self.max_loss_scale = flat_master,dynamic,max_loss_scale\n self.div_factor,self.scale_wait,self.clip = div_factor,scale_wait,clip\n self.loss_scale = max_loss_scale if dynamic else loss_scale\n\n def before_fit(self):\n assert self.dls.device.type == 'cuda', \"Mixed-precision training requires a GPU, remove the call `to_fp16`\"\n if self.learn.opt is None: self.learn.create_opt()\n self.model_pgs,self.master_pgs = get_master(self.opt, self.flat_master)\n self.old_pgs = self.opt.param_lists\n #Changes the optimizer so that the optimization step is done in FP32.\n _copy_state(self.learn.opt, self.model_pgs, self.master_pgs)\n if self.dynamic: self.count = 0\n\n def before_batch(self): self.learn.xb = to_half(self.xb)\n def after_pred(self): self.learn.pred = to_float(self.pred)\n def before_backward(self): self.learn.loss_grad *= self.loss_scale\n\n def before_step(self):\n #First, check for an overflow\n if self.dynamic and grad_overflow(self.model_pgs):\n self.loss_scale /= self.div_factor\n self.learn.loss_grad /= self.div_factor #to record correct loss\n self.model.zero_grad()\n raise CancelBatchException() #skip step and zero_grad\n to_master_grads(self.model_pgs, self.master_pgs, self.flat_master)\n for master_params in self.master_pgs:\n for param in master_params:\n if param.grad is not None: param.grad.div_(self.loss_scale)\n if self.clip is not None:\n for group in self.master_pgs: nn.utils.clip_grad_norm_(group, self.clip)\n # Check if it's been long enough without overflow\n if self.dynamic:\n self.count += 1\n if self.count == self.scale_wait:\n self.count = 0\n self.loss_scale *= self.div_factor\n\n def after_step(self):\n self.model.zero_grad() #Zero the gradients of the model manually (optimizer disconnected)\n to_model_params(self.model_pgs, self.master_pgs, self.flat_master)\n\n def after_batch(self):\n if self.training: self.learn.loss_grad /= self.loss_scale #Log correct loss\n def after_fit(self):\n if not hasattr(self,'master_pgs'): return\n _copy_state(self.learn.opt, self.master_pgs, self.model_pgs)\n self.learn.opt.param_lists = self.old_pgs\n delattr(self, \"master_pgs\")\n delattr(self, \"model_pgs\")\n delattr(self, \"old_pgs\")\n\n _docs = dict(before_fit=\"Put the model in FP16 and prepare the two copies of the parameters\",\n before_batch=\"Put the input in FP16\",\n after_pred=\"Put the output back to FP32 so that the loss is computed in FP32\",\n before_backward=\"Apply loss scaling to avoid gradient underflow\",\n before_step=\"Update and apply dynamic loss scaling, move gradients to fp32, apply gradient clipping\",\n after_step=\"Zero fp16 grads and update fp16 params with fp32 params. 
\",\n after_batch=\"Ensure loss is logged correctly\",\n after_fit=\"Put the model back in FP32\")\n\n#|hide\nclass TestBeforeMixedPrecision(Callback):\n order=-55\n def before_fit(self): test_eq(first(self.model.parameters()).dtype, torch.float32)\n def before_batch(self): test_eq(self.x.dtype, torch.float32)\n def after_pred(self): test_eq(self.pred.dtype, torch.float16)\n def after_loss(self): self.tst_loss = self.learn.loss_grad.detach().clone()\n def before_step(self):\n self.learn.has_overflown = grad_overflow(self.non_native_mixed_precision.model_pgs)\n self.grads = [p.grad.data.clone() for p in self.model.parameters()]\n self.old_params = [p.data.clone() for p in self.model.parameters()]\n def after_cancel_step(self): assert self.has_overflown\n\nclass TestAfterMixedPrecision(Callback):\n order=65\n def before_fit(self): test_eq(first(self.model.parameters()).dtype, torch.float16)\n def after_fit(self): test_eq(first(self.model.parameters()).dtype, torch.float32)\n def before_batch(self): test_eq(self.x.dtype, torch.float16)\n def after_pred(self): test_eq(self.pred.dtype, torch.float32)\n def before_backward(self):\n loss_scale = self.non_native_mixed_precision.loss_scale if self.training else 1.\n test_eq(self.loss_grad, self.test_before_mixed_precision.tst_loss * loss_scale) \n def before_step(self):\n tbmp = self.test_before_mixed_precision\n test_eq(self.loss_grad, tbmp.loss_grad)\n #Test gradients have been copied and scaled back\n test_close(sum([[p.grad.data for p in pg] for pg in self.non_native_mixed_precision.master_pgs], []),\n [g.float()/self.non_native_mixed_precision.loss_scale for g in tbmp.grads])\n def after_batch(self):\n if self.has_overflown: return\n tbmp,mp =self.test_before_mixed_precision,self.non_native_mixed_precision\n #Test master params have been copied to model\n test_close(sum([[p.data for p in pg] for pg in mp.master_pgs], []),\n [p.data.float() for p in self.model.parameters()], eps=1e-3)\n #Test update has been done properly\n for p,g,op in zip(self.model.parameters(), tbmp.grads, tbmp.old_params):\n test_close(p.data.float(), op.float() - self.lr*g.float()/self.non_native_mixed_precision.loss_scale, eps=1e-3)\n\n#|hide\n#cuda\nlearn = synth_learner(cbs=[ModelToHalf(), NonNativeMixedPrecision()], cuda=True)\nlearn.model = nn.Sequential(nn.Linear(1,1), nn.Linear(1,1)).cuda()\nlearn.opt_func = partial(SGD, mom=0.)\nlearn.splitter = lambda m: [list(m[0].parameters()), list(m[1].parameters())]\nlearn.fit(3, cbs=[TestAfterMixedPrecision(), TestBeforeMixedPrecision()])\n#Check loss scale did change\nassert 1 < learn.non_native_mixed_precision.loss_scale < 2**24\n#Check the model did train\nfor v1,v2 in zip(learn.recorder.values[0], learn.recorder.values[-1]): assert v2<v1\n\n#|hide\n#cuda\nlearn = synth_learner(cbs=[ModelToHalf(), NonNativeMixedPrecision(dynamic=False)], cuda=True)\nlearn.model = nn.Sequential(nn.Linear(1,1), nn.Linear(1,1)).cuda()\nlearn.opt_func = partial(SGD, mom=0.)\nlearn.splitter = lambda m: [list(m[0].parameters()), list(m[1].parameters())]\nlearn.fit(3, cbs=[TestAfterMixedPrecision(), TestBeforeMixedPrecision()])\n#Check loss scale did mot change\ntest_eq(learn.non_native_mixed_precision.loss_scale,512)\n#Check the model did train\nfor v1,v2 in zip(learn.recorder.values[0], learn.recorder.values[-1]): assert v2<v1\n\n#|export\n@patch\n@delegates(NonNativeMixedPrecision.__init__)\ndef to_non_native_fp16(self:Learner, **kwargs): return self.add_cbs([ModelToHalf(), NonNativeMixedPrecision(**kwargs)])\n\n#cuda\nlearn = 
synth_learner(cuda=True)\nlearn.model = nn.Sequential(nn.Linear(1,1), nn.Linear(1,1)).cuda()\nlearn.opt_func = partial(SGD, mom=0.)\nlearn.splitter = lambda m: [list(m[0].parameters()), list(m[1].parameters())]\nlearn.to_non_native_fp16()\nlearn.fit(3, cbs=[TestAfterMixedPrecision(), TestBeforeMixedPrecision()])\n#Check the model did train\nfor v1,v2 in zip(learn.recorder.values[0], learn.recorder.values[-1]): assert v2<v1\n\n#|hide\n#cuda\nlearn = synth_learner(cuda=True)\nlearn.model = nn.Sequential(nn.Linear(1,1), nn.Linear(1,1)).cuda()\nlearn.opt_func = partial(SGD, mom=0.9)\nlearn.splitter = lambda m: [list(m[0].parameters()), list(m[1].parameters())]\nlearn.to_non_native_fp16()\nlearn.freeze()\nlearn.create_opt()\ninit_ps = [p for pg in learn.opt.param_groups for p in pg]\nlearn.fit(3)\nfinal_ps = [p for pg in learn.opt.param_groups for p in pg]\nfor p1,p2 in zip(init_ps, final_ps): test_is(p1, p2)\n#First param groups has no state because not trained\ntest_eq([learn.opt.state[p] for p in learn.opt.param_lists[0]], [{}, {'do_wd': False}])\n#Second param groups has state \nfor p in learn.opt.param_lists[1]: assert 'grad_avg' in learn.opt.state[p]\n\n#|export\n@patch\ndef to_non_native_fp32(self: Learner): return self.remove_cbs([ModelToHalf, NonNativeMixedPrecision])\n\n#cuda\nlearn = learn.to_non_native_fp32()", "Export -", "#|hide\nfrom nbdev.export import *\nnotebook2script()" ]
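Not part of the fastai notebook: for readers who want the bare PyTorch pattern that the `MixedPrecision` callback automates, here is a minimal `autocast`/`GradScaler` training-step sketch on a toy model. The model, data and hyperparameters are invented for illustration; only the scaling calls mirror the callback above.

```python
# Minimal sketch of the autocast + GradScaler pattern wrapped by MixedPrecision.
import torch
from torch import nn
from torch.cuda.amp import autocast, GradScaler

device = "cuda" if torch.cuda.is_available() else "cpu"
model = nn.Linear(10, 1).to(device)
opt = torch.optim.SGD(model.parameters(), lr=1e-2)
scaler = GradScaler(enabled=(device == "cuda"))

x = torch.randn(32, 10, device=device)
y = torch.randn(32, 1, device=device)

for _ in range(3):
    opt.zero_grad()
    with autocast(enabled=(device == "cuda")):   # forward pass in mixed precision
        loss = nn.functional.mse_loss(model(x), y)
    scaler.scale(loss).backward()                # backward on the scaled loss
    scaler.step(opt)                             # unscales grads, skips step on inf/nan
    scaler.update()                              # adjusts the loss scale dynamically
```

`scaler.step(opt)` silently skips the optimizer step when the unscaled gradients contain inf/nan, which is the same behaviour the callback detects through its fake-optimizer trick.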
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
spacecowboy/article-annriskgroups-source
RPartVariables.ipynb
gpl-3.0
[ "RPartVariables\nThis script runs repeated cross-validation as a search for suitable parameter values for RPart.\nIt has been re-run for all data-sets and the plotted results for each were considered. It was determined\nthat the default parameters had the best overall performance in terms of medial survival time.", "# import stuffs\n%matplotlib inline\nimport numpy as np\nimport pandas as pd\nfrom pyplotthemes import get_savefig, classictheme as plt\nfrom lifelines.utils import k_fold_cross_validation\nplt.latex = True", "Load data", "from datasets import get_nwtco, get_colon, get_lung, get_pbc, get_flchain\n\n# Values of (trn, test)\ndatasets = {}\n\n# Add the data sets\nfor name, getter in zip([\"pbc\", \"lung\", \"colon\", \"nwtco\", \"flchain\"],\n [get_pbc, get_lung, get_colon, get_nwtco, get_flchain]):\n trn = getter(norm_in=True, norm_out=False, training=True)\n datasets[name] = trn\n \n cens = (trn.iloc[:, 1] == 0)\n censcount = np.sum(cens) / trn.shape[0]\n print(name, \"censed:\", censcount)", "Helper methods", "from pysurvival.rpart import RPartModel\nfrom stats import surv_area\nfrom lifelines.estimation import KaplanMeierFitter, median_survival_times\n\n\ndef score(T_actual, labels, E_actual):\n '''\n Return a score based on grouping\n '''\n scores = []\n labels = labels.ravel()\n for g in ['high', 'mid', 'low']:\n members = labels == g\n \n if np.sum(members) > 0:\n kmf = KaplanMeierFitter()\n kmf.fit(T_actual[members],\n E_actual[members],\n label='{}'.format(g))\n \n # Last survival time\n if np.sum(E_actual[members]) > 0:\n lasttime = np.max(T_actual[members][E_actual[members] == 1])\n else:\n lasttime = np.nan\n \n # End survival rate, median survival time, member count, last event\n subscore = (kmf.survival_function_.iloc[-1, 0],\n median_survival_times(kmf.survival_function_),\n np.sum(members),\n lasttime)\n else:\n # Rpart might fail in this respect\n subscore = (np.nan, np.nan, np.sum(members), np.nan)\n \n scores.append(subscore)\n return scores\n\n# Use for validation - should this be negative??\ndef high_median_time(T_actual, labels, E_actual):\n members = (labels == 'high').ravel()\n if np.sum(members) > 0:\n kmf = KaplanMeierFitter()\n kmf.fit(T_actual[members],\n E_actual[members])\n return median_survival_times(kmf.survival_function_)\n else:\n return np.nan\n \ndef area(T_actual, labels, E_actual):\n mem = (labels == 'high').ravel()\n if np.any(mem):\n high_area = surv_area(T_actual[mem], E_actual[mem])\n else:\n high_area = np.nan\n \n mem = (labels == 'low').ravel()\n if np.any(mem):\n low_area = surv_area(T_actual[mem], E_actual[mem])\n else:\n low_area = np.nan\n \n return [low_area, high_area]", "Default and possible values", "default_values = dict(highlim=0.1,\n lowlim=0.1,\n minsplit=20,\n minbucket=None,\n xval=3,\n cp=0.01)\npossible_values = dict(cp=[0.01, 0.05, 0.1],\n xval=[0, 3, 5, 7, 10],\n minsplit=[1, 5, 10, 20, 50])\n\n# Update values as we go along\n#try:\n# current_values\n#except NameError:\ncurrent_values = default_values.copy()\n\nprint(current_values)", "Winner is determined by looking at median survival time.", "def get_winning_value(values, repeat_results, current_val):\n winner = 0, 0\n for i, x in enumerate(values):\n mres = np.median(np.array(repeat_results)[:, i, :])\n # For stability\n if mres > winner[0] or (x == current_val and mres >= winner[0]):\n winner = mres, x\n return winner[1]", "Choose a data set", "d = datasets['colon']\ndurcol = d.columns[0]\neventcol = d.columns[1]", "Run comparison of parameters", 
"print(\"Starting values\")\nfor k, v in current_values.items():\n print(\" \", k, \"=\", v)\n\n# Repeat all variables\nn = 1score\nk = 3\nrepcount = 0\nstable = False\nwhile repcount < 4 and not stable:\n repcount += 1\n print(repcount)\n stable = True\n for key, values in sorted(possible_values.items()):\n print(key)\n models = []\n for x in values:\n kwargs = current_values.copy()\n kwargs[key] = x\n model = RPartModel(**kwargs)\n model.var_label = key\n model.var_value = x\n models.append(model)\n # Train and test\n repeat_results = []\n for rep in range(n):\n result = k_fold_cross_validation(models, d, durcol, eventcol, \n k=k, \n evaluation_measure=high_median_time, \n predictor='predict_classes')\n repeat_results.append(result)\n # See who won\n winval = get_winning_value(values, repeat_results, current_values[key])\n if winval != current_values[key]:\n stable = False\n print(key, current_values[key], \"->\", winval)\n current_values[key] = winval\n\nprint(\"\\nValues optimized after\", repcount, \"iterations\")\nfor k, v in current_values.items():\n print(\" \", k, \"=\", v)\n\n# Just print results from above\nprint(\"\\nValues optimized after\", repcount, \"iterations\")\nfor k, v in current_values.items():\n print(\" \", k, \"=\", v)", "Get some plottable results\nLet's see how the parameters perform", "#netcount = 6\nmodels = []\n# Try different epoch counts\nkey = 'minsplit'\nfor x in possible_values[key]:\n kwargs = current_values.copy()\n kwargs[key] = x\n e = RPartModel(**kwargs)\n e.var_label = key\n e.var_value = x\n models.append(e)\n\n\nn = 10\nk = 3\n# Repeated cross-validation\nrepeat_results = []\nfor rep in range(n):\n print(\"n =\", rep)\n # Training\n #result = k_fold_cross_validation(models, d, durcol, eventcol, k=k, evaluation_measure=logscore, predictor='get_log')\n # Validation\n result = k_fold_cross_validation(models, d, durcol, eventcol, k=k,\n evaluation_measure=area,\n predictor='predict_classes')\n repeat_results.append(result)\n#repeat_results", "Plot results", "def plot_score_multiple(repeat_results, models):\n boxes = []\n labels = []\n var_label = None\n \n for i, m in enumerate(models):\n labels.append(str(m.var_value))\n var_label = m.var_label\n vals = []\n for result in repeat_results:\n for kscore in result[i]:\n vals.append(kscore[1])\n boxes.append(vals)\n\n plt.figure()\n plt.boxplot(boxes, labels=labels, vert=False, colors=plt.colors[:len(models)])\n plt.ylabel(var_label)\n plt.title(\"Cross-validation: n={} k={}\".format(n, k))\n plt.xlabel(\"Something..\")\n #plt.gca().set_xscale('log')\n \nplot_score_multiple(repeat_results, models)\n\ndef plot_score(repeat_results, models):\n boxes = []\n labels = []\n var_label = None\n # Makes no sense for low here for many datasets...\n for i, m in enumerate(models):\n labels.append(str(m.var_value))\n var_label = m.var_label\n vals = []\n for result in repeat_results:\n vals.extend(result[i])\n boxes.append(vals)\n\n plt.figure()\n plt.boxplot(boxes, labels=labels, vert=False, colors=plt.colors[:len(models)])\n plt.ylabel(var_label)\n plt.title(\"Cross-validation: n={} k={}\".format(n, k))\n plt.xlabel(\"Median survival time (max={:.0f})\".format(d[durcol].max()))\n #plt.gca().set_xscale('log')\n \nplot_score(repeat_results, models)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
yw-fang/readingnotes
machine-learning/handson_scikitlearn_tf_2017/ch01-notebook.ipynb
apache-2.0
[ "Ch01\nWhat is machine learning\nThere are seveal different definitions of ML.\nFor me, my favouriate definition comes from Tom Mitchell, 1997 in a engineering-oriented way:\nA computer program is said to learn from experience E with respect to some task T and some performance \nmeasure P, if its performance on T, as measured by P, improves with experice E.\nWhy use machine learning\nMachine learning is great for:\ncomlex problem/fluctuating environments/large data/no good solution using traditionl method\nTypes of Machine Learning Systems\nWe can classify them in borad categories based on:\n\n\nwhether or not they are trained with human supervison (supervise, unsupervised, semisupervised, reinforcement)\n\n\nSupervised learning: traning data includins the desired solutions, called labels. 邮件的垃圾邮件分类是一个非常好的supervised learning的例子。在训练集中,每一个sample(邮件)都被标记为spam或者ham,这就是labels,而这个过程的最终结果就是让邮箱系统能够自己将邮件进行分类。\n另一个典型的任务是去预测一个目标数值,即regression问题。例如预测一辆车的价格:给定一些列的汽车features(mileage,age,brand等等),这些特征被称作 predictors。为了实现训练目标,我们需要一个包含许多案例的数据集,其中不仅包含了这些predictors,还有相对应的labels(这里,就是价格)。\n\n\n注意 attribute 和 feature 的区别:尽管很多人经常相互替换地使用这两个词。但是严格地说,在机器学习中,attribute 是一种data type,例如 Mileage。然而 feature 则可以根据不同上下文关系有不同的含义,但通常意义上来说,feature 是 attribute 加上它的 value,例如 Mileage = 15000.\n注意 feature 和 preditors 的区别:在我看来,predictors 一定属于feature,但是feature不一定就是predictor。只有当你把这个feature用来作为训练时,这个feature才叫做predictor\n\n\nUnsupervised learning: 没有labels的机器学习。\n\n\nSemisupervised learning: partially labeled traning data\n\n\nReinforcement learning: it is a very different beast (野兽). The learning system, called an agent in this context,\n can observe the environment, slect, and perform actions, and get rewards (or the nagative rewards, penalties). It must then learn by itself what is the best strategy, called a policy, to get the most reward over time. A policy defines what action the agent should choose when it is in a given situation.\n\n\nwhether or not they can incrementally on the fly (online versus batch learning)\n\n\nbatch learning: incapable of learning incrementally; offline learning\n\n\nonline learning: learning incarementally; Note tahat the who process is usually done offline (i.e., not on the live system), so online learning can be a confusing name. 
Think of it as incremental learning.\n\n\nwhether they work by simply comparing new data points to known data points, or instead detect patterns in the traning data and build a predictive model, much like scientists do (instance-based versus model-based learning).\n\n\nExample 1-1\nModel-based learning\nMake some preparations for the environment", "# To support both python 2 and python 3\nfrom __future__ import division, print_function, unicode_literals\n\n# Common imports\nimport numpy as np\nimport os\n\n# to make this notebook's output stable across runs\nnp.random.seed(42) # I don't understand this line very much!\n\n# To plot pretty figures\n%matplotlib inline\nimport matplotlib\nimport matplotlib.pyplot as plt\nplt.rcParams['axes.labelsize'] = 14\nplt.rcParams['xtick.labelsize'] = 12\nplt.rcParams['ytick.labelsize'] = 12\n\n# Where to save the figures\nPROJECT_ROOT_DIR = \".\"\nCHAPTER_ID = \"fundamentals\"\n\n# prepare a function to save figures\ndef save_fig(fig_id, tight_layout=True, dpi=300):\n path = os.path.join(PROJECT_ROOT_DIR, \"images\", CHAPTER_ID, fig_id + \".png\")\n print(\"Saving figure\", fig_id)\n if tight_layout:\n plt.tight_layout()\n plt.savefig(path, format='png', dpi=dpi)\n\n# Ignore useless warnings (see SciPy issue #5998)\nimport warnings\nwarnings.filterwarnings(action=\"ignore\", module=\"scipy\", message=\"^internal gelsd\")", "Author's Note\nAuthor's note: This function just merges the OECD's life satisfaction data and the IMF's GDP per capita data. It's a bit too long and boring and it's not specific to Machine Learning, which is why I left it out of the book.\nPrepare a function which merges the data into DataFrame", "\n# Define a function\ndef prepare_country_stats(oecd_bli, gdp_per_capita):\n oecd_bli = oecd_bli[oecd_bli[\"INEQUALITY\"]==\"TOT\"]\n oecd_bli = oecd_bli.pivot(index=\"Country\", columns=\"Indicator\", values=\"Value\")\n gdp_per_capita.rename(columns={\"2015\": \"GDP per capita\"}, inplace=True)\n gdp_per_capita.set_index(\"Country\", inplace=True)\n full_country_stats = pd.merge(left=oecd_bli, right=gdp_per_capita,\n left_index=True, right_index=True)\n full_country_stats.sort_values(by=\"GDP per capita\", inplace=True)\n remove_indices = [0, 1, 6, 8, 33, 34, 35]\n keep_indices = list(set(range(36)) - set(remove_indices))\n return full_country_stats[[\"GDP per capita\", 'Life satisfaction']].iloc[keep_indices]", "I just tweaked the data files here to fetch the files in datasets/lifesat.\nPlot the data pints with scatters", "# Code example\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport sklearn.linear_model\nimport os\n\n# Load the data thousands : str, default None, 千分位分割符,如“,”或者“.\"\n\ndatapath = os.path.join(\"datasets\", \"lifesat\", \"\")\noecd_bli = pd.read_csv(datapath + \"oecd_bli_2015.csv\", thousands=',')\ngdp_per_capita = pd.read_csv(datapath + \"gdp_per_capita.csv\",thousands=',',\n delimiter='\\t',\n encoding='latin1', na_values=\"n/a\")\n\n# Prepare the data\ncountry_stats = prepare_country_stats(oecd_bli, gdp_per_capita)\n# print(type(country_stats)) # country_stats is a DataFrame.\n# print(country_stats.head(10))\nxdata = np.c_[country_stats[\"GDP per capita\"]]\n# print(xdata[:10]) # xdata and ydata are python lists.\nydata = np.c_[country_stats[\"Life satisfaction\"]]\n# print(ydata[:10])\n\n# Visualize the data by scatter\ncountry_stats.plot(kind='scatter', s=80, color='red',\n x=\"GDP per capita\", y = \"Life satisfaction\")\n\n# Select a linear model\nmodel = 
sklearn.linear_model.LinearRegression()\n\n# Train the model\nmodel.fit(xdata, ydata)\n# Get the optimized parameters for the model\nk, b = model.intercept_[0], model.coef_[0][0]\nprint(k, b)", "Plot the best fit", "country_stats.plot(kind='scatter', s=80, color='red',\n x=\"GDP per capita\", y = \"Life satisfaction\")\n\n# plot the best fit, k, b can be found in the output of previous cell\nb = 4.853052800266436\nk = 4.911544589158484E-5\nx_tmp = np.linspace(0, 60000, 1000)\nplt.plot(x_tmp, b + k*x_tmp, \"blue\")\n# print(x_tmp)", "Predict Life satisfaction for a new instance", "# Make a prediction for Cyprus\nx_new = [[22587]] # Cyprus's GDP per capita\nprint(\"Life satisfaction of Cyprus is\", model.predict(x_new))\n", "Apply instance-based learning to Example 1-1\nIn the following cell, I use the $K$-Nearest Neighbours regression model to train on the data. It's a widely used instance-based learning algorithm.\nIn this example, I will use $K$ = 3.", "# Select a model\n# from sklearn.neighbors import NearestNeighbors\nfrom sklearn.neighbors import KNeighborsRegressor\nclf = KNeighborsRegressor(n_neighbors=3)\n\n# Train the model\nclf.fit(xdata, ydata)\n\n# Make a prediction for Cyprus\nx_new = [[22587]] # Cyprus's GDP per capita\nprint(clf.predict(x_new))\n", "If it (instance-based learning) goes well, our model will make good predictions; however, if it doesn't work well, we need to use more attributes like employment rate, health, air pollution, etc. In other words, we need to get more good-quality data, or perhaps select a more powerful model like a Polynomial Regression model.\nIn summary, a machine learning project usually looks like\n\n\nWe studied the data\n\n\nWe selected a model\n\n\nWe trained it on the training data (i.e. the learning algorithm searched for the model parameter values that minimize a cost function).\n\n\nFinally, we applied the model to make predictions on new cases (this is called $inference$), hoping that this model will generalize well.\n\n\nThe challenges of Machine Learning\nThe things that may turn your machine learning project into a mess are 'bad algorithms' and 'bad data'.\nWe usually need to achieve a tradeoff between data development and spending money and time on algorithm development.\nData part\nInsufficient quantity of training data\nNonrepresentative training data\nCheck whether your dataset has sampling bias/sampling noise or not. \nPoor-quality data\nIt's often well worth the effort to spend time cleaning up the training data. The truth is most data scientists spend a significant part of their time doing just that. For example:\n\n\nIf some instances are clearly outliers, it may help to simply discard them or try to fix the errors manually\n\n\nIf some instances are missing a few features (e.g., 50% of the customers did not specify their age), you must decide whether you want to ignore this attribute altogether, ignore these instances, fill in the missing values (e.g., with the median age), or train one model with the feature and one model without it, and so on\n\n\nIrrelevant Features\nIncluding too many irrelevant features will influence machine learning results. 
Hence, feature engineering is quite important, and it usually involves\n\n\nFeature selection: choose the most useful features to train on among existing features\n\n\nFeature extraction: combining existing features to produce a more useful one (dimensionality reduction algorithms can help)\n\n\nCreating new features by gathering new data\n\n\nAlgorithm part\nOverfitting the training data\nOverfitting: the model performs well on the training data, but does not generalize well. This usually happens when the model is highly complex with respect to the amount and noisiness of the training data.\nTo address this problem, there are some solutions we can try:\n\n\nTo simplify the model \n\n\nTo get more good data\n\n\nTo reduce the noise in the existing training data (fix or remove some bad data)\n\n\nConstraining a model to make it simpler and reduce the risk of overfitting: regularization.\nTo simplify a model, we can use other models instead of it, or we can simplify it by constraining it. The latter (constraining the model) is called regularization.\nThe amount of regularization to apply during learning can be controlled by a hyperparameter, which is set before training and will not be influenced by the learning algorithm.\nUnderfitting the training data\nUnderfitting is the opposite of overfitting: it occurs when your model is too simple to learn the underlying structure of the data.\nThe options to fix underfitting:\n\n\nselect a more powerful model\n\n\nfeed better features\n\n\nreduce the constraints on the model (e.g. reducing the regularization hyperparameter)\n\n\nStepping back\nLet us step back and look at the big picture:\nIn a ML project, we feed the training set to a learning algorithm. If the algorithm is model-based, it tunes some parameters to fit the model to the training set, and we hope it will be able to make good predictions on new cases as well. If the algorithm is instance-based, it just learns the examples by heart and uses a similarity measure to generalize to new instances.\nTesting and validating\nWe train the model using the training set and test the model using the test set. The error rate on new cases is called the 'generalization error' or 'out-of-sample error', and by evaluating our model on the test set, we can estimate this error. This value indicates how well our model will perform on new instances.\nIn general, use 80% of the data for training and hold out 20% for testing.\nHowever, sometimes we may find that even though the generalization error on the test set is quite low, the model still performs badly on new instances outside the dataset; the solution is to have a second holdout set called the 'validation set'. \nTo avoid 'wasting' too much training data in validation sets, a common technique is to use cross-validation: the training set is split into complementary subsets, and each model is trained against a different combination of these subsets and validated against the remaining parts. Once the model type and hyperparameters have been selected, a final model is trained using these hyperparameters on the full training set, and the generalization error is measured on the test set.\n-------------No Free Lunch Theorem-------------------\nA model is a simplified version of the observations. In the famous paper 'The lack of a priori distinctions between learning algorithms' by D. Wolpert in 1996, it was demonstrated that if you make absolutely no assumptions about the data, then there is no reason to prefer one model over any other. 
This is called the 'No Free Lunch Theorem'.", "10**9.5", "Example code of using Regularization to reduce the risk of overfitting", "%matplotlib inline\nimport matplotlib.style\nimport matplotlib as mpl\nmpl.style.use('classic')\nmpl.rcParams['figure.facecolor'] = '1'\n#if choose the grey backgroud, use 0.75\nmpl.rcParams['figure.figsize'] = [10,8/1.618]\nmpl.rcParams['lines.linewidth'] = 2.2\nmpl.rcParams['legend.fancybox'] = True\nmpl.rcParams['legend.fontsize'] = 19\nmpl.rcParams['legend.scatterpoints'] = 1 #scatterpoints,\nmpl.rcParams[\"axes.formatter.useoffset\"]=False #turn off the axis offset-values. \n# If on. the axis label will use a offset value by the side of axis\nmpl.rcParams[\"axes.linewidth\"] = 2.5 #change the boarder width\n#plt.rcParams[\"axes.edgecolor\"] = \"0.15\" #change the boarder color\nticklabel_size = 22\nmpl.rcParams['xtick.labelsize'] = ticklabel_size\nmpl.rcParams['ytick.labelsize'] = ticklabel_size\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom pandas import DataFrame\nimport sklearn\n\n\n\n########################Load data########################\n# Define a function used for merging data\ndef prepare_country_stats(oecd_bli, gdp_per_capita):\n oecd_bli = oecd_bli[oecd_bli[\"INEQUALITY\"]==\"TOT\"]\n oecd_bli = oecd_bli.pivot(index=\"Country\", columns=\"Indicator\", values=\"Value\")\n gdp_per_capita.rename(columns={\"2015\": \"GDP per capita\"}, inplace=True)\n gdp_per_capita.set_index(\"Country\", inplace=True)\n full_country_stats = pd.merge(left=oecd_bli, right=gdp_per_capita,\n left_index=True, right_index=True)\n full_country_stats.sort_values(by=\"GDP per capita\", inplace=True)\n return full_country_stats[[\"GDP per capita\", 'Life satisfaction']]\n# remove_indices = [0, 1, 6, 8, 33, 34, 35]\n# keep_indices = list(set(range(36)) - set(remove_indices))\n# return full_country_stats[[\"GDP per capita\", 'Life satisfaction']].iloc[keep_indices]\n\nremove_indices = [0, 1, 6, 8, 33, 34, 35]\nkeep_indices = list(set(range(36)) - set(remove_indices))\n\ndatapath = os.path.join(\"datasets\", \"lifesat\", \"\")\noecd_bli = pd.read_csv(datapath + \"oecd_bli_2015.csv\", thousands=',')\ngdp_per_capita = pd.read_csv(datapath + \"gdp_per_capita.csv\",thousands=',',\n delimiter='\\t',\n encoding='latin1', na_values=\"n/a\")\n\n# Prepare the data\nprint(keep_indices)\nfull_data = prepare_country_stats(oecd_bli, gdp_per_capita)\n\n# devide the data into two part. 
I will compare the different learning by using partial data and all data\nsample_data = full_data.iloc[keep_indices]\nsupplementary_data = full_data.iloc[remove_indices]\nprint(type(sample_data))\n\n\n#################training#########################\n## Method 1: use sample_data for training, use LinearRegression algorithm\n#choose data\nxdata_sample = np.c_[sample_data['GDP per capita']]\nydata_sample = np.c_[sample_data['Life satisfaction']]\n#choose model\nlinear_model1 = sklearn.linear_model.LinearRegression()\n# train the model\nlinear_model1.fit(xdata_sample, ydata_sample)\n# Get the optimized paraters for the model\nb1, k1 = linear_model1.intercept_[0], linear_model1.coef_[0][0]\nprint(k1,b1)\n\n## Method 2: use full_data for training, use LinearRegression algorithm\n# choose data\nxdata_full = np.c_[full_data['GDP per capita']]\nydata_full = np.c_[full_data['Life satisfaction']]\n# choose model, use the same model as method 1\nlinear_model2 = sklearn.linear_model.LinearRegression()\n# train the model\nlinear_model2.fit(xdata_full, ydata_full)\n# Get the optimized paraters for the model\nb2, k2 = linear_model2.intercept_[0], linear_model2.coef_[0][0]\nprint(k2,b2)\n\n\n## Method 3: use sample_data for training, perform \"Regularization\" to reduce the risk of overfitting\n# choose data, here, I use the data as Method 1\n# choose model, use the Ridge regularization method\nridge_model = sklearn.linear_model.Ridge(alpha=10**9.5) \n# train the model\nridge_model.fit(xdata_sample, ydata_sample)\nb3, k3 = ridge_model.intercept_[0], ridge_model.coef_[0][0]\n\n## Method 3-1:\nridge_model_1 = sklearn.linear_model.Ridge(alpha=20**9.5) \nridge_model_1.fit(xdata_sample, ydata_sample)\nb4, k4 = ridge_model_1.intercept_[0], ridge_model_1.coef_[0][0]\n\n#################Ploting part#####################\nfig1 = plt.figure()\nax1=fig1.add_subplot(1,1,1)\n\n# plot all the data using scatters\nax1.plot(sample_data['GDP per capita'], sample_data['Life satisfaction'], 'o',color='red',markersize=15,\n label='sample dataset')\nax1.plot(supplementary_data['GDP per capita'], supplementary_data['Life satisfaction'], '<', color='blue',\n markersize=15, label='supplementary dataset')\n\n\n\n# plot the fitting by Method 1\n# XX = np.linspace(0, 120000, 1000)\nXX = np.linspace(0, 110000, 1000)\nax1.plot(XX, k1*XX+b1, \"b-\", label='Linear model on sample data')\n\n# plot the fitting by Method 2\nax1.plot(XX, k2*XX+b2, \"b--\", label='Linear model on all the data')\n\n# plot the fitting by Method 3: apply the L2 regularization\nax1.plot(XX, k3*XX+b3, \"-\", color='orange', label=\"Regularization on sample data\")\n\n# plot the fitting by Method 3-1: apply the L2 regularization, but with a very large value of alpha\nax1.plot(XX, k4*XX+b4, \"--\", color='orange', label=r\"Regularization on sample data with laerger $\\alpha$\")\n\n\nax1.legend(loc='best')\nplt.show()", "Regularization method to the partial data (sample_data) is more generalized than the\noriginal linear model on the same data. We can find that it reduces the overfitting.\nHowever, if we choose a too large $\\alpha$ (i.e., 20**9.5), it leads to underfitting.\nHence, a good choice of $\\alpha$ is quite important. For more details, please read the section of\n\"L1 and L2 Regularization Methods\" in \nch01-appendix.ipynb" ]
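Not part of the original notebook: a compact sketch of how the Ridge penalty shrinks the fitted slope as `alpha` grows, on synthetic GDP-like data (the data and the alpha values are arbitrary illustrations of the over/underfitting discussion above).

```python
# Sketch: the Ridge penalty shrinks the fitted slope as alpha grows.
import numpy as np
from sklearn.linear_model import LinearRegression, Ridge

rng = np.random.default_rng(42)
X = rng.uniform(0, 6e4, size=(30, 1))                  # GDP-like scale
y = 4.85 + 5e-5 * X[:, 0] + rng.normal(0, 0.3, 30)     # satisfaction-like target

print("OLS slope:", LinearRegression().fit(X, y).coef_[0])
for alpha in [1e3, 1e6, 10**9.5]:
    model = Ridge(alpha=alpha).fit(X, y)
    print(f"Ridge(alpha={alpha:.3g}) slope: {model.coef_[0]:.3e}")
```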
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
neoscreenager/JupyterNotebookWhirlwindTourOfPython
whirlwind1.ipynb
gpl-3.0
[ "I = iter([2, 4, 6, 8, 10])\n\nprint next(I)\n\nprint next(I)\n\n\nprint next(I)", "bookmark : http://nbviewer.jupyter.org/github/jakevdp/WhirlwindTourOfPython/blob/master/10-Iterators.ipynb", "L = [2, 4, 6, 8, 10] #use enumerate to get both index and value of a list\nfor i, val in enumerate(L):\n print(i, val)", "zip\nOther times, you may have multiple lists that you want to iterate over simultaneously. You could certainly iterate over the index as in the non-Pythonic example we looked at previously, but it is better to use the zip iterator, which zips together iterables:", "L = [2, 4, 6, 8, 10]\nR = [3, 6, 9, 12, 15]\nfor lval, rval in zip(L, R):\n print(lval, rval)", "map and filter\nThe map iterator takes a function and applies it to the values in an iterator:", "\n\n# find the first 10 square numbers\nsquare = lambda x: x ** 2\nfor val in map(square, range(10)):\n print(val)\n\n", "The filter iterator looks similar, except it only passes-through values for which the filter function evaluates to True:", "\n\n# find values up to 10 for which x % 2 is zero\nis_even = lambda x: x % 2 == 0\nfor val in filter(is_even, range(10)):\n print(val)\n\n", "Yields and generators need to revisit to grasp the concept fully. To access the chapter, use this link:\nhttp://nbviewer.jupyter.org/github/jakevdp/WhirlwindTourOfPython/blob/master/12-Generators.ipynb", "Next jumping to data science tool " ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
beangoben/HistoriaDatos_Higgs
Dia1/3_Intro a Matplotlib-Vacio.ipynb
gpl-2.0
[ "Intro a Matplotlib\nMatplotlib = Libreria para graficas cosas matematicas\nQue es Matplotlib?\n\nMatplotlin es un libreria para crear imagenes 2D de manera facil.\nChecate mas en :\n\nPagina oficial : http://matplotlib.org/\nGalleria de ejemplo: http://matplotlib.org/gallery.html\nUna libreria mas avanzada que usa matplotlib, Seaborn: http://stanford.edu/~mwaskom/software/seaborn/\nLibreria de visualizacion interactiva: http://bokeh.pydata.org/\nBuenisimo Tutorial: http://www.labri.fr/perso/nrougier/teaching/matplotlib/\n\nPara usar matplotlib, solo tiene que importar el modulo ..tambien te conviene importar numpy pues es muy util", "import numpy as np # modulo de computo numerico\nimport matplotlib.pyplot as plt # modulo de graficas\nimport pandas as pd # modulo de datos\n# esta linea hace que las graficas salgan en el notebook\n%matplotlib inline", "Crear graficas (plot)\nCrear graficas es muy facil en matplotlib, si tienes una lista de valores X y otra y..solo basta usar :\nPodemos usar la funcion np.linspace para crear valores en un rango, por ejemplo si queremos 100 numeros entre 0 y 10 usamos:\nY podemos graficar dos cosas al mismo tiempo:\nQue tal si queremos distinguir cada linea? Pues usamos legend(), de leyenda..tambien tenemos que agregarles nombres a cada plot\nTambien podemos hacer mas cosas, como dibujar solamente los puntos, o las lineas con los puntos usando linestyle:\nDibujando puntos (scatter)\nAveces no queremos dibujar lineas, sino puntos, esto nos da informacion de donde se encuentras datos de manera espacial. Para esto podemos usarlo de la siguiente manera:\nPero ademas podemos meter mas informacion, por ejemplo dar colores cada punto, o darle tamanos diferentes:\nHistogramas (hist)\nLos histogramas nos muestran distribuciones de datos, la forma de los datos, nos muestran el numero de datos de diferentes tipos:\notro tipo de datos, tomados de una campana de gauss, es decir una distribucion normal:\nBases de datos en el internet\nAveces los datos que queremos se encuentran en el internet. Asumiendo que se encuentran ordenados y en un formato amigable siempre los podemos bajar y guardar como un DataFrame.\nPor ejemplo:\nGapminder es una pagina con mas de 500 conjunto de daatos relacionado a indicadores globales como ingresos, producto interno bruto (PIB=GDP) y esperanza de vida.\nAqui bajamos la base de datos de esperanza de vida, lo guardamos en memoria y lo lodeamos como un excel:\nOjo! Aqui usamos .head() para imprimir los primeros 5 renglones del dataframe pues son gigantescos los datos.", "xurl=\"http://spreadsheets.google.com/pub?key=phAwcNAVuyj2tPLxKvvnNPA&output=xls\"\ndf=pd.read_excel(xurl)\nprint(\"Tamano completo es %s\"%str(df.shape))\ndf.head()", "Arreglando los Datos\nHead nos permite darle un vistazo a los datos... asi a puro ojo vemos que las columnas son anios y los renglones los paises...ponder reversar esto con transpose, pero tambien vemos que esta con indices enumerados, prefeririamos que los indices fueran los paises, entonces los cambiamos y tiramos la columna que ya no sirve...al final un head para ver que todo esta bien... a este juego de limpiar y arreglar datos se llama \"Data Wrangling\"", "df = df.rename(columns={'Life expectancy with projections. 
Yellow is IHME': 'Life expectancy'})\ndf.index=df['Life expectancy']\ndf=df.drop('Life expectancy',axis=1)\ndf=df.transpose()\ndf.head()", "Entonces ahora podemos ver la calidad de vida en Mexico atravez del tiempo:", "df['Mexico'].plot()\nprint(\"== Esperanza de Vida en Mexico ==\")", "de esta visualizacion vemos que la caldiad ha ido subiendo apartir de 1900, ademas vemos mucho movimiento entre 1890 y 1950, justo cuando habia muchas guerras en Mexico.\nTambien podemos seleccionar un rango selecto de años, vemos que este rango es interesante entonces", "subdf=df[ df.index >= 1890 ]\nsubdf=subdf[ subdf.index <= 1955 ]\nsubdf['Mexico'].plot()\nplt.title(\"Esperanza de Vida en Mexico entre 1890 y 1955\")\nplt.show()", "o sin tanto rollo, podemos restringuir el rango de nuestra grafica con xlim (los limites del eje X)", "df['Mexico'].plot()\nplt.xlim(1890,1955)\nplt.title(\"Esperanza de Vida en Mexico entre 1890 y 1955\")\nplt.show()", "Tambien es importante ver como esto se compara con otros paises, podemos comparar con todo Norteamerica:", "df[['Mexico','United States','Canada']].plot()\nplt.title(\"Esperanza de Vida en Norte-America\")\nplt.show()", "Ejercicios:\n\nCompara la esperanza de vida en Latino America (o al menos algunos paises de ella).\nSolo grafica los años entre 1900 y 2000, tambien 2000-2014.\nQuita los paises que tienen valores 'Nan', checa la funcion .dropna().\nSaca estadisticas para paises Latino Americanos.\nLo mismo de arriba para diferentes periodos 1800-1900, 1900-2000, 2000-2014" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
cyucheng/skimr
jupyter/2_Data_Exploration.ipynb
bsd-3-clause
[ "Data Exploration\nPerform exploratory data analysis on highlights and fulltexts of articles from Medium.com\nBefore trying to train a model, I performed exploratory analysis to visualize features of sentences from the corpus that I had scraped from Medium.com.", "import os, time, re, pickle\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom datetime import timedelta, date\nimport urllib\nimport html5lib\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom bs4 import BeautifulSoup, SoupStrainer\n", "NOTES\n\n\nFIND DUPLICATES\n\n\nTo calculate:\n\n\nLength of highlight\n\n\nFind highlight in fulltext (calculate when it starts, as percentage) -- break fulltext and highlight into words?\n\n\nPercent length of highlight in fulltext\n\n\nTF/IDF\n\n\nGrade level of fulltext and highlight\n\n\nCHECK LENGTH OF HIGHLIGHTS", "with open('/Users/clarencecheng/Dropbox/~Insight/skimr/datasets/highlights_20170606_00-54-27.txt','r') as fhigh:\n hlist = fhigh.readlines()\n \nmatword = []\nmatchar = []\nid_hl = []\nid_no = []\nct_hl = 0\nct_no = 0\nhtext = []\n\nfor line in hlist:\n \n # Analyze length of highlights and number of lines with highlights\n line = line.strip().split('\\t')\n if len(line) <= 2:\n id_no.append(line[1])\n ct_no += 1\n else:\n lenword = len(line[2].split())\n lenchar = len(line[2])\n # print( line )\n # print( length )\n matword.append(lenword)\n matchar.append(lenchar)\n id_hl.append(line[1])\n htext.append(line[2])\n ct_hl += 1\n \nprint( 'articles without highlights: '+str(ct_no) )\nprint( 'articles with highlights: '+str(ct_hl) )\nprint( htext[:10] )\nprint( id_hl[:10] )\n\nplt.hist(matword, bins=50)\nplt.title(\"Highlights, 20170606_00-54-27\")\nplt.xlabel(\"Number of words\")\nplt.ylabel(\"Frequency\")\nplt.show()\n\nplt.hist(matchar, bins=50)\nplt.title(\"Highlights, 20170606_00-54-27\")\nplt.xlabel(\"Number of letters\")\nplt.ylabel(\"Frequency\")\nplt.show()\n", "CHECK FOR DUPLICATES\nBecause I scraped 30 popular articles per day with only 5 days between, some articles might be duplicates. 
Here, check for duplicates in the dataset.", "seen = set()\nseen_add = seen.add\n\nhtext_uniq = []\nid_hl_uniq = [] # store ids of first unique highlights\nid_hl_nonu = [] # store ids of non-unique highlights\nidnum = 0\n\nfor x in htext: # get unique highlights, preserving order\n if x in seen:\n id_hl_nonu.append(id_hl[idnum])\n idnum += 1\n continue\n seen_add(x)\n htext_uniq.append(x)\n id_hl_uniq.append(id_hl[idnum])\n idnum += 1\n\nprint(id_hl_uniq[49]) # check that id_hl_uniq matches htext_uniq -- it does!!\nprint(htext_uniq[49])\ncounts = []\ncounts2 = []\nfor x in htext_uniq:\n counts.append(htext_uniq.count(x))\n counts2.append(htext.count(x))\nplt.hist(counts)\nplt.show()\nplt.hist(counts2)\nplt.show()\n\nprint('number of unique highlights: '+str(len(id_hl_uniq)))\nprint('number of non-unique highlights: '+str(len(id_hl_nonu)))\n", "RE-CHECK LENGTH OF HIGHLIGHTS AFTER REMOVING DUPLICATES", "matword_uniq = []\nmatchar_uniq = []\nidnum = 0\nfor line in htext_uniq:\n# print(id_hl_uniq[idnum]+'\\t'+line)\n# print(htext_uniq)\n lenword = len(line.split())\n matword_uniq.append(lenword)\n lenchar = len(line)\n matchar_uniq.append(lenchar)\n idnum += 1\n \nplt.hist(matword_uniq, bins=50)\nplt.title(\"Highlights (Unique), 20170606_00-54-27\")\nplt.xlabel(\"Number of words\")\nplt.ylabel(\"Frequency\")\nplt.show()\n\nplt.hist(matchar_uniq, bins=50)\nplt.title(\"Highlights (Unique), 20170606_00-54-27\")\nplt.xlabel(\"Number of letters\")\nplt.ylabel(\"Frequency\")\nplt.show()\n", "CLEAN UP FULLTEXT\nGet full text of article from html.\n\nnote: fixed to avoid getting comments as well as main text", "fhtml = open('/Users/clarencecheng/Dropbox/~Insight/skimr/datasets/fullhtml_20170606_00-54-27_isolate.txt','r')\n\nid_ft = []\nfullt = []\n\n# num = 0\nfor line in fhtml:\n text = line.strip().split('\\t')\n fullh = text[2]\n fullt_line = []\n\n # testing SoupStrainer\n# # content = SoupStrainer('div',attrs={'data-source':'post_page'})\n# soup = BeautifulSoup(fullh,'lxml')#,parse_only=content)\n# txt0 = soup.find('div',attrs={'data-source':'post_page'})#class_='postArticle-content')\n# # print(txt0)\n# txt1 = txt0.find_all('p',class_='graf')\n# # print(txt1)\n\n soup = BeautifulSoup(fullh,'lxml') #,parse_only=content)\n txt0 = soup.find('div',attrs={'data-source':'post_page'}) #class_='postArticle-content')\n if not txt0:\n print('error! 
skipping '+text[1])\n continue\n txt1 = txt0.find_all('p',class_='graf')\n id_ft.append(text[1])\n\n for line in txt1:\n txt2 = re.sub('<[^>]+>', '', str(line) )\n fullt_line.append(txt2)\n# num+=1\n# if num == 10:\n# break\n\n# print(fullt_line)\n \n fullt.append( fullt_line )\n\nprint(id_ft[2])\nprint(fullt[2])\n\n \n# fhtmldelim.write('\\t'.join(fullh))\n# fhtmldelim.close()\n\n", "Check that id_ft matches fullt matches id_hl_uniq matches htext_uniq -- all good!!", "print(id_ft[64])\nprint(fullt[64])\n", "CHECK LENGTH OF HIGHLIGHTS for second dataset\nAs mentioned before, I originally scraped articles with 10 days' spacing; now, scrape more articles with 10 days' spacing except offset by 5 days relative to original dates scraped.", "\nwith open('/Users/clarencecheng/Dropbox/~Insight/skimr/datasets/highlights_20170606_10-45-58.txt','r') as fhigh2:\n hlist2 = fhigh2.readlines()\n \nmatword2 = []\nmatchar2 = []\nid_hl2 = []\nid_no2 = []\nct_hl2 = 0\nct_no2 = 0\nhtext2 = []\n\nfor line in hlist2:\n \n # Analyze length of highlights and number of lines with highlights\n line = line.strip().split('\\t')\n if len(line) <= 2:\n id_no2.append(str(int(line[1])+2384))\n ct_no2 += 1\n else:\n lenword2 = len(line[2].split())\n lenchar2 = len(line[2])\n # print( line )\n # print( length )\n matword2.append(lenword2)\n matchar2.append(lenchar2)\n id_hl2.append(str(int(line[1])+2384))\n htext2.append(line[2])\n ct_hl2 += 1\n \nprint( 'articles without highlights: '+str(ct_no2) )\nprint( 'articles with highlights: '+str(ct_hl2) )\nprint( htext2[:10] )\nprint( id_hl2[:10] )\n\nplt.hist(matword2, bins=50)\nplt.title(\"Highlights, 20170606_10-45-58\")\nplt.xlabel(\"Number of words\")\nplt.ylabel(\"Frequency\")\nplt.show()\n\nplt.hist(matchar2, bins=50)\nplt.title(\"Highlights, 20170606_10-45-58\")\nplt.xlabel(\"Number of letters\")\nplt.ylabel(\"Frequency\")\nplt.show()\n", "CLEAN UP FULLTEXT for second dataset", "\nfhtml2 = open('/Users/clarencecheng/Dropbox/~Insight/skimr/datasets/fullhtml_20170606_10-45-58_edit_isolate.txt','r')\n\nid_ft2 = []\nfullt2 = []\n\n# num = 0\nfor line in fhtml2:\n text = line.strip().split('\\t')\n fullh = text[2]\n fullt_line = []\n\n# insetCol = SoupStrainer('div',{'class':'section-inner sectionLayout--insetColumn'})\n soup = BeautifulSoup(fullh,'lxml') #,parse_only=content)\n txt0 = soup.find('div',attrs={'data-source':'post_page'}) #class_='postArticle-content')\n if not txt0:\n print('error! 
skipping '+text[1])\n continue\n txt1 = txt0.find_all('p',class_='graf')\n id_ft2.append(str(int(text[1])+2384))\n\n# soup = BeautifulSoup(fullh,'lxml')#,parse_only=insetCol)\n# txt0 = soup.find('div',class_='section-inner sectionLayout--insetColumn')\n# txt1 = txt0.find_all('p',class_='graf')\n\n for line in txt1:\n txt2 = re.sub('<[^>]+>', '', str(line) )\n fullt_line.append(txt2)\n\n# print(fullt_line)\n fullt2.append( fullt_line )\n# num+=1\n# if num == 22:\n# break\n\nprint(id_ft2[9])\nprint(fullt2[9])\nprint(id_ft2)\nprint(str(len(id_ft2)))\nprint(str(len(fullt2)))\n\n \n# fhtmldelim.write('\\t'.join(fullh))\n# fhtmldelim.close()\n", "COMBINE DATASETS 1 AND 2", "id_ft_all = id_ft + id_ft2\nfullt_all = fullt + fullt2\n\nid_hl_all = id_hl + id_hl2\nhtext_all = htext + htext2\n\nprint(str(len(id_ft)))\nprint(str(len(id_ft2)))\nprint(str(len(id_ft_all)))\nprint(str(len(fullt)))\nprint(str(len(fullt2)))\nprint(str(len(fullt_all)))\n\nprint(str(len(id_hl)))\nprint(str(len(id_hl2)))\nprint(str(len(id_hl_all)))\nprint(str(len(htext)))\nprint(str(len(htext2)))\nprint(str(len(htext_all)))\n\nprint(str(id_ft[-1]))", "CHECK FOR DUPLICATES AFTER COMBINING DATASETS", "\nseen = set()\nseen_add = seen.add\n\nhtext_unq = []\nid_hl_unq = [] # store ids of first unique highlights\nid_hl_non = [] # store ids of non-unique highlights\nidnum = 0\n\nfor x in htext_all: # get unique highlights, preserving order\n if x in seen:\n id_hl_non.append(id_hl_all[idnum])\n idnum += 1\n continue\n seen_add(x)\n htext_unq.append(x)\n id_hl_unq.append(id_hl_all[idnum])\n idnum += 1\n\nprint(id_hl_unq[49]) # check that id_hl_uniq matches htext_uniq -- it does!!\nprint(htext_unq[49])\ncounts = []\ncounts2 = []\nfor x in htext_unq:\n counts.append(htext_unq.count(x))\n counts2.append(htext_all.count(x))\nplt.hist(counts)\nplt.show()\nplt.hist(counts2)\nplt.show()\n\nprint('number of unique highlights: '+str(len(id_hl_unq)))\nprint('number of non-unique highlights: '+str(len(id_hl_non)))\n", "Create dictionaries to identify only articles containing a top highlight\n• combine highlights+ids in dict, fulltext+ids in dict; compare and pull out only fulltext with highlights", "\nkeys_fullt = id_ft_all\nvals_fullt = fullt_all\ndict_fullt = dict(zip(keys_fullt,vals_fullt))\n\nkeys_htext = id_hl_unq\nvals_htext = htext_unq\ndict_htext = dict(zip(keys_htext,vals_htext))\n\nkeysf = set(dict_fullt.keys())\nkeysh = set(dict_htext.keys())\nintersect = keysf & keysh\n# print(intersect)\n\nprint(len(keysf))\nprint(len(keysh))\nprint(len(intersect))\n\ninterx = list(map(int,intersect))\ninterx.sort()\ninterx = list(map(str,interx))\n# print(interx)\n\nkeys_h = []\nvals_h = []\nkeys_f = []\nvals_f = []\nfor i in interx:\n keys_h.append(i)\n vals_h.append(dict_htext[i])\n keys_f.append(i)\n vals_f.append(dict_fullt[i])\n\ndict_h = dict(zip(keys_h,vals_h))\ndict_f = dict(zip(keys_f,vals_f))\n\nprint(len(keys_h))\nprint(len(vals_h))\nprint(len(keys_f))\nprint(len(vals_f))\n\nkeys_h == keys_f # True\n\nvals_all = zip(vals_h, vals_f)\ndict_all = dict(zip(keys_h, vals_all))\n\ndict_all['2']\n", "Save dict_all with pickle", "# fdict_all = open('/Users/clarencecheng/Dropbox/~Insight/skimr/datasets/dict_all','wb')\n# pickle.dump(dict_all, fdict_all)\n", "retrieve with pickle", "dict_temp = pickle.load(open('/Users/clarencecheng/Dropbox/~Insight/skimr/dict_all','rb'))\n\n# dict_temp == dict_all", "Write to files", "file_high = open('/Users/clarencecheng/Dropbox/~Insight/skimr/datasets/final_highlights.txt','w')\nfile_full = 
open('/Users/clarencecheng/Dropbox/~Insight/skimr/datasets/final_fulltext.txt','w')\nfile_all = open('/Users/clarencecheng/Dropbox/~Insight/skimr/datasets/final_all.txt','w')\n\nfor i in interx:\n file_high.write(i+'\\t'+dict_h[i]+'\\n')\n file_full.write(i+'\\t'+'|'.join(dict_f[i])+'\\n')\n file_all.write(i+'\\t'+dict_h[i]+'\\t'+'|'.join(dict_f[i])+'\\n')\n\nfile_high.close()\nfile_full.close()\nfile_all.close()\n\n# DELETE HIGHLIGHTS FROM FULLTEXT SENTENCES\n\nn = 0\nsent_all = []\nfor i in data['ids']:\n full = str(' '.join(data['text'][n]))\n high = data['highlights'][n]\n fnoh = full.replace(high,' ')\n\n sent = tokenizer.tokenize(fnoh)\n for j in sent: \n sent_all.append(j) # collect all sentences from all full texts into one list\n\n n+=1\n\nprint(len(sent_all))", "Save all sentences with pickle and save dataframe with pickle", "fsent_all = open('/Users/clarencecheng/Dropbox/~Insight/skimr/datasets/sent_all','wb')\npickle.dump(sent_all, fsent_all)\n\n\n\n# data = pd.DataFrame({'ids':keys_h, 'highlights':vals_h, 'text':vals_f})\nfdata = open('/Users/clarencecheng/Dropbox/~Insight/skimr/datasets/data_pd','wb')\npickle.dump(data,fdata)\n\ndata_temp = pickle.load(open('/Users/clarencecheng/Dropbox/~Insight/skimr/datasets/data_pd','rb'))\ndata_temp == data", "REANALYSIS - 6/8/2017\nload dict_all from pickled file as dict_temp", "# print(dict_temp['2'])\nkeys = []\nvals_h = []\nvals_f = []\nfor key, val in dict_temp.items():\n keys.append(key)\n vals_h.append(val[0])\n vals_f.append(val[1])\ndata_tmp = pd.DataFrame({'ids':keys, 'highlights':vals_h, 'text':vals_f})\n\ndata_temp = pickle.load(open('/Users/clarencecheng/Dropbox/~Insight/skimr/datasets/data_pd','rb'))\ndata_tmp == data_temp\ndata_tmp.equals(data_temp)\n\n", "FIND HIGHLIGHT IN FULL TEXT (START AND END POSITION)\n\ncalculate fraction of way through document (in sentences) (maybe words later?) (why not characters?)\ncalculate fraction length (highlight / document) (in words and sentences)", "import nltk.data\n\n# # read dict_all into list\n# data = [(k,v) for k,v in dict_all.items()]\n# # d_high = data[1][0]\n# # print(data[1])\n\n# READ dict_all into pandas\nsent_join = []\n\n# CONVERT list of paragraphs in 'text' column into string containing all text\nfor i in data['text']:\n sent = str(' '.join(i))\n sent_join.append(str(sent))\n# print(sent) # '\\u200a' appears when not using print fxn\n# sent\n# print(sent_join[-1])\n# sent_join[-1]\n\n# BREAK text into sentences and find fraction of sentences into text that highlight appears\ntokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\n\nn = 0\ncount_in = 0\ncount_out = 0\nlenfs = []\nposhs = []\nfracs = []\n# sent_all = []\nfor i in data['ids']:\n full = str(' '.join(data['text'][n]))\n sent = tokenizer.tokenize(full)\n# for j in sent: \n# sent_all.append(j) # collect all sentences from all full texts into one list\n lenf = len(sent)\n lenfs.append(lenf)\n high = tokenizer.tokenize(data['highlights'][n])\n try:\n posh = sent.index(high[0])\n poshs.append(posh)\n fracs.append(posh/lenf)\n# print('highlight pos: '+str(posh)+'\\tnumber of sent: '+str(lenf)+'\\tfraction: '+str(posh/lenf))\n count_in += 1\n except ValueError:\n# print('highlight not in sentence list!')\n count_out += 1\n pass\n n += 1\n# if n == 6:\n# break\n \nprint(count_in)\nprint(count_out)\n\n# print(len(sent_all))\n\n\n", "ALTERNATIVE:\n\nFind fraction of words into text that highlight appears, then...\nFind which sentence num that word belongs to and calc sentence fraction for highlight" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
kubeflow/code-intelligence
Issue_Embeddings/notebooks/Get-GitHub-Issues.ipynb
mit
[ "Fetch GitHub Issues and Compute Embeddings\n\nThis notebook downloads GitHub Issues and then computes the embeddings using a trained model\n\nissues_loader.ipynb is a very similar notebook\n\n\nThat notebook however just uses the IssuesLoader class as way of hard coding some paths.\n\n\nRunning this Notebook\n\nThis notebook was last run on [gcr.io/kubeflow-images-public/tensorflow-1.15.2-notebook-gpu:1.0.0]\nResource specs\nCPU 15\n\nRAM 32Gi\n\n\nIf kernel dies while computing embeddings it could be because you run out of memory\n\n\nCompute: This notebook was run on a p3.8xlarge on AWS\nTesla V100 GPU, 32 vCPUs 244GB of Memory", "import logging\nimport os\nfrom pathlib import Path\nimport sys\n\nlogging.basicConfig(format='%(message)s')\nlogging.getLogger().setLevel(logging.INFO)\n\nhome = str(Path.home())\n\n# Installing the python packages locally doesn't appear to have them automatically\n# added the path so we need to manually add the directory\nlocal_py_path = os.path.join(home, \".local/lib/python3.6/site-packages\")\n\nfor p in [local_py_path, os.path.abspath(\"../../py\")]:\n if p not in sys.path:\n logging.info(\"Adding %s to python path\", p)\n # Insert at front because we want to override any installed packages\n sys.path.insert(0, p)\n\n\n!pip3 install --user --upgrade -r ../requirements.txt\n\nfrom bs4 import BeautifulSoup\nimport requests\nfrom fastai.core import parallel, partial\n\nfrom collections import Counter\nfrom tqdm import tqdm_notebook\nimport torch\nfrom code_intelligence import embeddings\nfrom code_intelligence import graphql\nfrom code_intelligence import gcs_util\nfrom google.cloud import storage", "Get a list of Kubeflow REPOs\n\nYou will need to either set a GitHub token or use a GitHub App in order to call the API\nTODO(jlewi): This is no longer really necessary since we are using BigQuery now to fetch the data we can query by org", "if not os.getenv(\"GITHUB_TOKEN\"):\n logging.warning(f\"No GitHub token set defaulting to hardcode list of Kubeflow repositories\")\n \n # The list of repos can be updated using the else block\n repo_names = ['arena', 'batch-predict', 'caffe2-operator', 'chainer-operator', 'code-intelligence', 'common', 'community', 'crd-validation', 'example-seldon', 'examples', 'fairing', 'features', 'frontend', 'homebrew-cask', 'homebrew-core', 'internal-acls', 'katib', 'kfctl', 'kfp-tekton', 'kfserving', 'kubebench', 'kubeflow', 'manifests', 'marketing-materials', 'metadata', 'mpi-operator', 'mxnet-operator', 'pipelines', 'pytorch-operator', 'reporting', 'testing', 'tf-operator', 'triage-issues', 'website', 'xgboost-operator']\nelse:\n gh_client = graphql.GraphQLClient()\n \n repo_query=\"\"\"query repoQuery($org: String!) 
{\n organization(login: $org) {\n repositories(first:100) {\n totalCount \n edges {\n node {\n name\n }\n }\n }\n }\n }\n \"\"\"\n variables = {\n \"org\": \"kubeflow\",\n }\n results = gh_client.run_query(repo_query, variables)\n repo_nodes = graphql.unpack_and_split_nodes(results, [\"data\", \"organization\", \"repositories\", \"edges\"])\n repo_names = [n[\"name\"] for n in repo_nodes]\n\n \",\".join([f\"'{n}'\" for n in sorted(repo_names)])\n names_str = \", \".join([f\"'{n}'\" for n in sorted(repo_names)])\n print(f\"[{names_str}]\")", "Get The Data", "import pandas as pd\nfrom inference import InferenceWrapper", "Load Model Artifacts (Download from GC if not on local)\n\nWe need to load the model used to compute embeddings", "from pathlib import Path\nfrom urllib import request as request_url\n\ndef pass_through(x):\n return x\n\nmodel_url = 'https://storage.googleapis.com/issue_label_bot/model/lang_model/models_22zkdqlr/trained_model_22zkdqlr.hdf'\ninference_wrapper = embeddings.load_model_artifact(model_url)", "Warning: The below cell benefits tremendously from parallelism, the more cores your machine has the better\n\nThe code will fail if you aren't running with a GPU\n\nGet the Data Using BigQuery\n\nWe can use BigQuery to fetch the data from the GitHub Archive\nHere is a list of GitHub Event Types\nWe need to consider both IssuesEvent and IssueCommentEvent\nAt the time of this writing 2020/04/08 there are approximately 137K events in Kubeflow and it takes O(30) seconds to fetch all of them.\nTODO\nIt looks like when we transfer a repo or maybe an issue we end up with duplicate entries with diffferent URLs (original and new one). We should look into dedupping those", "from pandas.io import gbq\nimport subprocess \n# TODO(jlewi): Get the project using fairing?\nPROJECT = subprocess.check_output([\"gcloud\", \"config\", \"get-value\", \"project\"]).strip().decode()\n\n# TODO(jlewi): This code should now be a function in embeddings/github_bigquery.py\nquery = \"\"\"SELECT \n JSON_EXTRACT(payload, '$.issue.html_url') as html_url,\n JSON_EXTRACT(payload, '$.issue.title') as title,\n JSON_EXTRACT(payload, '$.issue.body') as body,\n JSON_EXTRACT(payload, \"$.issue.labels\") as labels,\n JSON_EXTRACT(payload, \"$.issue.updated_at\") as updated_at,\n org.login,\n type,\n FROM `githubarchive.month.20*`\n WHERE (type=\"IssuesEvent\" or type=\"IssueCommentEvent\") and org.login = 'kubeflow'\"\"\"\nissues_and_pulls=gbq.read_gbq(query, dialect='standard', project_id=PROJECT)", "Pull request comments also get included so we need to filter those out", "import re\npattern = re.compile(\".*issues/[\\d]+\")\nissues_index = issues_and_pulls[\"html_url\"].apply(lambda x: pattern.match(x) is not None)\nissues=issues_and_pulls[issues_index]", "We need to group the events by issue and then select the most recent event for each issue as that should have\n the most up to date labels for each issue\nTODO(jlewi): We should look for the most recent event in the dataset and then have some alert if the age exceeds some\n limit as that indicates the data isn't up to date.", "latest_issues = issues.groupby(\"html_url\", as_index=False).apply(lambda x: x.sort_values([\"updated_at\"]).iloc[-1])\n\n# Example of fetching a specific issue\n# This allows easy spot checking of the data\nsome_issue = \"https://github.com/kubeflow/kubeflow/issues/4916\"\ntest_issue = latest_issues.loc[latest_issues[\"html_url\"]==f'\"{some_issue}\"']\ntest_issue", "We need to parse the labels which are json and get the names", "import 
json\ndef get_labels(x):\n d = json.loads(x)\n return [i[\"name\"] for i in d]\n\nlatest_issues[\"parsed_labels\"] = latest_issues[\"labels\"].apply(get_labels)", "We need to deserialize the json strings to remove escaping", "for f in [\"html_url\", \"title\", \"body\"]:\n latest_issues[f] = latest_issues[f].apply(lambda x : json.loads(x))", "Compute Embeddings\n\nFor each repo compute the embeddings and save to GCS\nTODO(jlewi): Can we use the metadata storage to keep track of artifacts?", "input_data = latest_issues[[\"title\", \"body\"]]\n\nissue_embeddings = inference_wrapper.df_to_embedding(input_data)\n\nissue_embeddings.shape", "Sanity Check the embeddings\n\nWe want to make sure the embeddings are computed the same way as during inference time\nDuring inference IssueLabelerPredict.predict_labels_for_issue calls embeddings.get_issue_text to fetch the body and title\nWe call embeddings.get_issue_text one of the issues to make sure it matches the data in the dataframe from which we compute the embeddings\n\nThis calls the /text on the embeddings microservice\n\n\nTODO(https://github.com/kubeflow/code-intelligence/issues/126) The label bot microservice needs to be updated to actually\n use the GraphQL API to match this code. Hopefully, in the interim the model is robust to slight deviations caused\n by the differences in whitespace", "from code_intelligence import util as code_intelligence_util\n\nissue_index = 1020\nlogging.info(f\"Fetching issue {latest_issues.iloc[issue_index]['html_url']}\")\nissue_owner, issue_repo, issue_num = code_intelligence_util.parse_issue_url(latest_issues.iloc[issue_index][\"html_url\"].strip(\"\\\"\"))\n\nsome_issue_data = embeddings.get_issue(latest_issues.iloc[issue_index][\"html_url\"], gh_client)\n\nsome_issue_data\n\nprint(latest_issues.iloc[issue_index][\"title\"])\nprint(some_issue_data[\"title\"])\nprint(latest_issues.iloc[issue_index][\"body\"])\nprint(some_issue_data[\"body\"])\nsome_issue_data[\"title\"] == latest_issues.iloc[issue_index][\"title\"]\nsome_issue_data[\"body\"] == latest_issues.iloc[issue_index][\"body\"]", "Compare the embeddings computed in this notebook to the embeddings computed using inference_wrapper", "dict_for_embeddings = inference_wrapper.process_dict(some_issue_data)\n\ninference_wrapper.get_pooled_features(dict_for_embeddings['text']).detach().cpu().numpy()\n\nissue_embeddings[issue_index,:]", "Save the issues and embeddings to an HDF5 file", "import h5py\nimport datetime\n\nnow = code_intelligence_util.now().isoformat()\n\ngit_tag = subprocess.check_output([\"git\", \"describe\", \"--tags\", \"--always\", \"--dirty\"]).decode().strip()\nfile_name = f\"kubeflow_issue_embeddings_{now}.hdf5\"\nlocal_file = os.path.join(home, file_name)\n\nlatest_issues.to_hdf(local_file, \"issues\", mode=\"a\")\n\nh5_file = h5py.File(local_file, mode=\"a\")\n\nh5_file.create_dataset(\"issue_embeddings\", data=issue_embeddings)\n\n# store some metadata\nh5_file.attrs[\"file\"] = \"Get-GitHub-Issues.ipynb\"\nh5_file.attrs[\"git-tag\"] = git_tag \n\nh5_file.close()", "Save Embeddings to GCS", "embeddings_file = os.path.join(embeddings_dir, file_name)\nif gcs_util.check_gcs_object(embeddings_file):\n logging.info(f\"File {embeddings_file} exists\")\nelse: \n logging.info(f\"Copying {local_file} to {embeddings_file}\") \n gcs_util.copy_to_gcs(local_file, embeddings_file)\n\nembeddings_file", "Notes\nIt takes 4min to retrieve embeddings and labels for Kubeflow\\Kubeflow this time can likely be brought down to 1 minute by batching the text 
instead of feeding the language model one by one." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
fastai/fastai
nbs/38_tutorial.text.ipynb
apache-2.0
[ "#|hide\n#|skip\n! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab\n\nfrom fastai.text.all import *\n\n#|all_slow", "Transfer learning in text\n\nHow to fine-tune a language model and train a classifier\n\nIn this tutorial, we will see how we can train a model to classify text (here based on their sentiment). First we will see how to do this quickly in a few lines of code, then how to get state-of-the art results using the approach of the ULMFit paper.\nWe will use the IMDb dataset from the paper Learning Word Vectors for Sentiment Analysis, containing a few thousand movie reviews.\nTrain a text classifier from a pretrained model\nWe will try to train a classifier using a pretrained model, a bit like we do in the vision tutorial. To get our data ready, we will first use the high-level API:\nUsing the high-level API\nWe can download the data and decompress it with the following command:", "path = untar_data(URLs.IMDB)\npath.ls()\n\n(path/'train').ls()", "The data follows an ImageNet-style organization, in the train folder, we have two subfolders, pos and neg (for positive reviews and negative reviews). We can gather it by using the TextDataLoaders.from_folder method. The only thing we need to specify is the name of the validation folder, which is \"test\" (and not the default \"valid\").", "dls = TextDataLoaders.from_folder(untar_data(URLs.IMDB), valid='test')", "We can then have a look at the data with the show_batch method:", "dls.show_batch()", "We can see that the library automatically processed all the texts to split then in tokens, adding some special tokens like:\n\nxxbos to indicate the beginning of a text\nxxmaj to indicate the next word was capitalized\n\nThen, we can define a Learner suitable for text classification in one line:", "learn = text_classifier_learner(dls, AWD_LSTM, drop_mult=0.5, metrics=accuracy)", "We use the AWD LSTM architecture, drop_mult is a parameter that controls the magnitude of all dropouts in that model, and we use accuracy to track down how well we are doing. We can then fine-tune our pretrained model:", "learn.fine_tune(4, 1e-2)\n\nlearn.fine_tune(4, 1e-2)", "Not too bad! To see how well our model is doing, we can use the show_results method:", "learn.show_results()", "And we can predict on new texts quite easily:", "learn.predict(\"I really liked that movie!\")", "Here we can see the model has considered the review to be positive. The second part of the result is the index of \"pos\" in our data vocabulary and the last part is the probabilities attributed to each class (99.1% for \"pos\" and 0.9% for \"neg\"). \nNow it's your turn! Write your own mini movie review, or copy one from the Internet, and we can see what this model thinks about it. \nUsing the data block API\nWe can also use the data block API to get our data in a DataLoaders. This is a bit more advanced, so fell free to skip this part if you are not comfortable with learning new APIs just yet.\nA datablock is built by giving the fastai library a bunch of information:\n\nthe types used, through an argument called blocks: here we have images and categories, so we pass TextBlock and CategoryBlock. 
To inform the library our texts are files in a folder, we use the from_folder class method.\nhow to get the raw items, here our function get_text_files.\nhow to label those items, here with the parent folder.\nhow to split those items, here with the grandparent folder.", "imdb = DataBlock(blocks=(TextBlock.from_folder(path), CategoryBlock),\n get_items=get_text_files,\n get_y=parent_label,\n splitter=GrandparentSplitter(valid_name='test'))", "This only gives a blueprint on how to assemble the data. To actually create it, we need to use the dataloaders method:", "dls = imdb.dataloaders(path)", "The ULMFiT approach\nThe pretrained model we used in the previous section is called a language model. It was pretrained on Wikipedia on the task of guessing the next word, after reading all the words before. We got great results by directly fine-tuning this language model to a movie review classifier, but with one extra step, we can do even better: the Wikipedia English is slightly different from the IMDb English. So instead of jumping directly to the classifier, we could fine-tune our pretrained language model to the IMDb corpus and then use that as the base for our classifier.\nOne reason, of course, is that it is helpful to understand the foundations of the models that you are using. But there is another very practical reason, which is that you get even better results if you fine tune the (sequence-based) language model prior to fine tuning the classification model. For instance, in the IMDb sentiment analysis task, the dataset includes 50,000 additional movie reviews that do not have any positive or negative labels attached in the unsup folder. We can use all of these reviews to fine tune the pretrained language model — this will result in a language model that is particularly good at predicting the next word of a movie review. In contrast, the pretrained model was trained only on Wikipedia articles.\nThe whole process is summarized by this picture:\n\nFine-tuning a language model on IMDb\nWe can get our texts in a DataLoaders suitable for language modeling very easily:", "dls_lm = TextDataLoaders.from_folder(path, is_lm=True, valid_pct=0.1)", "We need to pass something for valid_pct otherwise this method will try to split the data by using the grandparent folder names. By passing valid_pct=0.1, we tell it to get a random 10% of those reviews for the validation set.\nWe can have a look at our data using show_batch. Here the task is to guess the next word, so we can see the targets have all shifted one word to the right.", "dls_lm.show_batch(max_n=5)", "Then we have a convenience method to directly grab a Learner from it, using the AWD_LSTM architecture like before. We use accuracy and perplexity as metrics (the later is the exponential of the loss) and we set a default weight decay of 0.1. to_fp16 puts the Learner in mixed precision, which is going to help speed up training on GPUs that have Tensor Cores.", "learn = language_model_learner(dls_lm, AWD_LSTM, metrics=[accuracy, Perplexity()], path=path, wd=0.1).to_fp16()", "By default, a pretrained Learner is in a frozen state, meaning that only the head of the model will train while the body stays frozen. We show you what is behind the fine_tune method here and use a fit_one_cycle method to fit the model:", "learn.fit_one_cycle(1, 1e-2)", "This model takes a while to train, so it's a good opportunity to talk about saving intermediary results. 
\nYou can easily save the state of your model like so:", "learn.save('1epoch')", "It will create a file in learn.path/models/ named \"1epoch.pth\". If you want to load your model on another machine after creating your Learner the same way, or resume training later, you can load the content of this file with:", "learn = learn.load('1epoch')", "We can them fine-tune the model after unfreezing:", "learn.unfreeze()\nlearn.fit_one_cycle(10, 1e-3)", "Once this is done, we save all of our model except the final layer that converts activations to probabilities of picking each token in our vocabulary. The model not including the final layer is called the encoder. We can save it with save_encoder:", "learn.save_encoder('finetuned')", "Jargon: Encoder: The model not including the task-specific final layer(s). It means much the same thing as body when applied to vision CNNs, but tends to be more used for NLP and generative models.\n\nBefore using this to fine-tune a classifier on the reviews, we can use our model to generate random reviews: since it's trained to guess what the next word of the sentence is, we can use it to write new reviews:", "TEXT = \"I liked this movie because\"\nN_WORDS = 40\nN_SENTENCES = 2\npreds = [learn.predict(TEXT, N_WORDS, temperature=0.75) \n for _ in range(N_SENTENCES)]\n\nprint(\"\\n\".join(preds))", "Training a text classifier\nWe can gather our data for text classification almost exactly like before:", "dls_clas = TextDataLoaders.from_folder(untar_data(URLs.IMDB), valid='test', text_vocab=dls_lm.vocab)", "The main difference is that we have to use the exact same vocabulary as when we were fine-tuning our language model, or the weights learned won't make any sense. We pass that vocabulary with text_vocab.\nThen we can define our text classifier like before:", "learn = text_classifier_learner(dls_clas, AWD_LSTM, drop_mult=0.5, metrics=accuracy)", "The difference is that before training it, we load the previous encoder:", "learn = learn.load_encoder('finetuned')", "The last step is to train with discriminative learning rates and gradual unfreezing. In computer vision, we often unfreeze the model all at once, but for NLP classifiers, we find that unfreezing a few layers at a time makes a real difference.", "learn.fit_one_cycle(1, 2e-2)", "In just one epoch we get the same result as our training in the first section, not too bad! We can pass -2 to freeze_to to freeze all except the last two parameter groups:", "learn.freeze_to(-2)\nlearn.fit_one_cycle(1, slice(1e-2/(2.6**4),1e-2))", "Then we can unfreeze a bit more, and continue training:", "learn.freeze_to(-3)\nlearn.fit_one_cycle(1, slice(5e-3/(2.6**4),5e-3))", "And finally, the whole model!", "learn.unfreeze()\nlearn.fit_one_cycle(2, slice(1e-3/(2.6**4),1e-3))" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
gear/motifwalk
notebooks/Random walk statistic.ipynb
mit
[ "Statistic of random walk-generated graph context\nIn my master studies, I am interested in complex networks and their building blocks - motifs. Inspired by the skipgram NLP model and the adaptation to network DeepWalk, I think that if we use network motifs as a biased random walk pattern, we can generate a more expressive graph context in term of mesoscopic structure emphasis.", "from sys import path\npath.append('./../src/') # import prototype modules\nfrom constrains import R, UTriangle, UWedge\nfrom walks import WalkGenerator\nimport networkx as nx\nimport numpy as np\nimport sklearn as sk\nfrom matplotlib import pyplot as plt\nimport pickle\nfrom collections import defaultdict as dd\nimport time\n%matplotlib inline\n\ndata_root = './../data/'", "Amazon co-purchase network", "amazon = pickle.load(open(data_root+'amazon.graph', 'rb'))\nassert isinstance(amazon, nx.Graph)\nassert amazon.size() == 925872", "Random walk context (identical to DeepWalk) is generated by WalkGenerator with different constrains.", "random_walker = WalkGenerator(graph=amazon, constrain=R())", "We want to create a random context, each walk length is 80, there is 20000 walks in total.", "def get_graph_context(random_walker, num_walk=20000, walk_length=80):\n \"\"\"Return randomly generated graph context\"\"\"\n t0 = t1 = time.time()\n random_context = [i[:] for i in random_walker._gen(num_walk, walk_length)]\n node_count = dd(int)\n for node_list in random_context:\n for i in node_list:\n node_count[i] += 1\n sorted_ids = sorted(node_count,\n key=lambda i: node_count[i],\n reverse=True)\n t1 = time.time()\n print(\"Time elapsed: {}\".format(t1-t0))\n return random_context, node_count, sorted_ids\n\namazon_random_context, amazon_random_node_count, amazon_sorted_ids = get_graph_context(random_walker)", "amazon_random_node_count contains 263385 unique nodes over 1,600,000 nodes in random walk. Maximum node id is 110284 with 622 occurences.", "def plot_freq_dist(shorted_ids, node_count):\n fig, (ax, ax_log) = plt.subplots(2,1)\n x = np.arange(0, len(node_count), 1, dtype=int)\n y = [node_count[shorted_ids[i]] for i in x]\n ax.plot(x, y)\n ax_log.loglog(x, y)\n plt.show()\n \nplot_freq_dist(amazon_sorted_ids, amazon_random_node_count)\n\ntriangle_walker = WalkGenerator(graph=amazon, constrain=UTriangle())\namazon_triangle_context, amazon_triangle_node_count, amazon_triangle_sorted_ids = get_graph_context(triangle_walker)\n\nplot_freq_dist(amazon_triangle_sorted_ids, amazon_triangle_node_count)\n\namazon_triangle_sorted_ids[:10]\n\namazon_sorted_ids[:10]", "Cora citation network", "cora = pickle.load(open(data_root+'cora.graph', 'rb'))", "First I will try to plot the same walk statistic on undirected cora graph.", "ucora = cora.to_undirected() # Convert to undirected graph\n\nrandom_ucora_walker = WalkGenerator(graph=ucora, constrain=R())\n\ncora_random_context, cora_random_node_count, cora_sorted_ids = get_graph_context(random_ucora_walker)\n\nplot_freq_dist(cora_sorted_ids, cora_random_node_count)\n\ntriangle_ucora_walker = WalkGenerator(graph=ucora, constrain=UTriangle())\n\ncora_triangle_context, cora_triangle_node_count, cora_triangle_sorted_ids = get_graph_context(triangle_ucora_walker)\n\nplot_freq_dist(cora_triangle_sorted_ids, cora_triangle_node_count)\n\nwedge_ucora_walker = WalkGenerator(graph=ucora, constrain=UWedge())\ncora_wedge_context, cora_wedge_node_count, cora_wedge_sorted_ids = get_graph_context(wedge_ucora_walker)\nplot_freq_dist(cora_wedge_sorted_ids, cora_wedge_node_count)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
Jackporter415/phys202-2015-work
assignments/assignment03/NumpyEx02.ipynb
mit
[ "Numpy Exercise 2\nImports", "import numpy as np\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport seaborn as sns", "Factorial\nWrite a function that computes the factorial of small numbers using np.arange and np.cumprod.", "def np_fact(n):\n \"\"\"Compute n! = n*(n-1)*...*1 using Numpy.\"\"\"\n #Creates array from 1 to n\n c = np.arange(1,n+1,1)\n #Returns a 1D array of the factorials of each number\n a = c.cumprod()\n #Settles the 0 and 1 case\n if n == 0 or n == 1:\n return 1\n #returns the last number in the array (The one we are looking for)\n else: \n return a[-1]\n\n\nassert np_fact(0)==1\nassert np_fact(1)==1\nassert np_fact(10)==3628800\nassert [np_fact(i) for i in range(0,11)]==[1,1,2,6,24,120,720,5040,40320,362880,3628800]", "Write a function that computes the factorial of small numbers using a Python loop.", "def loop_fact(n):\n \"\"\"Compute n! using a Python for loop.\"\"\"\n #Creates a list from 0 to n\n array = [0,n+1]\n #i is a counting variable, number is the placeholder\n i = 0\n number = 1\n #0 and 1 case\n if n == 0 or n == 1:\n return 1\n #while i is less than the number count up to the number and multiply it by the previous numbers (number)\n else:\n while i < n:\n i += 1\n number = number * i\n return number\n\n\n\nassert loop_fact(0)==1\nassert loop_fact(1)==1\nassert loop_fact(10)==3628800\nassert [loop_fact(i) for i in range(0,11)]==[1,1,2,6,24,120,720,5040,40320,362880,3628800]", "Use the %timeit magic to time both versions of this function for an argument of 50. The syntax for %timeit is:\npython\n%timeit -n1 -r1 function_to_time()", "%timeit -n1 -r1 np_fact(50)\n%timeit -n1 -r1 loop_fact(50)\n", "In the cell below, summarize your timing tests. Which version is faster? Why do you think that version is faster?\nloop_fact() is faster. This was the function that used python loops rather than numpy. This could be because the numpy function goes through an entire list of factorials while the python loop calculates one factorial in one loop." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
johnpfay/environ859
06_WebGIS/Notebooks/Bird-Demo-Reuben.ipynb
gpl-3.0
[ "Applied example of scraping the Handbook of Birds of the World to get a list of subspecies for a given bird species.", "#Import modules\nimport requests\nfrom bs4 import BeautifulSoup\n\n#Example URL\ntheURL = \"https://www.hbw.com/species/brown-wood-owl-strix-leptogrammica\"\n\n#Get content of the species web page\nresponse = requests.get(theURL)\n\n#Convert to a \"soup\" object, which BS4 is designed to work with\nsoup = BeautifulSoup(response.text,'lxml')", "Introspection of the source HTML of the species web page reveals that the sub-species listings fall within a section (div in HTML lingo) labeled \"&lt;div class=\"ds-ssp_comp&gt;\" in the HTML. So we'll search the 'soup' for this section, which returns a list of one object, then we extract that one object to a variable named subSection. \n\nhttps://www.crummy.com/software/BeautifulSoup/bs4/doc/#searching-by-css-class", "#Find all sections with the CSS class 'ds-ssp_comp' and get the first (only) item found\ndiv = soup.find_all('div',class_='ds-ssp_comp')\nsection = div[0]", "All the entries with the tag &lt;em&gt; are the subspecies entries.", "#Find all lines in the section with the tag 'em'\nsubSpecies = section.find_all('em')", "We can loop through each subspecies found and print its name", "#Extract to a variable\nfor subSpp in subSpecies:\n print (subSpp.get_text())" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
arcyfelix/Courses
17-09-17-Python-for-Financial-Analysis-and-Algorithmic-Trading/02-NumPy/3-Numpy-Operations.ipynb
apache-2.0
[ "<a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a>\n\n<center>Copyright Pierian Data 2017</center>\n<center>For more information, visit us at www.pieriandata.com</center>\nNumPy Operations\nArithmetic\nYou can easily perform array with array arithmetic, or scalar with array arithmetic. Let's see some examples:", "import numpy as np\narr = np.arange(0, 10)\n\narr + arr\n\narr * arr\n\narr - arr\n\n# Warning on division by zero, but not an error!\n# Just replaced with nan\narr / arr\n\n# Also warning, but not an error instead infinity\n1 / arr\n\narr ** 3", "Universal Array Functions\nNumpy comes with many universal array functions, which are essentially just mathematical operations you can use to perform the operation across the array. Let's show some common ones:", "#Taking Square Roots\nnp.sqrt(arr)\n\n#Calcualting exponential (e^)\nnp.exp(arr)\n\nnp.max(arr) #same as arr.max()\n\nnp.sin(arr)\n\nnp.log(arr)", "Great Job!\nThat's all we need to know for now!" ]
[ "markdown", "code", "markdown", "code", "markdown" ]
ireapps/cfj-2017
completed/09. Working with APIs (Part 2).ipynb
mit
[ "Working with data from an API\nWeb APIs are now a common way to interact with data, and many governments now have open data portals that offer access via API. Socrata is a common vendor.\nHere, we're going to tap into the API feed of a dataset of vacant buildings in St. Paul.\nImport the modules we need", "import json\nimport requests", "Fetch the page and get the JSON", "# URL\nURL = 'https://information.stpaul.gov/resource/rfbb-x7za.json'\n\n# use the json() method, which converts the json into Python objects\nvb_data = requests.get(URL).json()\n\n# print to see what we're working with\nprint(vb_data)", "Filter the data\nLooks like we're dealing with a list of dictionaries. Maybe our goal here is to filter out everything except the vacant single-family residences.\nLet's use a new thing called a list comprehension -- really handy when you want to filter a group of things and store the result in a variable.", "sfr_vb = [x for x in vb_data if x['dwelling_type'] == 'Single Family Residential']\nprint(len(sfr_vb), 'SFR of', len(vb_data), 'total')", "Exercise\nFrom the original data set, filter out everything except vacant buildings that were vacant as of (vacant_as_of) 2013 or later. Select whatever elements of the data are interesting to you and write out to a CSV file.\nBreaking down the problem:\n\nFilter the data to include only buildings vacant as of 2013 or later\nUse slicing to isolate the year from the vacancy date\nCoerce that year string to an integer\nIn a list comprehension, use an if statement to compare whether the year is greater than or equal to 2013\n\n\nOpen a file to write to\nLoop over that filtered list of data\nSelect the elements of the data that you think belong in your CSV and write them out" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
GEMScienceTools/oq-subduction
openquake/sub/notebooks/multiple_cross_sections.ipynb
agpl-3.0
[ "%matplotlib inline\nimport os\nimport re\nimport sys\nimport numpy\nimport matplotlib.pyplot as plt\ntry:\n import cPickle as pickle\nexcept:\n import pickle\nimport matplotlib.patheffects as PathEffects\n\nfrom matplotlib.patches import Circle\nfrom matplotlib.collections import PatchCollection\n\nsys.path.append('/Users/mpagani/Projects/hmtk/')\nsys.path.append('/Users/mpagani/Projects/original/oq-engine/')\n\nfrom hmtk.subduction.cross_sections import CrossSection, Trench\nfrom hmtk.seismicity.selector import CatalogueSelector\nfrom mpl_toolkits.basemap import Basemap", "Resources\n\nhttp://nicoya.eas.gatech.edu/Data_Products\nGTDEF http://geophysics.eas.gatech.edu/anewman/classes/MGM/GTdef/\n\nLoad trench data", "fin = open('./../data/trench.xy', 'r')\ntrench = []\nfor line in fin: \n aa = re.split('\\s+', re.sub('^\\s+', '', line))\n trench.append((float(aa[0]), float(aa[1])))\nfin.close()\ntrench = Trench(numpy.array(trench))\n\ncat = pickle.load(open(\"./../data/catalogue_ext_cac.p\", \"rb\" ))", "Compute the traces of cross-sections", "minlo = -110\nminla = 5\nmaxlo = -75\nmaxla = 25\nmidlo = -100\nmidla = 20\n\nfrom hmtk.parsers.catalogue.gcmt_ndk_parser import ParseNDKtoGCMT\nfrom obspy.imaging.beachball import beach\n\ngcmt_filename = '/Users/mpagani/Data/catalogues/gcmt/jan76_dec13.ndk'\ngcmtc = ParseNDKtoGCMT(gcmt_filename)\ngcmtc.read_file()\n\ndef plot_nodal_planes(catalogue, ax, minlo, minla, maxlo, maxla): \n beach1 = beach(np1, xy=(-70, 80), width=30)\n beach2 = beach(mt, xy=(50, 50), width=50)\n ax.add_collection(beach1) \n ax.add_collection(beach2)\n\nfig = plt.figure(figsize=(12,9))\n\n#\n# Plot the basemap\nm = Basemap(llcrnrlon=minlo, llcrnrlat=minla,\n urcrnrlon=maxlo, urcrnrlat=maxla,\n resolution='i', projection='tmerc', \n lon_0=midlo, lat_0=midla)\n\n#\n# Draw paralleles and meridians with labels \n# labels = [left,right,top,bottom]\nm.drawcoastlines()\nm.drawmeridians(numpy.arange(numpy.floor(minlo/10.)*10,\n numpy.ceil(maxlo/10.)*10,5.),\n labels=[False, False, False, True])\nm.drawparallels(numpy.arange(numpy.floor(minla/10.)*10,\n numpy.ceil(maxla/10.)*10,5.),\n labels=[True, False, False, False])\n\n#\n# Plot the instrumental catalogue\nxa, ya = m(cat.data['longitude'], cat.data['latitude'])\nszea = (cat.data['magnitude']*100)**1.5\npatches = []\nfor x, y, sze in zip(list(xa), list(ya), szea):\n circle = Circle((x, y), sze, ec='white')\n patches.append(circle)\nprint ('depths: %f %f ' % (min(cat.data['depth']), max(cat.data['depth'])))\ncolors = cat.data['depth']\np = PatchCollection(patches, zorder=6, edgecolors='white')\np.set_alpha(0.5)\np.set_clim([0, 200])\np.set_array(numpy.array(colors))\nplt.gca().add_collection(p)\nplt.colorbar(p,fraction=0.02, pad=0.04, extend='max')\n\n#\n# GCMT \nx, y = m(gcmtc.catalogue.data['longitude'], \n gcmtc.catalogue.data['latitude'])\n#plt.plot(x, y, 'sr', zorder=10, alpha=.5)\n\n#\n# Plot the traces of cross-sections\ndistance = 100\ncs_len = 400\n\nts = trench.resample(distance)\n\nfou = open('cs_traces.csv', 'w')\nx, y = m(trench.axis[:, 0], trench.axis[:, 1])\nplt.plot(x, y, '-g', linewidth=2, zorder=10)\nx, y = m(ts.axis[:, 0], ts.axis[:, 1])\nplt.plot(x, y, '--y', linewidth=4, zorder=20)\n\nfor idx, cs in enumerate(trench.iterate_cross_sections(distance, cs_len)):\n if cs is not None:\n x, y = m(cs.plo, cs.pla)\n plt.plot(x, y, ':r', linewidth=2, zorder=20)\n text = plt.text(x[-1], y[-1], '%d' % idx, ha='center', va='center', size=10, zorder=30)\n text.set_path_effects([PathEffects.withStroke(linewidth=3, 
foreground=\"w\")])\n tmps = '%f %f %f %f %d\\n' % (cs.plo[0], cs.pla[0], cs_len, cs.strike[0], idx)\n print (tmps.rstrip())\n fou.write(tmps)\nfou.close()" ]
[ "code", "markdown", "code", "markdown", "code" ]
mne-tools/mne-tools.github.io
0.14/_downloads/plot_visualize_raw.ipynb
bsd-3-clause
[ "%matplotlib inline", "Visualize Raw data", "import os.path as op\nimport numpy as np\n\nimport mne\n\ndata_path = op.join(mne.datasets.sample.data_path(), 'MEG', 'sample')\nraw = mne.io.read_raw_fif(op.join(data_path, 'sample_audvis_raw.fif'))\nraw.set_eeg_reference() # set EEG average reference\nevents = mne.read_events(op.join(data_path, 'sample_audvis_raw-eve.fif'))", "The visualization module (:mod:mne.viz) contains all the plotting functions\nthat work in combination with MNE data structures. Usually the easiest way to\nuse them is to call a method of the data container. All of the plotting\nmethod names start with plot. If you're using Ipython console, you can\njust write raw.plot and ask the interpreter for suggestions with a\ntab key.\nTo visually inspect your raw data, you can use the python equivalent of\nmne_browse_raw.", "raw.plot(block=True)", "The channels are color coded by channel type. Generally MEG channels are\ncolored in different shades of blue, whereas EEG channels are black. The\nscrollbar on right side of the browser window also tells us that two of the\nchannels are marked as bad. Bad channels are color coded gray. By\nclicking the lines or channel names on the left, you can mark or unmark a bad\nchannel interactively. You can use +/- keys to adjust the scale (also = works\nfor magnifying the data). Note that the initial scaling factors can be set\nwith parameter scalings. If you don't know the scaling factor for\nchannels, you can automatically set them by passing scalings='auto'. With\npageup/pagedown and home/end keys you can adjust the amount of data\nviewed at once.\nDrawing annotations\nYou can enter annotation mode by pressing a key. In annotation mode you\ncan mark segments of data (and modify existing annotations) with the left\nmouse button. You can use the description of any existing annotation or\ncreate a new description by typing when the annotation dialog is active.\nNotice that the description starting with the keyword 'bad' means that\nthe segment will be discarded when epoching the data. Existing annotations\ncan be deleted with the right mouse button. Annotation mode is exited by\npressing a again or closing the annotation window. See also\n:class:mne.Annotations and marking_bad_segments. To see all the\ninteractive features, hit ? key or click help in the lower left\ncorner of the browser window.\n<div class=\"alert alert-danger\"><h4>Warning</h4><p>Annotations are modified in-place immediately at run-time.\n Deleted annotations cannot be retrieved after deletion.</p></div>\n\nThe channels are sorted by channel type by default. You can use the order\nparameter of :func:raw.plot &lt;mne.io.Raw.plot&gt; to group the channels in a\ndifferent way. order='selection' uses the same channel groups as MNE-C's\nmne_browse_raw (see CACCJEJD). The selections are defined in\nmne-python/mne/data/mne_analyze.sel and by modifying the channels there,\nyou can define your own selection groups. Notice that this also affects the\nselections returned by :func:mne.read_selection. By default the selections\nonly work for Neuromag data, but order='position' tries to mimic this\nbehavior for any data with sensor positions available. The channels are\ngrouped by sensor positions to 8 evenly sized regions. Notice that for this\nto work effectively, all the data channels in the channel array must be\npresent. 
The order parameter can also be passed as an array of ints\n(picks) to plot the channels in the given order.", "raw.plot(order='selection')", "We read the events from a file and passed it as a parameter when calling the\nmethod. The events are plotted as vertical lines so you can see how they\nalign with the raw data.\nWe can check where the channels reside with plot_sensors. Notice that\nthis method (along with many other MNE plotting functions) is callable using\nany MNE data container where the channel information is available.", "raw.plot_sensors(kind='3d', ch_type='mag', ch_groups='position')", "We used ch_groups='position' to color code the different regions. It uses\nthe same algorithm for dividing the regions as order='position' of\n:func:raw.plot &lt;mne.io.Raw.plot&gt;. You can also pass a list of picks to\ncolor any channel group with different colors.\nNow let's add some ssp projectors to the raw data. Here we read them from a\nfile and plot them.", "projs = mne.read_proj(op.join(data_path, 'sample_audvis_eog-proj.fif'))\nraw.add_proj(projs)\nraw.plot_projs_topomap()", "The first three projectors that we see are the SSP vectors from empty room\nmeasurements to compensate for the noise. The fourth one is the average EEG\nreference. These are already applied to the data and can no longer be\nremoved. The next six are the EOG projections that we added. Every data\nchannel type has two projection vectors each. Let's try the raw browser\nagain.", "raw.plot()", "Now click the proj button at the lower right corner of the browser\nwindow. A selection dialog should appear, where you can toggle the projectors\non and off. Notice that the first four are already applied to the data and\ntoggling them does not change the data. However the newly added projectors\nmodify the data to get rid of the EOG artifacts. Note that toggling the\nprojectors here doesn't actually modify the data. This is purely for visually\ninspecting the effect. See :func:mne.io.Raw.del_proj to actually remove the\nprojectors.\nRaw container also lets us easily plot the power spectra over the raw data.\nHere we plot the data using spatial_colors to map the line colors to\nchannel locations (default in versions >= 0.15.0). Other option is to use the\naverage (default in < 0.15.0). See the API documentation for more info.", "raw.plot_psd(tmax=np.inf, average=False)", "Plotting channel-wise power spectra is just as easy. The layout is inferred\nfrom the data by default when plotting topo plots. This works for most data,\nbut it is also possible to define the layouts by hand. Here we select a\nlayout with only magnetometer channels and plot it. Then we plot the channel\nwise spectra of first 30 seconds of the data.", "layout = mne.channels.read_layout('Vectorview-mag')\nlayout.plot()\nraw.plot_psd_topo(tmax=30., fmin=5., fmax=60., n_fft=1024, layout=layout)" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
Heroes-Academy/OOP_Spring_2016
notebooks/giordani/Python_3_OOP_Part_2__Classes_and_members.ipynb
mit
[ "Python Classes Strike Again\nThe Python implementation of classes has some peculiarities. The bare truth is that in Python the class of an object is an object itself. You can check this by issuing type() on the class", "a = 1\nprint(type(a))\n\nprint(type(int))", "This shows that the int class is an object, an instance of the type class.\nThis concept is not so difficult to grasp as it can seem at first sight: in the real world we deal with concepts using them like things: for example we can talk about the concept of \"door\", telling people how a door looks like and how it works. In this case the concept of door is the topic of our discussion, so in our everyday experience the type of an object is an object itself. In Python this can be expressed by saying that everything is an object.\nIf the class of an object is itself an instance it is a concrete object and is stored somewhere in memory. Let us leverage the inspection capabilities of Python and its id() function to check the status of our objects. The id() built-in function returns the memory position of an object.\nIn the first post we defined this class", "class Door:\n def __init__(self, number, status):\n self.number = number\n self.status = status\n \n def open(self):\n self.status = 'open'\n \n def close(self):\n self.status = 'closed'", "First of all, let's create two instances of the Door class and check that the two objects are stored at different addresses", "door1 = Door(1, 'closed')\ndoor2 = Door(1, 'closed')\n\nhex(id(door1))\n\nhex(id(door2))", "This confirms that the two instances are separate and unrelated.\nThe second instance was given the same attributes of the first instance to show that the two are different objects regardless of the value of the attributes.\nHowever if we use id() on the class of the two instances we discover that the class is exactly the same", "hex(id(door1.__class__))\n\nhex(id(door2.__class__))", "Well this is very important. In Python, a class is not just the schema used to build an object. Rather, the class is a shared living object, which code is accessed at run time.\nAs we already tested, however, attributes are not stored in the class but in every instance, due to the fact that __init__() works on self when creating them. Classes, however, can be given attributes like any other object; with a terrific effort of imagination, let's call them class attributes.\nAs you can expect, class attributes are shared among the class instances just like their container", "class Door:\n colour = 'brown'\n\n def __init__(self, number, status):\n self.number = number\n self.status = status\n \n def open(self):\n self.status = 'open'\n \n def close(self):\n self.status = 'closed'", "Pay attention: the colour attribute here is not created using self, so it is contained in the class and shared among instances", "door1 = Door(1, 'closed')\ndoor2 = Door(2, 'closed')\n\nDoor.colour\n\ndoor1.colour\n\ndoor2.colour", "Until here things are not different from the previous case. Let's see if changes of the shared value reflect on all instances", "Door.colour = 'white'\nDoor.colour\n\ndoor1.colour\n\ndoor2.colour\n\nhex(id(Door.colour))\n\nhex(id(door1.colour))\n\nhex(id(door2.colour))", "Raiders of the Lost Attribute\nAny Python object is automatically given a __dict__ attribute, which contains its list of attributes. 
Let's investigate what this dictionary contains for our example objects:", "door1 = Door(1, 'closed')\ndoor2 = Door(2, 'closed')\nprint(type(Door.__dict__))\nDoor.__dict__\n\nprint(type(door1.__dict__))\ndoor1.__dict__", "Leaving aside the difference between a dictionary and a mappingproxy object, you can see that the colour attribute is listed among the Door class attributes, while status and number are listed for the instance.\nHow comes that we can call door1.colour, if that attribute is not listed for that instance? This is a job performed by the magic __getattribute__() method; in Python the dotted syntax automatically invokes this method so when we write door1.colour, Python executes door1.__getattribute__('colour'). That method performs the attribute lookup action, i.e. finds the value of the attribute by looking in different places.\nThe standard implementation of __getattribute__() searches first the internal dictionary (__dict__) of an object, then the type of the object itself; in this case door1.__getattribute__('colour') executes first door1.__dict__['colour'] and then, since the latter raises a KeyError exception, door1.__class__.__dict__['colour']", "try:\n door1.__dict__['colour']\nexcept KeyError as e:\n print(\"Cannot find key {}\".format(e))\n\ndoor1.__class__.__dict__['colour']", "Indeed, if we compare the objects' equality through the is operator we can confirm that both door1.colour and Door.colour are exactly the same object", "door1.colour is Door.colour", "When we try to assign a value to a class attribute directly on an instance, we just put in the __dict__ of the instance a value with that name, and this value masks the class attribute since it is found first by __getattribute__(). As you can see from the examples of the previous section, this is different from changing the value of the attribute on the class itself.", "door1.colour = 'white'\ndoor1.__dict__['colour']\n\ndoor1.__class__.__dict__['colour']\n\nDoor.colour = 'red'\ndoor1.__dict__['colour']\n\ndoor1.__class__.__dict__['colour']", "Revenge of the Methods\nLet's play the same game with methods. First of all you can see that, just like class attributes, methods are listed only in the class __dict__. Chances are that they behave the same as attributes when we get them", "door1.open is Door.open", "Whoops. Let us further investigate the matter", "Door.__dict__['open']\n\nDoor.open\n\ndoor1.open", "So, the class method is listed in the members dictionary as function. So far, so good. The same happens when taking it directly from the class; here Python 2 needed to introduce unbound methods, which are not present in Python 3. Taking it from the instance returns a bound method.\nWell, a function is a procedure you named and defined with the def statement. When you refer to a function as part of a class in Python 3 you get a plain function, without any difference from a function defined outside a class.\nWhen you get the function from an instance, however, it becomes a bound method. The name method simply means \"a function inside an object\", according to the usual OOP definitions, while bound signals that the method is linked to that instance. Why does Python bother with methods being bound or not? And how does Python transform a function into a bound method?\nFirst of all, if you try to call a class function you get an error", "try:\n Door.open()\nexcept TypeError as e:\n print(e)", "Yes. 
Indeed the function was defined to require an argument called 'self', and calling it without an argument raises an exception. This perhaps means that we can give it one instance of the class and make it work", "Door.open(door1)\ndoor1.status", "Python does not complain here, and the method works as expected. So Door.open(door1) is the same as door1.open(), and this is the difference between a plain function coming from a class an a bound method: the bound method automatically passes the instance as an argument to the function.\nAgain, under the hood, __getattribute__() is working to make everything work and when we call door1.open(), Python actually calls door1.__class__.open(door1). However, door1.__class__.open is a plain function, so there is something more that converts it into a bound method that Python can safely call.\nWhen you access a member of an object, Python calls __getattribute__() to satisfy the request. This magic method, however, conforms to a procedure known as descriptor protocol. For the read access __getattribute__() checks if the object has a __get__() method and calls this latter. So the converstion of a function into a bound method happens through such a mechanism. Let us review it by means of an example.", "door1.__class__.__dict__['open']", "This syntax retrieves the function defined in the class; the function knows nothing about objects, but it is an object (remember \"everything is an object\"). So we can look inside it with the dir() built-in function", "dir(door1.__class__.__dict__['open'])\n\ndoor1.__class__.__dict__['open'].__get__", "As you can see, a __get__ method is listed among the members of the function, and Python recognizes it as a method-wrapper. This method shall connect the open function to the door1 instance, so we can call it passing the instance alone", "door1.__class__.__dict__['open'].__get__(door1)", "and we get exactly what we were looking for. This complex syntax is what happens behind the scenes when we call a method of an instance.\nWhen Methods met Classes\nUsing type() on functions defined inside classes reveals some other details on their internal representation", "Door.open\n\ndoor1.open\n\ntype(Door.open)\n\ntype(door1.open)", "As you can see, Python tells the two apart recognizing the first as a function and the second as a method, where the second is a function bound to an instance.\nWhat if we want to define a function that operates on the class instead of operating on the instance? As we may define class attributes, we may also define class methods in Python, through the classmethod decorator. Class methods are functions that are bound to the class and not to an instance.", "class Door:\n colour = 'brown'\n\n def __init__(self, number, status):\n self.number = number\n self.status = status\n\n @classmethod\n def knock(cls):\n print(\"Knock!\")\n\n def open(self):\n self.status = 'open'\n \n def close(self):\n self.status = 'closed'", "Such a definition makes the method callable on both the instance and the class", "door1 = Door(1, 'closed')\ndoor1.knock()\n\nDoor.knock()", "and Python identifies both as (bound) methods", "door1.__class__.__dict__['knock']\n\ndoor1.knock\n\nDoor.knock\n\nprint(type(Door.knock))\n\nprint(type(door1.knock))", "As you can see the knock() function accepts one argument, which is called cls just to remember that it is not an instance but the class itself. 
This means that inside the function we can operate on the class, and the class is shared among instances.", "class Door:\n colour = 'brown'\n\n def __init__(self, number, status):\n self.number = number\n self.status = status\n\n @classmethod\n def knock(cls):\n print(\"Knock!\")\n\n @classmethod\n def paint(cls, colour):\n cls.colour = colour\n\n def open(self):\n self.status = 'open'\n \n def close(self):\n self.status = 'closed'", "The paint() classmethod now changes the class attribute colour which is shared among instances. Let's check how it works", "door1 = Door(1, 'closed')\ndoor2 = Door(2, 'closed')\nDoor.colour\n\ndoor1.colour\n\ndoor2.colour\n\nDoor.paint('white')\nDoor.colour\n\ndoor1.colour\n\ndoor2.colour", "The class method can be called on the class, but this affects both the class and the instances, since the colour attribute of instances is taken at runtime from the shared class.", "door1.paint('yellow')\nDoor.colour\n\ndoor1.colour\n\ndoor2.colour", "Class methods can be called on instances too, however, and their effect is the same as before. The class method is bound to the class, so it works on this latter regardless of the actual object that calls it (class or instance).\nMovie Trivia\nSection titles come from the following movies: The Empire Strikes Back (1980), Raiders of the Lost Ark (1981), Revenge of the Nerds (1984), When Harry Met Sally (1989).\nSources\nYou will find a lot of documentation in this Reddit post. Most of the information contained in this series come from those sources.\nFeedback\nFeel free to use the blog Google+ page to comment the post. The GitHub issues page is the best place to submit corrections." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
dkirkby/bossdata
examples/nb/PlottingExamples.ipynb
mit
[ "Plotting Examples\nDemonstrate the plotting functions in bossdata.plot.\nInitialization", "%pylab inline\n\nimport astropy.io.fits as fits\n\nimport bossdata\nprint(bossdata.__version__)\n\nfinder = bossdata.path.Finder()\nmirror = bossdata.remote.Manager()", "Targeting Metadata", "spAll = bossdata.meta.Database(finder=finder, mirror=mirror, lite=False)\n\ndef get_targets(plate, mjd):\n return spAll.select_all(what='XFOCAL,YFOCAL,FIBER,OBJTYPE,CLASS',\n where='PLATE={plate} and MJD={mjd}'.format(plate=plate, mjd=mjd))\n\nplate6640 = get_targets(6640, 56385)\nstd6640 = plate6640['OBJTYPE'] == 'SPECTROPHOTO_STD'\nsky6640 = plate6640['OBJTYPE'] == 'SKY'", "Flux Calibration Vectors\nLoad the spFluxcalib or spFluxcorr vector for all fibers of a single exposure in the blue or red cameras:", "def get_calib_vector(plate, mjd, ftype, exposure_index, camera='blue'):\n num_fibers = bossdata.plate.get_num_fibers(plate)\n # Load the list of exposures used to co-add this observation.\n plan_name = finder.get_plate_plan_path(plate=plate, mjd=mjd, combined=True)\n plan_file = bossdata.plate.Plan(mirror.get(plan_name))\n spec1_name = plan_file.get_exposure_name(exposure_index, camera, 1, ftype)\n spec2_name = plan_file.get_exposure_name(exposure_index, camera, num_fibers, ftype)\n if spec1_name is None or spec2_name is None:\n print('Missing {} exposures [{}] for {}-{}.'.format(camera, exposure_index, plate, mjd))\n return\n exposure_id = plan_file.exposures['science'][exposure_index]['EXPID']\n print('Exposure [{}] is #{}.'.format(exposure_index, exposure_id))\n spec1_file = fits.open(mirror.get(finder.get_plate_path(plate, spec1_name)))\n spec2_file = fits.open(mirror.get(finder.get_plate_path(plate, spec2_name)))\n return np.vstack((spec1_file[0].data, spec2_file[0].data))\n\ncorr6640_0 = get_calib_vector(6640, 56385, 'spFluxcorr', 0, 'blue')\ncorr6640_1 = get_calib_vector(6640, 56385, 'spFluxcorr', 1, 'blue')\ncorr6640_2 = get_calib_vector(6640, 56385, 'spFluxcorr', 2, 'blue')\n\ncalib6640_0 = get_calib_vector(6640, 56385, 'spFluxcalib', 0, 'blue')\ncalib6640_1 = get_calib_vector(6640, 56385, 'spFluxcalib', 1, 'blue')\ncalib6640_2 = get_calib_vector(6640, 56385, 'spFluxcalib', 2, 'blue')", "Flux Calibration Plots\nDefine the special subsets to identify in the plots.", "subsets = {\n 'Standards': { 'options': dict(marker='*', s=250), 'fibers': std6640 },\n 'Sky': { 'options': dict(marker='s'), 'fibers': sky6640 }\n}", "Plot flux calibration vectors (left) and correction factors (right) for the first three exposures of 6640-56385:", "fig = plt.figure(figsize=(15, 20))\nwlen = 1000\n#\nplt.subplot(3,2,1)\nbossdata.plot.by_fiber(calib6640_0[:,wlen], percentile_cut=0,\n plot_label='6640-56385\\n#159423', data_label='Flux Calibration')\nplt.subplot(3,2,2)\nbossdata.plot.focal_plane(plate6640['XFOCAL'], plate6640['YFOCAL'], corr6640_0[:,wlen], percentile_cut=2,\n background=std6640, subsets=subsets, numbered=std6640,\n plot_label='6640-56385\\n#159423', data_label='Flux Correction')\n#\nplt.subplot(3,2,3)\nbossdata.plot.by_fiber(calib6640_1[:,wlen], percentile_cut=0,\n plot_label='6640-56385\\n#159424', data_label='Flux Calibration')\nplt.subplot(3,2,4)\nbossdata.plot.focal_plane(plate6640['XFOCAL'], plate6640['YFOCAL'], corr6640_1[:,wlen], percentile_cut=2,\n background=std6640, subsets=subsets, numbered=std6640,\n plot_label='6640-56385\\n#159424', data_label='Flux Correction')\n#\nplt.subplot(3,2,5)\nbossdata.plot.by_fiber(calib6640_2[:,wlen], percentile_cut=0,\n plot_label='6640-56385\\n#159425', 
data_label='Flux Calibration')\nplt.subplot(3,2,6)\nbossdata.plot.focal_plane(plate6640['XFOCAL'], plate6640['YFOCAL'], corr6640_2[:,wlen], percentile_cut=2,\n background=std6640, subsets=subsets, numbered=std6640,\n plot_label='6640-56385\\n#159425', data_label='Flux Correction')\n#\nplt.tight_layout()\nplt.show()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
bwalrond/explore-notebooks
jupyter_notebooks/SanFranCrime.ipynb
mit
[ "sc", "Data Ingest\nHere, the raw train data is read from csv flat file and saved as a more type-friendly parquet file", "import csv\nfrom dateutil import parser\nfrom datetime import datetime\nfrom pyspark.sql.types import *\n\nsfcrime_raw_data = '/Users/bill.walrond/Documents/dsprj/data/SanFranCrime/train.csv'\nsfcrime_schema_file = '/Users/bill.walrond/Documents/dsprj/data/SanFranCrime/train_schema.txt'\n\nsfc_rdd = sc.textFile(sfcrime_raw_data)\n \nprint 'Line count of train file: %d' % sfc_rdd.count()\n\n# Read and process the schema file\nschemadict = {}\ni = 0\nschema = sc.textFile(sfcrime_schema_file)\nline_cnt = schema.count()\nprint 'Lines in schema file: %d' % line_cnt\nfor l in schema.collect():\n col = l.split(',')\n schemadict.update({i: (col[0],col[1])})\n i += 1\nprint 'Length of the schemadict: %d' % len(schemadict.keys())\n\n# Establish the Struct for the Schema\nkeys = schemadict.keys()\nkeys.sort()\nschema = StructType()\nfor k in keys:\n if schemadict[k][1] == 'int':\n schema.add(StructField(schemadict[k][0], IntegerType(), True))\n elif schemadict[k][1] == 'str':\n schema.add(StructField(schemadict[k][0], StringType(), True))\n elif schemadict[k][1] == 'float':\n schema.add(StructField(schemadict[k][0], FloatType(), True))\n elif schemadict[k][1] == 'datetime':\n schema.add(StructField(schemadict[k][0], TimestampType(), True))\n else:\n print 'Unsupported or incorrect data type'\n\n# Skip the first header line with this trick ...\nsfc_nh = sfc_rdd.zipWithIndex().filter(lambda tup: tup[1] > 0).map(lambda tup: tup[0])\n\ndef toRowSep(line):\n \"\"\"Parses one row using csv reader\"\"\"\n for r in csv.reader([line], delimiter=','):\n return r\n\nsfc_split = sfc_nh.map(toRowSep)\n \nlens = sfc_split.map(lambda r: len(r))\nprint 'Max len: %d' % lens.max()\nprint 'Min len: %d' % lens.min()\nsfc_split.cache()\n\ndef convert_types(row,schema):\n d = row\n for col, data in enumerate(row):\n if col <= len(schema.keys()):\n typed = schema[col]\n if data is None:\n d[col] = None\n elif typed[1] == 'string':\n d[col] = data\n elif typed[1] == 'int':\n d[col] = int(round(float(data)))\n elif typed[1] == 'float':\n d[col] = float(data)\n elif typed[1] == 'datetime':\n # d[col] = data\n # d[col] = dateutil.parser.parse(data)\n d[col] = datetime.strptime(data,'%Y-%m-%d %H:%M:%S')\n return d\n\ndef toTypedRow(row):\n return convert_types(row, schemadict)\n\n# Now, convert the types of all the rdd elements\nsfc_typed = sfc_split.map(toTypedRow)\nsfc_typed.take(1)\nsfc_train = sqlContext.createDataFrame(sfc_typed, schema)\nprint sfc_train.count()\n\nsfc_train = sfc_train.cache()\nsfc_train.printSchema()\n\nsfc_train.show(5)\n\nparqFileName = '/Users/bill.walrond/Documents/dsprj/data/SanFranCrime/train.pqt'\nsfc_train.write.parquet(parqFileName)\n\nsc", "Data Profiling and Analysis\nNext, the train data is injested from the parquet file and data profiling and analysis may begin", "parqFileName = '/Users/bill.walrond/Documents/dsprj/data/SanFranCrime/train.pqt'\nsfc_train = sqlContext.read.parquet(parqFileName)\nprint sfc_train.count()\nprint sfc_train.printSchema()\nsfc_train = sfc_train.cache()\n\n%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set(style=\"whitegrid\", color_codes=True)\n\n\nsfc_train = sfc_train.repartition(8)\nprint sfc_train.count()\nsfc_train = sfc_train.cache()\n\n\nsfc_train.describe().show()\n", "Profiling on the date-time column: What do we want to know about a 
\"datetime\" column?", "[name for name,type in sfc_train.dtypes if type == 'float' ]\n\nprint 'Distinct date values: %d' % sfc_train.select(\"Dates\").distinct().count()\nprint 'Earliest date: %s ' % sfc_train.agg({\"Dates\": \"min\"}).collect()[0][0]\nprint 'Latest date: %s' % sfc_train.agg({\"Dates\": \"max\"}).collect()[0][0]\n\nsfc_mod = sfc_train.select('*', trunc(sfc_train.Dates,'month').alias('yr-mo'))\nsfc_mod = sfc_mod.cache()\nsfc_month_grp = sfc_mod.groupBy('yr-mo').count()\nsfc_month_grp = sfc_month_grp.cache()\nprint sfc_month_grp.orderBy('count', ascending=True).show(10)\nearliest_mo = sfc_month_grp.agg(min('yr-mo')).collect()[0][0]\nlatest_mo = sfc_month_grp.agg(max('yr-mo')).collect()[0][0]\nprint 'Average obs per month: %d' % sfc_month_grp.agg(avg('count')).collect()[0][0]\nprint 'Earliest month: {0}\\tLatest month: {1}'.format(earliest_mo, latest_mo)\nprint 'Time span: {0}'.format(str(latest_mo-earliest_mo))\nprint 'Nulls: %d' % sfc_train.filter(\"Dates is Null\").count()", "What do we want to know about a \"str\" column?\n\nnumber of nulls (% nulls)\nnumber of unique values (categories)\nCategory count\nSimple bar plot of the Top 20 Category Counts\nDoes it look like free-form text?\nMin/max/avg length\nAvg word count\nAvg word length\nlanguage", "# category.columns\nimport pandas as pd\ncols = [name for name,type in sfc_train.dtypes if type == 'string' ]\nfor c in cols:\n pdf = sfc_train.groupBy(c).count().orderBy('count', ascending=False).toPandas().head(15)\n \n y_pos = np.arange(start=len(pdf[c]),stop=0,step=-1)\n\n plt.barh(y_pos, pdf['count'], align='center', alpha=0.7)\n plt.yticks(y_pos, pdf[c])\n plt.xlabel('count')\n plt.title('Top 15 {0} Counts'.format(c))\n\n plt.show()\n\nfrom pyspark.sql.functions import *\ncols = [name for name,type in sfc_train.dtypes if type == 'string' ] # get a list of all the string columns\nsumm_cols = ['Column','Null_cnt','Non-null_cnt','Pct_null','Unique_cnt','Max_len','Min_len','Max_words','Min_words']\nsumm_df = pd.DataFrame(columns=summ_cols)\ntotal_rows = sfc_train.count()\nfor col in cols:\n sum_vals = []\n print '---- Summarizing: {0} ----'.format(col)\n words = sfc_train.select(split(col,\" \").alias(\"l\")).map(lambda l: len(l[0]))\n words = words.cache()\n sum_val = [col,\n sfc_train.filter(col+\" is Null\").count(), # count of nulls\n sfc_train.filter(col+\" is not Null\").count(), # count of non-nulls\n nulls/float(total_rows), # pct of nulls\n sfc_train.select(col).distinct().count(), # count of unique values\n sfc_train.select(max(length(col))).first()[0], # max length\n sfc_train.select(min(length(col))).first()[0], # min length\n words.max(), # max number of words\n words.min()] # min number of words\n summ_df.loc[len(summ_df)] = sum_val\n \nprint summ_df.head(len(summ_df))\n\nsns.set(style=\"white\")\n\n# Load the example planets dataset\nplanets = sns.load_dataset(\"planets\")\n\n# Make a range of years to show categories with no observations\nyears = np.arange(2000, 2015)\n\n# Draw a count plot to show the number of planets discovered each year\ng = sns.factorplot(x=\"year\", data=planets, kind=\"count\",\n palette=\"BuPu\", size=6, aspect=1.5, order=years)\ng.set_xticklabels(step=2)\n\ncategory.columns\n\nprint len(planets)\n\n\n# example 1\ng = sns.factorplot(x=\"age\", y=\"embark_town\",\n hue=\"sex\", row=\"class\",\n data=titanic[titanic.embark_town.notnull()],\n orient=\"h\", size=2, aspect=3.5, palette=\"Set3\",\n kind=\"violin\", split=True, cut=0, bw=.2)\n\n# example 2\ng = sns.factorplot(x=\"who\", \n 
y=\"survived\", col=\"class\",\n data=titanic, saturation=.5,\n kind=\"bar\", ci=None, aspect=.6)\n\n(g.set_axis_labels(\"\", \"Survival Rate\")\n .set_xticklabels([\"Men\", \"Women\", \"Children\"])\n .set_titles(\"{col_name} {col_var}\")\n .set(ylim=(0, 1))\n .despine(left=True))\n\nfrom bokeh.charts import Bar, output_notebook, show\nfrom bokeh.charts.attributes import cat, color\nfrom bokeh.charts.operations import blend\nfrom bokeh.charts.utils import df_from_json\nfrom bokeh.sampledata.olympics2014 import data\n\n# utilize utility to make it easy to get json/dict data converted to a dataframe\ndf = df_from_json(data)\n\n# filter by countries with at least one medal and sort by total medals\ndf = df[df['total'] > 0]\ndf = df.sort(\"total\", ascending=False)\n\nbar = Bar(df,\n values=blend('bronze', 'silver', 'gold', name='medals', labels_name='medal'),\n label=cat(columns='abbr', sort=False),\n stack=cat(columns='medal', sort=False),\n color=color(columns='medal', palette=['SaddleBrown', 'Silver', 'Goldenrod'],\n sort=False),\n legend='top_right',\n title=\"Medals per Country, Sorted by Total Medals\",\n tooltips=[('medal', '@medal'), ('country', '@abbr')])\n\n\n# output_file(\"stacked_bar.html\", title=\"stacked_bar.py example\")\noutput_notebook()\n\nshow(bar)\n\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nsns.set(style=\"darkgrid\")\n\ntips = sns.load_dataset(\"tips\")\ng = sns.FacetGrid(tips, row=\"sex\", col=\"time\", margin_titles=True)\nbins = np.linspace(0, 60, 13)\ng.map(plt.hist, \"total_bill\", color=\"steelblue\", bins=bins, lw=0)\n\n\nsc.stop()" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
maxentile/method-of-moments-tinker
HMM method of moments.ipynb
mit
[ "A Method of Moments for Mixture Models and Hidden Markov Models\nby A. Anandkumar, D. Hsu, and S.M. Kakade\nhttp://arxiv.org/abs/1203.0683\n2. Warm-up: bag-of-words topic modeling\n\nSetup:\nA document is a bag of words.\nA document belongs to a single topic.\nThe words in a document are drawn i.i.d. from a multinomial distribution corresponding to the document's topic.\nThere are $k$ topics.\nThere are $d$ words.\nEach document contains $\\ell \\geq 3$ words.\n\n\n\nGenerative process for a document:\n\nDocument's topic $h$ is drawn from a multinomial distribution specified by $\\vec{w} \\in \\Delta^{k-1}$\n$$ \\Pr [h=j] = w_j $$\nwhere\n$j\\in [k]$\nGiven the topic $h$, the document's $\\ell$ words are drawn from the multinomial distribution $\\vec{\\mu}_h\\in\\Delta^{d-1}$. Each word in the document is represented by a one-hot random vector $\\vec{x}_v = \\vec{e}_i$ (\"the $v$-th word in the document is $i$\").\nFor each word $v \\in [\\ell]$ in the document, the conditional probabilty of the word given the topic is:\n$$ \\Pr[\\vec{x}v = \\vec{e}_i | h=j] = \\langle \\vec{e}_i,\\vec{\\mu}_j \\rangle = M{i,j}$$\nwhere\n$i \\in [d]$\n$j \\in [k]$\n$M \\equiv [\\vec{\\mu_1} | \\vec{\\mu_2} | \\cdots | \\vec{\\mu_k} ] \\in \\mathbb{R}^{d \\times k}$\n\n\n\n\nNon-degeneracy conditions:\n$w_j>0 \\forall j \\in [k]$\n$\\text{rank}(M)=k$\n\n\n\n\n\nPairwise and triple-wise probabilities:\n\n$\\text{Pairs}_{i,j} \\equiv \\Pr [\\vec{x}_1 = \\vec{e}_i, \\vec{x}_2 = \\vec{e}_j]$\n$\\text{Triples}_{i,j,k} \\equiv \\Pr [\\vec{x}_1 = \\vec{e}_i, \\vec{x}_2 = \\vec{e}_j,\\vec{x}_3 = \\vec{e}_k]$\nWe can also view $\\text{Pairs}$ and $\\text{Triples}$ as expectations of tensor products of the random vectors:\n$\\text{Pairs}_{i,j} = \\mathbb{E} [\\vec{x}_1 \\otimes \\vec{x}_2] $\n$\\text{Triples}_{i,j} = \\mathbb{E} [\\vec{x}_1 \\otimes \\vec{x}_2 \\otimes \\vec{x}_3] $\n\n\nWe can also view $\\text{Triples}$ as the following linear operator:\n$\\text{Triples} : \\mathbb{R}^d \\to \\mathbb{R}^{d\\times d}$\n$\\text{Triples} : \\vec{\\eta} \\mapsto \\mathbb{E}[(\\vec{x}_1 \\otimes \\vec{x}_2) \\langle \\vec{\\eta} , \\vec{x}_3 \\rangle]$\n$\\text{Triples}(\\vec{\\eta}){i,j} = \\sum{x=1}^d \\vec{\\eta}x \\text{Triples}{i,j,x} = \\sum_{x=1}^d \\vec{\\eta}x \\text{Triples}(\\vec{e}_x){i,j}$\n\n\nWe can now write $\\text{Pairs}$ and $\\text{Triples}$ in terms of the model parameters $M$ and $\\vec{w}$, since $\\vec{x}_1,\\vec{x}_2,\\vec{x}_3$ are conditionally dependent given $h$\n$\\text{Pairs} = M \\text{diag}(\\vec{w}) M^T$\n$\\text{Triples}(\\vec{\\eta}) = M \\text{diag}(M^T \\vec{\\eta}) \\text{diag}(\\vec{w}) M^T$\n\n\n\n\n\nObservable operators and their spectral peroperties\n\n[revisit!]\n\nAlgorithm A:\n\nEstimate $\\widehat{\\text{Pairs}}\\in \\mathbb{R}^{d \\times d}$ and $\\widehat{\\text{Triples}} \\in \\mathbb{R}^{d \\times d \\times d}$\nCompute truncated SVD of $\\widehat{\\text{Pairs}}$\nLet $\\hat{U} \\in \\mathbb{R}^{d \\times k}$ be the left singular vectors of $\\widehat{\\text{Pairs}}$ corresponding to its top $k$ singular values\nLet $\\hat{V} \\in \\mathbb{R}^{d \\times k}$ be the right singular vectors of $\\widehat{\\text{Pairs}}$ corresponding to its top $k$ singular values\n\n\nPick $\\vec{\\eta}\\in \\mathbb{R}^d$\nSelect randomly from $\\text{range}(\\hat{U})$, e.g. 
by:\n$\\vec{\\eta} \\leftarrow \\hat{U} \\vec{\\theta}$, where\n$\\theta \\in \\mathbb{R}^k$ is a random unit vector distributed uniformly over $\\mathcal{S}^{k-1}$\n\n\n\n\n\n\nCompute the observable operator $\\hat{B}(\\vec{\\eta}) \\equiv (\\hat{U}^T \\widehat{\\text{Triples}}(\\vec{\\eta}) \\hat{V})(\\hat{U}^T \\widehat{\\text{Pairs}} \\hat{V})^{-1}$\nCompute right eigenvectors $\\hat{\\xi}_1,\\hat{\\xi}_2,\\dots,\\hat{\\xi}_k$ of $\\hat{B}(\\vec{\\eta})$\nFor each $j \\in [k]$, let $$\\hat{\\mu}_j \\equiv \\frac{\\hat{U} \\hat{\\xi}_j}{\\langle \\vec{1},\\hat{U} \\hat{\\xi}_j \\rangle} $$\nReturn $\\hat{M} \\equiv [\\hat{\\mu_1} | \\hat{\\mu_2} | \\cdots | \\hat{\\mu_k} ]$", "import numpy as np\nimport numpy.random as npr\nimport scipy.linalg\n\ndef pairwise_probabilities(X):\n return X.T.dot(X)\n \ndef triplewise_probabilities(X):\n # inefficient, will revisit later\n return sum([np.einsum('i,j,k->ijk',x,x,x) for x in X])\n \ndef uniformly_sample_unit_sphere(k):\n ''' \n \n Parameters\n ----------\n \n k : int\n Dimensionality of sphere\n \n Returns\n -------\n \n theta : (k,), numpy.ndarray\n \n \n '''\n X = npr.rand(k)-0.5\n norm = np.sqrt(np.sum(X**2))\n theta = X / norm\n return theta\n \n\ndef create_triples_operator(triples):\n \n '''\n Represent a third-order tensor of triple-wise probabilities as a linear operator\n \n \n Parameters\n ----------\n triples : (d,d,d), numpy.ndarray\n \n \n Returns\n -------\n \n triples_operator : (d,k), numpy.ndarray\n each column represents a topic by a vector of word probabilities,\n \n '''\n \n def triples_operator(eta):\n ''' '''\n return sum([triples[:,:,i]*eta[i] for i in range(len(eta))])\n \n return triples_operator\n\ndef algorithm_A(pairs,triples,k):\n '''\n Given low-order moments, recover mixture probabilities\n \n \n Parameters\n ----------\n pairs : (d,d), numpy.ndarray\n \n triples : (d,d,d), numpy.ndarray\n \n \n Returns\n -------\n \n M : (d,k), numpy.ndarray\n each column represents a topic by a vector of word probabilities,\n \n '''\n d = len(pairs)\n \n # truncated svd of pairs\n U,s,V = np.linalg.svd(pairs)\n U_hat = U[:,:k]\n V_hat = V[:,:k]\n \n \n # pick eta from range(U_hat) in R^d\n theta = uniformly_sample_unit_sphere(k)\n eta = U_hat.dot(theta)\n \n # compute value of observable operator\n triples_operator = create_triples_operator(triples)\n oo_eta = np.dot( U_hat.T.dot(triples_operator(eta)).dot(V_hat), np.linalg.inv(U_hat.T.dot(pairs).dot(V_hat)))\n \n # compute top-k right eigenvectors of oo_eta\n # evals,evecs=np.linalg.eigh(oo_eta) \n evals,evecs = scipy.linalg.eigh(oo_eta)\n \n # compute mu's\n mus = [U_hat.dot(evec) for evec in evecs.T]\n mus = [mu/np.sum(mu) for mu in mus]\n\n # concatenate and return M\n M = np.vstack(mus).T\n return M\n\nd=100\nk=10\nU_hat = npr.rand(d,k)\nevecs = npr.rand(d,k)\nevec = evecs[:,0]\nU_hat.dot(evec).shape\n\n# computing low-order moments of temporally ordered data\n\n# generic, e.g. 
continuous valued vectors or one-hot-encoded discrete variables\ndef onestep_probabilities(X):\n return sum([np.einsum('i,j->ij',X[i],X[i+1]) for i in xrange(len(X)-1)])\n \ndef twostep_probabilities(X):\n return sum([np.einsum('i,j,k->ijk',X[i],X[i+1],X[i+2]) for i in xrange(len(X)-2)])\n\n\n# specific to lists of discrete trajectories\ndef discrete_onestep_probabilities(dtrajs):\n '''\n \n Parameters\n ----------\n dtrajs : list of array-like\n each element in dtrajs is a flat list/array of integers\n \n Returns\n -------\n P_12 : (d,d), numpy.ndarray\n \n '''\n \n d = np.max(np.hstack(dtrajs))+1\n P_12 = np.zeros((d,d))\n \n for traj in dtrajs:\n for i in xrange(len(traj)-1):\n P_12[traj[i],traj[i+1]] += 1\n \n return P_12 / len(np.hstack(dtrajs))\n \ndef discrete_twostep_probabilities(dtrajs):\n '''\n \n Parameters\n ----------\n dtrajs : list of array-like\n each element in dtrajs is a flat list/array of integers\n \n Returns\n -------\n P_123 : (d,d,d), numpy.ndarray\n \n '''\n \n d = np.max(np.hstack(dtrajs))+1\n P_123 = np.zeros((d,d,d))\n \n for traj in dtrajs:\n for i in xrange(len(traj)-2):\n P_123[traj[i],traj[i+1],traj[i+2]] += 1\n \n return P_123 / len(np.hstack(dtrajs))", "Possible errors in method of moments paper:\n- 3.3 Algorithm B: should be $P_{1,3} \\in R^{d\\times d}$, not $P_{1,3} \\in R^{k \\times k}$", "def sample_rotation_matrix(k):\n raise NotImplementedError\n\ndef algorithm_B(P_12, P_13, P_123, k):\n ''' \n \n General method of moments estimator.\n \n \n Parameters\n ----------\n \n P_12 : (d,d), numpy.ndarray\n Empirical average of tensor product of x_1 and x_2, (x_1 \\otimes x_2)\n \n P_13 : (d,d), numpy.ndarray\n Empirical average of tensor product of x_1 and x_3, (x_1 \\otimes x_3)\n \n P_123 : (d,d,d), numpy.ndarray\n Empirical average of tensor product of x_1, x_2, and x_3, (x_1 \\otimes x_2 \\otimes x_3)\n \n k : int\n number of latent mixture components\n \n Returns\n -------\n \n M_3 : (d,k), numpy.ndarray\n \n \n '''\n # check inputs are compatible shapes\n d = len(P_12)\n assert(P_12.shape==(d,d))\n assert(P_13.shape==(d,d))\n assert(P_123.shape==(d,d,d))\n assert(k<=d)\n \n \n # compute top-k left and right singular vectors of P_12\n U1,_,U2=np.linalg.svd(P_12)\n U1 = U1[:,:k]\n U2 = U2[:,:k]\n \n # compute top-k right singular vectors of P_13\n _,_,U3=np.linalg.svd(P_13)\n U3 = U3[:,:k]\n \n # pick invertible theta\n theta = sample_rotation_matrix(k)\n \n # form B_123(U3 theta[0])\n B_123 = (U1.T.dot(P_123).dot(U3.dot(theta[0])).dot(U2)).dot(np.linalg.inv(U1.T.dot(P_12).dot(U_2)))\n \n # compute R1 that diagonalizes B_123(U3 theta[0])\n raise NotImplementedError\n \n # form matrix L\n raise NotImplementedError\n \n # form and return M3\n M3 = U3.dot(np.linalg.inv(theta)).dot(L)\n return M3", "Multi-view mixture models\n\nGeneral setting:\n$k$ is the number mixture components\n$\\ell \\geq 3$ is the number of views\n$\\vec{w} \\in \\Delta^{k-1}$ is a vector of mixing weights\n$h$ is a discrete hidden random variable, with $\\Pr[h=j]=w_j$ for all $j \\in [k]$\n$\\vec{x}1,\\dots,\\vec{x}\\ell \\in \\mathbb{R}^d$ are $\\ell$ random vectors conditionally independent given $h$\nThe conditional mean vectors are:\n$$\\vec{\\mu}_{v,j} \\equiv \\mathbb{E}[\\vec{x}_v | h=j]$$\nwhere\n$v \\in [\\ell]$\n$ j \\in [k]$\n\n\nLet $M_v \\equiv [\\vec{\\mu}{v,1} | \\vec{\\mu}{v,2} | \\cdots | \\vec{\\mu}_{v,k} ] \\in \\mathbb{R}^{d\\times k}$\nNote: we don't specify anything else about the distributions of $\\vec{x}_v$, and they can be continuous, discrete, or 
hybrid\n\n\n\n\nNon-degeneracy conditions:\n$w_j > 0$ for $j \\in [k]$\n$\\text{rank}(M_v)=k$ for $v \\in [\\ell]$\n\n\n\nObservable moments and operators\n\n$P_{1,2}$\n$P_{1,2,3}$\n$B_{1,2,3}(\\vec{\\eta}) = (U_1^T M_1) \\text{diag} (M_3^T \\vec{\\eta}) (U_1^T M_1)^{-1} $ \n\nAlgorithm B: general estimation procedure\n\nCompute empirical averages to form $\\hat{P}{1,2}$, $\\hat{P}{1,3}$, and $\\hat{P}_{1,2,3}$\nCompute $\\hat{U}_1$\nCompute $\\hat{U}_2$\nCompute $\\hat{U}_3$\nPick invertible matrix $\\Theta$\nE.g. a random rotation matrix\n\n\nForm $\\hat{B}_{1,2,3}$\nDiagonalize $\\hat{B}_{1,2,3}$\nForm the matrix $\\hat{L}$\nReturn $\\hat{M}_3 \\equiv \\hat{U}_3 \\Theta^{-1} \\hat{L}$ \n\nAn HMM is an instance of a 3-view mixture model:\n\n$\\vec{x}_1,\\vec{x}_2,\\vec{x}_3$ are conditionally independent given $h_2$\nParameters of three-view mixture model on $(h,\\vec{x}_1,\\vec{x}_2,\\vec{x}_3)$ are:\n$\\vec{w} \\equiv T \\hat{\\pi}$\n$M_1 \\equiv O \\text{diag}(\\vec{\\pi}) T^T \\text{diag} (T \\vec{pi})^{-1}$\n$M_2 \\equiv O$\n$M_3 \\equiv OT$\n\n\n$B_{3,1,2}(\\vec{\\eta}) = (U_3^T O T ) \\text{diag}(O^T\\vec{\\eta}) (U_3^T O T)^{-1}$\nThe HMM parameters are then given by:\nTransition matrix: $T = (U_3^T O)^{-1} R$, where\n$R$ is the matrix of right eigenvectors of $B_{3,1,2}(\\vec{\\eta})$\n\n\nConditional mean matrix: $O$", "# generate a random instance\nnpr.seed(0)\nk = 10 # number of topics\nd = 100 # number of words\n\n# distribution over topics\nw = npr.rand(k)\nw /= np.sum(w)\n\n# conditional distributions over words, given topics\nM = npr.rand(d,k)\nM /= M.sum(0)\n\n%%time\n\nd=100\nk=10\nU = npr.rand(d,k)\nV = npr.rand(d,k)\npairs = npr.rand(d,d)\ntriples = npr.rand(d,d,d)\neta = npr.rand(d)\n\ndef triples_operator(triples,eta):\n return sum([triples[:,:,i]*eta[i] for i in range(len(eta))])\n\ntriples_eta = triples_operator(triples,eta)\n\noo = np.dot( U.T.dot(triples_eta).dot(V), np.linalg.inv(U.T.dot(pairs).dot(V)))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
GoogleCloudPlatform/mlops-with-vertex-ai
04-pipeline-deployment.ipynb
apache-2.0
[ "04 - Test and Deploy Training Pipeline to Vertex Pipelines\nThe purpose of this notebook is to test, deploy, and run the TFX pipeline on Vertex Pipelines. The notebook covers the following tasks:\n1. Run the tests locally.\n2. Run the pipeline using Vertex Pipelines\n3. Execute the pipeline deployment CI/CD steps using Cloud Build.\nSetup\nImport libraries", "import os\nimport kfp\nimport tfx\n\nprint(\"Tensorflow Version:\", tfx.__version__)\nprint(\"KFP Version:\", kfp.__version__)", "Setup Google Cloud project", "PROJECT = '[your-project-id]' # Change to your project id.\nREGION = 'us-central1' # Change to your region.\nBUCKET = '[your-bucket-name]' # Change to your bucket name.\nSERVICE_ACCOUNT = \"[your-service-account]\"\n\nif PROJECT == \"\" or PROJECT is None or PROJECT == \"[your-project-id]\":\n # Get your GCP project id from gcloud\n shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null\n PROJECT = shell_output[0]\n \nif SERVICE_ACCOUNT == \"\" or SERVICE_ACCOUNT is None or SERVICE_ACCOUNT == \"[your-service-account]\":\n # Get your GCP project id from gcloud\n shell_output = !gcloud config list --format 'value(core.account)' 2>/dev/null\n SERVICE_ACCOUNT = shell_output[0]\n \nif BUCKET == \"\" or BUCKET is None or BUCKET == \"[your-bucket-name]\":\n # Get your bucket name to GCP project id\n BUCKET = PROJECT\n # Try to create the bucket if it doesn't exists\n ! gsutil mb -l $REGION gs://$BUCKET\n print(\"\")\n \nprint(\"Project ID:\", PROJECT)\nprint(\"Region:\", REGION)\nprint(\"Bucket name:\", BUCKET)\nprint(\"Service Account:\", SERVICE_ACCOUNT)", "Set configurations", "BQ_LOCATION = 'US'\nBQ_DATASET_NAME = 'playground_us' # Change to your BQ dataset name.\nBQ_TABLE_NAME = 'chicago_taxitrips_prep'\n\nVERSION = 'v01'\nDATASET_DISPLAY_NAME = 'chicago-taxi-tips'\nMODEL_DISPLAY_NAME = f'{DATASET_DISPLAY_NAME}-classifier-{VERSION}'\nPIPELINE_NAME = f'{MODEL_DISPLAY_NAME}-train-pipeline'\n\nCICD_IMAGE_NAME = 'cicd:latest'\nCICD_IMAGE_URI = f\"gcr.io/{PROJECT}/{CICD_IMAGE_NAME}\"\n\n!rm -r src/raw_schema/.ipynb_checkpoints/", "1. Run the CICD steps locally\nSet pipeline configurations for the local run", "os.environ[\"DATASET_DISPLAY_NAME\"] = DATASET_DISPLAY_NAME\nos.environ[\"MODEL_DISPLAY_NAME\"] = MODEL_DISPLAY_NAME\nos.environ[\"PIPELINE_NAME\"] = PIPELINE_NAME\nos.environ[\"PROJECT\"] = PROJECT\nos.environ[\"REGION\"] = REGION\nos.environ[\"BQ_LOCATION\"] = BQ_LOCATION\nos.environ[\"BQ_DATASET_NAME\"] = BQ_DATASET_NAME\nos.environ[\"BQ_TABLE_NAME\"] = BQ_TABLE_NAME\nos.environ[\"GCS_LOCATION\"] = f\"gs://{BUCKET}/{DATASET_DISPLAY_NAME}/e2e_tests\"\nos.environ[\"TRAIN_LIMIT\"] = \"1000\"\nos.environ[\"TEST_LIMIT\"] = \"100\"\nos.environ[\"UPLOAD_MODEL\"] = \"0\"\nos.environ[\"ACCURACY_THRESHOLD\"] = \"0.1\"\nos.environ[\"BEAM_RUNNER\"] = \"DirectRunner\"\nos.environ[\"TRAINING_RUNNER\"] = \"local\"\n\nfrom src.tfx_pipelines import config\nimport importlib\nimportlib.reload(config)\n\nfor key, value in config.__dict__.items():\n if key.isupper(): print(f'{key}: {value}')", "Run unit tests", "!py.test src/tests/datasource_utils_tests.py -s\n\n!py.test src/tests/model_tests.py -s", "Run e2e pipeline test", "!py.test src/tests/pipeline_deployment_tests.py::test_e2e_pipeline -s", "2. 
Run the training pipeline using Vertex Pipelines\nSet the pipeline configurations for the Vertex AI run", "os.environ[\"DATASET_DISPLAY_NAME\"] = DATASET_DISPLAY_NAME\nos.environ[\"MODEL_DISPLAY_NAME\"] = MODEL_DISPLAY_NAME\nos.environ[\"PIPELINE_NAME\"] = PIPELINE_NAME\nos.environ[\"PROJECT\"] = PROJECT\nos.environ[\"REGION\"] = REGION\nos.environ[\"GCS_LOCATION\"] = f\"gs://{BUCKET}/{DATASET_DISPLAY_NAME}\"\nos.environ[\"TRAIN_LIMIT\"] = \"85000\"\nos.environ[\"TEST_LIMIT\"] = \"15000\"\nos.environ[\"BEAM_RUNNER\"] = \"DataflowRunner\"\nos.environ[\"TRAINING_RUNNER\"] = \"vertex\"\nos.environ[\"TFX_IMAGE_URI\"] = f\"gcr.io/{PROJECT}/{DATASET_DISPLAY_NAME}:{VERSION}\"\nos.environ[\"ENABLE_CACHE\"] = \"1\"\n\nfrom src.tfx_pipelines import config\nimport importlib\nimportlib.reload(config)\n\nfor key, value in config.__dict__.items():\n if key.isupper(): print(f'{key}: {value}')", "Build the ML container image\nThis is the TFX runtime environment for the training pipeline steps.", "!echo $TFX_IMAGE_URI\n\n!gcloud builds submit --tag $TFX_IMAGE_URI . --timeout=15m --machine-type=e2-highcpu-8", "Compile pipeline", "from src.tfx_pipelines import runner\n\npipeline_definition_file = f'{config.PIPELINE_NAME}.json'\npipeline_definition = runner.compile_training_pipeline(pipeline_definition_file)\n\nPIPELINES_STORE = f\"gs://{BUCKET}/{DATASET_DISPLAY_NAME}/compiled_pipelines/\"\n!gsutil cp {pipeline_definition_file} {PIPELINES_STORE}", "Submit run to Vertex Pipelines", "from kfp.v2.google.client import AIPlatformClient\n\npipeline_client = AIPlatformClient(\n project_id=PROJECT, region=REGION)\n \njob = pipeline_client.create_run_from_job_spec(\n job_spec_path=pipeline_definition_file,\n parameter_values={\n 'learning_rate': 0.003,\n 'batch_size': 512,\n 'hidden_units': '128,128',\n 'num_epochs': 30,\n }\n)", "Extracting pipeline runs metadata", "from google.cloud import aiplatform as vertex_ai\n\npipeline_df = vertex_ai.get_pipeline_df(PIPELINE_NAME)\npipeline_df = pipeline_df[pipeline_df.pipeline_name == PIPELINE_NAME]\npipeline_df.T", "3. Execute the pipeline deployment CI/CD steps in Cloud Build\nThe CI/CD routine is defined in the pipeline-deployment.yaml file, and consists of the following steps:\n1. Clone the repository to the build environment.\n2. Run unit tests.\n3. Run a local e2e test of the pipeline.\n4. Build the ML container image for pipeline steps.\n5. Compile the pipeline.\n6. Upload the pipeline to Cloud Storage.\nBuild CI/CD container Image for Cloud Build\nThis is the runtime environment where the steps of testing and deploying the pipeline will be executed.", "!echo $CICD_IMAGE_URI\n\n!gcloud builds submit --tag $CICD_IMAGE_URI build/. 
--timeout=15m --machine-type=e2-highcpu-8", "Run CI/CD from pipeline deployment using Cloud Build", "REPO_URL = \"https://github.com/GoogleCloudPlatform/mlops-with-vertex-ai.git\" # Change to your github repo.\nBRANCH = \"main\"\n\nGCS_LOCATION = f\"gs://{BUCKET}/{DATASET_DISPLAY_NAME}/\"\nTEST_GCS_LOCATION = f\"gs://{BUCKET}/{DATASET_DISPLAY_NAME}/e2e_tests\"\nCI_TRAIN_LIMIT = 1000\nCI_TEST_LIMIT = 100\nCI_UPLOAD_MODEL = 0\nCI_ACCURACY_THRESHOLD = 0.1\nBEAM_RUNNER = \"DataflowRunner\"\nTRAINING_RUNNER = \"vertex\"\nVERSION = 'tfx-1.2'\nPIPELINE_NAME = f'{MODEL_DISPLAY_NAME}-train-pipeline'\nPIPELINES_STORE = os.path.join(GCS_LOCATION, \"compiled_pipelines\")\n\nTFX_IMAGE_URI = f\"gcr.io/{PROJECT}/{DATASET_DISPLAY_NAME}:{VERSION}\"\n\nSUBSTITUTIONS=f\"\"\"\\\n_REPO_URL='{REPO_URL}',\\\n_BRANCH={BRANCH},\\\n_CICD_IMAGE_URI={CICD_IMAGE_URI},\\\n_PROJECT={PROJECT},\\\n_REGION={REGION},\\\n_GCS_LOCATION={GCS_LOCATION},\\\n_TEST_GCS_LOCATION={TEST_GCS_LOCATION},\\\n_BQ_LOCATION={BQ_LOCATION},\\\n_BQ_DATASET_NAME={BQ_DATASET_NAME},\\\n_BQ_TABLE_NAME={BQ_TABLE_NAME},\\\n_DATASET_DISPLAY_NAME={DATASET_DISPLAY_NAME},\\\n_MODEL_DISPLAY_NAME={MODEL_DISPLAY_NAME},\\\n_CI_TRAIN_LIMIT={CI_TRAIN_LIMIT},\\\n_CI_TEST_LIMIT={CI_TEST_LIMIT},\\\n_CI_UPLOAD_MODEL={CI_UPLOAD_MODEL},\\\n_CI_ACCURACY_THRESHOLD={CI_ACCURACY_THRESHOLD},\\\n_BEAM_RUNNER={BEAM_RUNNER},\\\n_TRAINING_RUNNER={TRAINING_RUNNER},\\\n_TFX_IMAGE_URI={TFX_IMAGE_URI},\\\n_PIPELINE_NAME={PIPELINE_NAME},\\\n_PIPELINES_STORE={PIPELINES_STORE}\\\n\"\"\"\n\n!echo $SUBSTITUTIONS\n\n!gcloud builds submit --no-source --timeout=60m --config build/pipeline-deployment.yaml --substitutions {SUBSTITUTIONS} --machine-type=e2-highcpu-8" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
yafeunteun/wikipedia-spam-classifier
notebooks/spam_detection_wikipedia_fv.ipynb
mit
[ "Spam detection\nThe main aim of this project is to build a machine learning classifier that is able to automatically detect \nspammy articles, based on their content.", "! sh bootstrap.sh\n\nfrom sklearn.cluster import KMeans\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport random\nfrom sklearn.utils import shuffle\nfrom sklearn.metrics import f1_score\nfrom sklearn.cross_validation import KFold\nfrom sklearn.metrics import recall_score\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import BernoulliNB\n%matplotlib inline\n\n#Load testing dataset \ndf_test = pd.read_csv(\"enwiki.draft_quality.50k_stratified.feature_labels.tsv\", sep=\"\\t\")\n\n#Replace strings with integers : 1 for OK and 0 for Not OK\ndf_test[\"draft_quality\"] = df_test[\"draft_quality\"].replace({\"OK\" : 1, \"vandalism\" : 0, \"spam\" : 0, \"attack\" : 0})\n#Put features and labels on differents dataframes\nX_test=df_test.drop([\"draft_quality\"], 1)\nY_test=df_test[\"draft_quality\"]\n\n# Loading training dataset \ndf_train = pd.read_csv(\"enwiki.draft_quality.201608-201701.feature_labels.tsv\", sep=\"\\t\")\ndf_train[\"draft_quality\"] = df_train[\"draft_quality\"].replace({\"OK\" : 1, \"vandalism\" : 0, \"spam\" : 0, \"attack\" : 0})\nX_train=df_train.drop([\"draft_quality\"], 1)\nY_train=df_train[\"draft_quality\"]\n\n# Converting dataframes to array\nX_test=np.array(X_test)\nY_test=np.array(Y_test)\nX_train=np.array(X_train)\nY_train=np.array(Y_train)\n\n#lenghts of boths datasets\nprint(\"Test set length: %d\" % len(X_test))\nprint(\"Train set length: %d\" % len(X_train))", "Custom Helper Function Definitions", "from sklearn.metrics import roc_curve, auc\n\n# Compute ROC curve and ROC area \ndef compute_roc_and_auc(y_predict, y_true):\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n fpr, tpr, _ = roc_curve(y_predict, y_true)\n roc_auc = auc(fpr, tpr)\n return roc_auc, fpr, tpr\n\n# Plot of a ROC curve\ndef plot_roc(roc_auc, fpr, tpr): \n plt.figure()\n lw = 2\n plt.plot(fpr, tpr, color='darkorange',\n lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n plt.show()", "Modeling\nWe tried out various models and selected the best performing models (with the best performing parameter settings for each model). At the end, we retained 3 models which are:\n1. Naïve Bayes Gaussian\n2. Random forest\n3. 
Logistic regression\nNaïve Bayes Gaussian", "weights=np.array([0.7,1-0.7])\nclf = BernoulliNB(alpha=22, class_prior=weights)\nclf.fit(X_train, Y_train)\nprediction_nb=clf.predict(X_test)\nconfusion=confusion_matrix(Y_test, prediction_nb, labels=None)\nprint(confusion)\nrecall=confusion[0,0]/(confusion[0,0]+confusion[0,1])\nprecision=confusion[0,0]/(confusion[0,0]+confusion[1,0])\nprint(\"Over all the not-OK articles included in the dataset, we detect:\")\nprint(recall)\nprint(\"Over all the articles predicted as being not-OK, only this proportion is really not-OK:\")\nprint(precision)\n\nroc_auc, fpr, tpr = compute_roc_and_auc(prediction_nb, Y_test)\n\nprint (plot_roc(roc_auc, fpr, tpr))", "logistic regression", "clf2 = LogisticRegression(penalty='l1', random_state=0, class_weight={1:0.1, 0: 0.9})\nclf2.fit(X_train, Y_train)\nprediction_lr=clf2.predict(X_test)\nconfusion=confusion_matrix(Y_test, prediction_lr, labels=None)\nprint(confusion)\nrecall=confusion[0,0]/(confusion[0,0]+confusion[0,1])\nprecision=confusion[0,0]/(confusion[0,0]+confusion[1,0])\nprint(\"Over all the not-OK articles included in the dataset, we detect:\")\nprint(recall)\nprint(\"Over all the articles predicted as being not-OK, only this proportion is really not-OK:\")\nprint(precision)\n\nroc_auc, fpr, tpr = compute_roc_and_auc(prediction_lr, Y_test)\n\nprint (plot_roc(roc_auc, fpr, tpr))", "random forest", "clf3 = RandomForestClassifier(n_jobs=16, n_estimators=2, min_samples_leaf=1, random_state=25, class_weight={1:0.9, 0: 0.1})\nclf3.fit(X_train, Y_train)\nprediction_rf=clf3.predict(X_test)\nconfusion=confusion_matrix(Y_test, prediction_rf, labels=None)\nprint(confusion)\nrecall=confusion[0,0]/(confusion[0,0]+confusion[0,1])\nprecision=confusion[0,0]/(confusion[0,0]+confusion[1,0])\nprint(\"Over all the not-OK articles included in the dataset, we detect:\")\nprint(recall)\nprint(\"Over all the articles predicted as being not-OK, only this proportion is really not-OK:\")\nprint(precision)\n\nroc_auc, fpr, tpr = compute_roc_and_auc(prediction_rf, Y_test)\n\nprint (plot_roc(roc_auc, fpr, tpr))", "Combination 1\nWe decided to try combining these models in order to construct a better and more consistent one.\nvoting system", "#Here we construct our voting function\ndef voting(pred1, pred2, pred3):\n final_prediction=np.zeros_like(pred1)\n for i in range(len(pred1)):\n if pred1[i]==pred2[i]:\n final_prediction[i]=pred1[i]\n elif pred1[i]==pred3[i]:\n final_prediction[i]=pred1[i]\n elif pred2[i]==pred3[i]:\n final_prediction[i]=pred2[i]\n return final_prediction\n\n#Here we make the prediction using voting function (with the three models defined above)\nprediction= voting(prediction_lr, prediction_nb, prediction_rf)\nfrom sklearn.metrics import confusion_matrix\nconfusion=confusion_matrix(Y_test, prediction, labels=None)\nprint(confusion)\nrecall=confusion[0,0]/(confusion[0,0]+confusion[0,1])\nprecision=confusion[0,0]/(confusion[0,0]+confusion[1,0])\nprint(\"Over all the not-OK articles included in the dataset, we detect:\")\nprint(recall)\nprint(\"Over all the articles predicted as being not-OK, only this proportion is really not-OK:\")\nprint(precision)\n\nroc_auc, fpr, tpr = compute_roc_and_auc(prediction, Y_test)\n\nprint (plot_roc(roc_auc, fpr, tpr))", "customizing", "#Since we are interested in negatives (not-OK) we will analyze how many times a model detects a not-OK article while\n#the others don't\ndef get_missclasified_indexes(pred1, Y_true, Class):\n index_list=[]\n a=0\n b=1\n if Class==\"negative\":\n a=1\n 
b=0 \n for i in range(len(pred1)):\n if pred1[i]==a and Y_true[i]==b:\n index_list.append(i)\n return index_list\n\nfalse_negative_indexes=get_missclasified_indexes(prediction, Y_test, \"negative\")\nprint(len(prediction[false_negative_indexes]))\nprint(np.sum(prediction_nb[false_negative_indexes]!=prediction[false_negative_indexes]))\nprint(np.sum(prediction_rf[false_negative_indexes]!=prediction[false_negative_indexes]))\nprint(np.sum(prediction_lr[false_negative_indexes]!=prediction[false_negative_indexes]))\n\n##Here we define our function based on the results above\ndef voting_customized(pred1, pred2, pred3):\n final_prediction=np.zeros_like(pred1)\n for i in range(len(pred1)):\n if pred1[i]==0:\n final_prediction[i]=0\n else:\n final_prediction[i]=pred3[i]\n return final_prediction\n\n#making a prediction with our new function\nprediction= voting_customized(prediction_lr, prediction_nb, prediction_rf)\nconfusion=confusion_matrix(Y, prediction, labels=None)\nprint(confusion)\nrecall=confusion[0,0]/(confusion[0,0]+confusion[0,1])\nprecision=confusion[0,0]/(confusion[0,0]+confusion[1,0])\nprint(\"Over all the not-OK articles included in the dataset, we detect:\")\nprint(recall)\nprint(\"Over all the articles predicted as being not-OK, only this proportion is really not-OK:\")\nprint(precision)\n\nroc_auc, fpr, tpr = compute_roc_and_auc(prediction, Y_test)\nprint (plot_roc(roc_auc, fpr, tpr))\n\nfalse_negative_indexes=get_missclasified_indexes(prediction, Y, \"negative\")\nprint(len(prediction[false_negative_indexes]))\nprint(np.sum(prediction_nb[false_negative_indexes]!=prediction[false_negative_indexes]))\nprint(np.sum(prediction_rf[false_negative_indexes]!=prediction[false_negative_indexes]))\nprint(np.sum(prediction_lr[false_negative_indexes]!=prediction[false_negative_indexes]))", "Here you can see that benefited from the good behavior of the logistic regression and the random forest. By contrast,\nwe couldn't do the same with the naive bayse, because, this makes as missclassify a lot of OK articles, which leads to\na low precision.\nCombination 2\nNow, we would like the capture more of the not-OK articles. To this end, we decided to include a few false positives \nin the training datasets. In order so in an intelligent way and to select some representative samples, we first \nanalyzed these false positives.", "from scipy.cluster.hierarchy import dendrogram, linkage\nZ = linkage(X[false_negative_indexes], 'ward')\nplt.figure(figsize=(25, 25))\nplt.title('Hierarchical Clustering Dendrogram')\nplt.xlabel('sample index')\nplt.ylabel('distance')\ndendrogram(\n Z,\n leaf_rotation=90., \n leaf_font_size=11., \n)\nplt.show()", "This means that we have two big clusters of false positives (green and red). 
We have chosen to pick up \nrandomly 50 samples of each cluster.", "#we perform a kmeans clustering with 2 clusters\nkmeans = KMeans(n_clusters=2, random_state=0).fit(X[false_negative_indexes])\ncluster_labels=kmeans.labels_\nprint(cluster_labels)\nprint(np.unique(cluster_labels))\n\n#Picking up the sapmles from theclusters and adding them to the training dataset.\n\nfalse_negatives_cluster0=[]\nfalse_negatives_cluster1=[]\n\nfor i in range(1,11):\n random.seed(a=i)\n false_negatives_cluster0.append(random.choice([w for index_w, w in enumerate(false_negative_indexes) if cluster_labels[index_w] == 0]))\nfor i in range(1,11):\n random.seed(a=i)\n false_negatives_cluster1.append(random.choice([w for index_w, w in enumerate(false_negative_indexes) if cluster_labels[index_w] == 1]))\n\n\n#adding 1st cluster's samples\nY_train=np.reshape(np.dstack(Y_train), (len(Y_train),1))\ntemp_arr=np.array([Y_test[false_negatives_cluster0]])\ntemp_arr=np.reshape(np.dstack(temp_arr), (10,1))\n\nX_train_new = np.vstack((X_train, X_test[false_negatives_cluster0]))\nY_train_new=np.vstack((Y_train, temp_arr))\n# Second\ntemp_arr2=np.array([Y_test[false_negatives_cluster1]])\ntemp_arr2=np.reshape(np.dstack(temp_arr2), (10,1))\n\nX_train_new = np.vstack((X_train_new, X_test[false_negatives_cluster1]))\nY_train_new=np.vstack((Y_train_new, temp_arr2))\n\nY_train_new=np.reshape(np.dstack(Y_train_new), (len(Y_train_new),))\nX_train = X_train_new\nY_train = Y_train_new", "Now we do the prediction again\nrandom forest", "clf3.fit(X_train, Y_train)\nprediction_rf_new=clf3.predict(X_test)\nconfusion=confusion_matrix(Y_test, prediction_rf_new, labels=None)\nprint(confusion)\nrecall=confusion[0,0]/(confusion[0,0]+confusion[0,1])\nprecision=confusion[0,0]/(confusion[0,0]+confusion[1,0])\nprint(\"Over all the not-OK articles included in the dataset, we detect:\")\nprint(recall)\nprint(\"Over all the articles predicted as being not-OK, only this proportion is really not-OK:\")\nprint(precision)\n\n\nroc_auc, fpr, tpr = compute_roc_and_auc(prediction_rf_new, Y_test)\nprint (plot_roc(roc_auc, fpr, tpr))", "logistic regression", "clf2.fit(X_train, Y_train)\nprediction_lr_new=clf2.predict(X_test)\nconfusion=confusion_matrix(Y_test, prediction_lr_new, labels=None)\nprint(confusion)\nrecall=confusion[0,0]/(confusion[0,0]+confusion[0,1])\nprecision=confusion[0,0]/(confusion[0,0]+confusion[1,0])\nprint(\"Over all the not-OK articles included in the dataset, we detect:\")\nprint(recall)\nprint(\"Over all the articles predicted as being not-OK, only this proportion is really not-OK:\")\nprint(precision)\n\nroc_auc, fpr, tpr = compute_roc_and_auc(prediction_lr_new, Y_test)\nprint (plot_roc(roc_auc, fpr, tpr)) ", "Naive Bayse", "from sklearn.naive_bayes import BernoulliNB\nweights=np.array([0.7,1-0.7])\nclf = BernoulliNB(alpha=22, class_prior=weights)\nclf.fit(X_train, Y_train)\nprediction_nb_new=clf.predict(X_test)\nconfusion=confusion_matrix(Y_test, prediction_nb_new, labels=None)\nprint(confusion)\nrecall=confusion[0,0]/(confusion[0,0]+confusion[0,1])\nprecision=confusion[0,0]/(confusion[0,0]+confusion[1,0])\nprint(\"Over all the not-OK articles included in the dataset, we detect:\")\nprint(recall)\nprint(\"Over all the articles predicted as being not-OK, only this proportion is really not-OK:\")\nprint(precision)\n\nroc_auc, fpr, tpr = compute_roc_and_auc(prediction_nb_new, Y_test)\nprint (plot_roc(roc_auc, fpr, tpr)) ", "Voting", "prediction= voting(prediction_lr_new, prediction_nb_new, 
prediction_rf_new)\nconfusion=confusion_matrix(Y_test, prediction, labels=None)\nprint(confusion)\nrecall=confusion[0,0]/(confusion[0,0]+confusion[0,1])\nprecision=confusion[0,0]/(confusion[0,0]+confusion[1,0])\nprint(\"Over all the not-OK articles included in the dataset, we detect:\")\nprint(recall)\nprint(\"Over all the articles predicted as being not-OK, only this proportion is really not-OK:\")\nprint(precision)\n\nroc_auc, fpr, tpr = compute_roc_and_auc(prediction, Y_test)\nprint (plot_roc(roc_auc, fpr, tpr)) ", "Customizing", "def voting_customized2(pred1, pred2, pred3):\n final_prediction=np.zeros_like(pred1)\n for i in range(len(pred1)):\n if pred1[i]==0:\n final_prediction[i]=0 \n else:\n final_prediction[i]=pred2[i]\n return final_prediction\n\nprediction= voting_customized2(prediction_lr_new, prediction_nb_new, prediction_rf_new)\nconfusion=confusion_matrix(Y, prediction, labels=None)\nprint(confusion)\nrecall=confusion[0,0]/(confusion[0,0]+confusion[0,1])\nprecision=confusion[0,0]/(confusion[0,0]+confusion[1,0])\nprint(\"Over all the not-OK articles included in the dataset, we detect:\")\nprint(recall)\nprint(\"Over all the articles predicted as being not-OK, only this proportion is really not-OK:\")\nprint(precision)\n\nroc_auc, fpr, tpr = compute_roc_and_auc(prediction, Y_test)\nprint (plot_roc(roc_auc, fpr, tpr)) " ]
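The voting_customized rules above are cascades (trust one classifier's not-OK calls and otherwise defer to another) rather than plain majority votes. The sketch below contrasts the two rules on toy 0/1 prediction vectors; the arrays and helper names are illustrative assumptions and are not taken from the notebook.

```python
import numpy as np

def majority_vote(pred1, pred2, pred3):
    """Plain majority vote over three binary (0/1) prediction vectors."""
    stacked = np.vstack([pred1, pred2, pred3])
    # A sample is labelled 1 only if at least two of the three classifiers say 1.
    return (stacked.sum(axis=0) >= 2).astype(int)

def cascade_vote(trusted, fallback):
    """Keep the trusted classifier's 0 (not-OK) calls, defer to the fallback otherwise."""
    trusted = np.asarray(trusted)
    fallback = np.asarray(fallback)
    return np.where(trusted == 0, 0, fallback)

# Toy prediction vectors, for illustration only.
p_lr = np.array([0, 1, 1, 0, 1])
p_nb = np.array([0, 0, 1, 1, 1])
p_rf = np.array([1, 1, 1, 0, 0])

print(majority_vote(p_lr, p_nb, p_rf))  # [0 1 1 0 1]
print(cascade_vote(p_lr, p_rf))         # [0 1 1 0 0]
```

The cascade ties recall on the not-OK class to the trusted model while letting the fallback decide the remaining cases, which is exactly the trade-off discussed above.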
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
kit-cel/wt
mloc/ch1_Preliminaries/MIMO_least_squares_detection.ipynb
gpl-2.0
[ "MIMO Least Squares Detection\nThis code is provided as supplementary material of the lecture Machine Learning and Optimization in Communications (MLOC).<br>\nThis code illustrates:\n* Toy example of MIMO Detection with constrained least-squares\n* Implementation of constrained least squares via gradient descent", "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline ", "We want to transmit $x$ over a MIMO channel $H\\in \\mathbb{R}^{k \\times n}$. \nThe receiver measures $y$, which is the result of\n$y=Hx$. At the receiver side, we have channel state information (CSI) and therefore know $H$. \nSpecify the simulation paramters. \nYou can vary $k$ (number of receive antennas) but leave $n$ (number of transmit antennas) fixed \nif you want to get a graphical output.", "n = 2 # Number of TX antennas. Leave n fixed to 2!\nk = 3 # Number of RX antennas.You can play around with k.\nx = np.random.rand(n) # Transmit data (random).\nx = x/np.linalg.norm(x) * np.random.rand() # Normalize x to a transmit energy in [0,1].\nH = np.random.randn(k, n) # MIMO channel (random).\ny = np.dot(H, x) # Apply channel to data.\nprint(\"x =\",x)", "Now, we want to estimate $\\boldsymbol{x}$ by using a Least-Square Detector:\n$\\min\\limits_{\\boldsymbol{x}} ||\\boldsymbol{H}\\boldsymbol{x}-\\boldsymbol{y}||_2^2$.\nThis is a minimization problem.\nThe first approach is a line search with gradient descent direction and fixed step length.", "delta = 1e-9 # Threshold for stopping criterion.\nepsilon = 1e-4 # Step length.\nmax_iter = 100000\n\n# Initial guess.\ninit_xg = np.random.rand(*x.shape)*1.4\nxg = init_xg\n\n# Gradient descent line search.\npoints = []\nwhile len(points) < max_iter:\n points.append(xg)\n grad = 2*H.T.dot(H).dot(xg)-2*np.dot(H.T,y) # Calc gradient at current position.\n if np.linalg.norm(grad) < delta:\n break\n xg = xg - 2*epsilon*grad\nprint(\"xg =\",xg)", "Plots:\n * [left subplot]: The function and the trajectory of the line search. \n The minimum at $x$ is marked with a red cross and \n the first guess with a green cross.\n * [right subplot]: The euclidean distance of the trajectory \n to the minimum at each iteration.", "def obj_func(mesh):\n return np.linalg.norm(np.tensordot(H, mesh, axes=1)-y[:, np.newaxis, np.newaxis], axis=0)**2\n\n# Least-Square function.doing a matrix multiplication for a mesh\nx_grid = np.arange(-1.5, 1.5, 0.02)\ny_grid = np.arange(-1.5, 1.5, 0.02)\nX, Y = np.meshgrid(x_grid, y_grid)\nfZ = obj_func([X, Y])\n\n# Line search trajectory.\ntrajectory_x = [points[i][0] for i in range(len(points))]\ntrajectory_y = [points[i][1] for i in range(len(points))]\n\n\nfont = {'size' : 20}\nplt.rc('font', **font)\nplt.rc('text', usetex=True)\nparams= {'text.latex.preamble' : [r'\\usepackage{amsmath}']}\nplt.rcParams.update(params)\n\nplt.figure(1,figsize=(15,6))\nplt.rcParams.update({'font.size': 15})\nplt.subplot(121)\nplt.contourf(X,Y,fZ,levels=20)\nplt.colorbar()\nplt.xlabel(\"$x_1$\")\nplt.ylabel(\"$x_2$\")\nplt.plot(trajectory_x, trajectory_y,marker='.',color='w',linewidth=2)\nplt.plot(x[0], x[1], marker='x',color='r',markersize=12, markeredgewidth=2)\nplt.plot(init_xg[0],init_xg[1], marker='x',color='g',markersize=12, markeredgewidth=2)\nplt.subplot(122)\nplt.plot(range(0,len(points)),[np.linalg.norm(p-x) for p in points])\nplt.grid(True)\nplt.xlabel(\"Step $i$\")\nplt.ylabel(r\"$\\Vert f(\\boldsymbol{x}^{(i)})-\\boldsymbol{x}\\Vert_2$\")\nplt.show()\n", "Now we use Newton's method. 
It reaches the minimum in one step, \nbecause the objective function is quadratic (Least-Square).", "xh = np.linalg.inv(H.T.dot(H)).dot(H.T).dot(y)\nprint('xh = ', xh)\n", "A limitation of the transmit signal energy is known.\n$\\boldsymbol{x}^T\\boldsymbol{x} \\leq 1$.\nWe add this information as a constraint to the problem with the use\nof a Lagrange multiplier. \nUse gradient descent direction to find the optimal $\\boldsymbol{x}$ of the new constrained\nproblem.", "max_iter = 100000\nlam = 5 # Init value for lambda.\ninit_xg = np.random.rand(*x.shape)*1.4 # Initial guess.\nxg = init_xg\n\npoints = []\nwhile len(points) < max_iter:\n points.append(xg)\n xg = np.linalg.inv(H.T.dot(H)+lam*np.identity(n)).dot(H.T).dot(y)\n lam = lam - epsilon*(1-xg.T.dot(xg))\n if np.abs(1-xg.T.dot(xg)) < delta or lam < delta:\n break\nprint(xg)", "Plots:\n * [left subplot]: The function and the trajectory of the line search. \n The minimum at $x$ is marked with a red cross and \n the first guess with a green cross. The constraint is displayed with a black line.\n * [right subplot]: The euclidean distance of the trajectory \n to the minimum at each iteration.", "trajectory_x = [points[i][0] for i in range(len(points))]\ntrajectory_y = [points[i][1] for i in range(len(points))]\n\nx_grid = np.arange(-1.5, 1.5, 0.02)\ny_grid = np.arange(-1.5, 1.5, 0.02)\nX, Y = np.meshgrid(x_grid, y_grid)\nfZ = obj_func([X, Y])\n\nplt.figure(1,figsize=(15,6))\nplt.subplot(121)\nfig = plt.gcf()\nax = fig.gca()\nplt.rcParams.update({'font.size': 14})\nplt.contourf(X,Y,fZ,levels=20)\nplt.colorbar()\nplt.xlabel(\"$x_1$\")\nplt.ylabel(\"$x_2$\")\ncircle = plt.Circle((0,0),radius=1, fill=False, color='r')\nax.add_artist(circle)\nplt.plot(trajectory_x, trajectory_y,marker='.',color='w',linewidth=2)\nplt.plot(x[0],x[1], marker='x',color='r',markersize=12, markeredgewidth=2)\nplt.plot(init_xg[0],init_xg[1], marker='x',color='g',markersize=12, markeredgewidth=2)\nplt.subplot(122)\nplt.plot(range(0,len(points)),[np.linalg.norm(p-x) for p in points])\nplt.grid(True)\nplt.xlabel(\"Step $i$\")\nplt.ylabel(r\"$\\Vert f(\\boldsymbol{x}^{(i)})-\\boldsymbol{x}\\Vert$\")\nplt.show()\n" ]
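The Lagrange-multiplier loop above alternates a closed-form primal solve with a dual update. An equally simple alternative, sketched below on a toy random channel, is projected gradient descent: take an ordinary gradient step on ||Hx - y||^2 and then project back onto the feasible set x^T x <= 1. The step size and iteration count are arbitrary choices for this sketch, not values from the notebook.

```python
import numpy as np

rng = np.random.default_rng(0)
k, n = 3, 2
H = rng.standard_normal((k, n))                           # toy MIMO channel
x_true = rng.random(n)
x_true = x_true / np.linalg.norm(x_true) * rng.random()   # transmit energy in [0, 1]
y = H @ x_true

x = rng.random(n)              # initial guess
step = 1e-3
for _ in range(20000):
    grad = 2 * H.T @ (H @ x - y)   # gradient of ||Hx - y||^2
    x = x - step * grad            # gradient step
    norm = np.linalg.norm(x)
    if norm > 1.0:                 # project back onto the ball x^T x <= 1
        x = x / norm

print("true x   :", x_true)
print("estimate :", x)
```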
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
astro4dev/OAD-Data-Science-Toolkit
Teaching Materials/Programming/Python/PythonISYA2018/04.Astropy/02_coordinates.ipynb
gpl-3.0
[ "Time and coordinates", "import matplotlib \nmatplotlib.use('Agg')\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n%matplotlib inline ", "We are going to use astropy to find out whether the Large Magellanic Cloud (LMC) is visible from the a given observatory at a given time and date. \nIn the process we need to manipulate different coordinates and time definitions.", "import astropy.units as u\nfrom astropy.time import Time\nfrom astropy.coordinates import SkyCoord, EarthLocation, AltAz", "Let's start by getting the coordinates of the LMC", "lmc_center = SkyCoord.from_name('LMC')\n\nlmc_center", "lmc_center is an instance of a class astropy.coordinates.sky_coordinate.SkyCoord", "type(lmc_center) ", "The full list of attributes and methods can be found using dir()", "dir(lmc_center)\n\n# To get the ra and dec we print the corresponding attribute\nprint(lmc_center.ra, lmc_center.dec) # units of degrees for RA\nprint(lmc_center.ra.hour, lmc_center.dec) # units of hours for RA", "An optional way to initialize an object belonging to the class SkyCoord would be\npython\noption = SkyCoord('0h39m00', '0d53m1s', frame='icrs')\nTo find out whether the LMC will be visible from the observatory, we have to define \nthe observatory location and the time of the year.\nLet's assume that we are going to observe from SALT (Southern African Large Telescope).", "SALT = EarthLocation.of_site(\"Southern African Large Telescope\")\n\nSALT.lat, SALT.lon, SALT.height", "You can get a list of observatory locations with:\npython\nEarthLocation.get_site_names()\nIf your observatory is not listed in astropy you can initialize its location using\npython\nmy_observatory = EarthLocation(lat=4.0*u.deg, lon=-75.0*u.deg, height=4000*u.m)\nNow let's fix the observation date and time. We are going to use a different class for that", "time = Time('2017-11-11 21:00:00') # That's in Universal Time Coordinated!\n\ntime", "We now have all the elements to compute the Altitude + Azimuth coordinates of the LMC at SALT location on November 11th 2017 at 9PM UTC.", "lmg_altaz = lmc_center.transform_to(AltAz(obstime=time,location=SALT))\n\nprint(lmg_altaz.az, lmg_altaz.alt) ", "With 42 degrees altitude it looks like the LMC was observable with SALT on November 11th 2017 at 9PM UTC!\nExercise 2.1\nPlot the altitude of M31 (the Andromeda galaxy) at Las Campanas Observatory on March 10th 2019 between 6PM and 6AM LOCAL TIME.\nHint The following Python code is valid to get the time 30 minutes later than a given time and date.\npython\ntime = Time('2019-03-10 18:00:00') + 0.5*u.hour\nWill be M31 observable that night at Las Campanas?" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
quole/gensim
docs/notebooks/Word2Vec_FastText_Comparison.ipynb
lgpl-2.1
[ "Comparison of FastText and Word2Vec\nFacebook Research open sourced a great project recently - fastText, a fast (no surprise) and effective method to learn word representations and perform text classification. I was curious about comparing these embeddings to other commonly used embeddings, so word2vec seemed like the obvious choice, especially considering fastText embeddings are an extension of word2vec. \nI've used gensim to train the word2vec models, and the analogical reasoning task (described in Section 4.1 of [2]) for comparing the word2vec and fastText models. I've compared embeddings trained using the skipgram architecture.\nDownload data", "import nltk\nnltk.download('brown') \n# Only the brown corpus is needed in case you don't have it.\n\n# Generate brown corpus text file\nwith open('brown_corp.txt', 'w+') as f:\n for word in nltk.corpus.brown.words():\n f.write('{word} '.format(word=word))\n\n# Make sure you set FT_HOME to your fastText directory root\nFT_HOME = 'fastText/'\n# download the text8 corpus (a 100 MB sample of cleaned wikipedia text)\nimport os.path\nif not os.path.isfile('text8'):\n !wget -c http://mattmahoney.net/dc/text8.zip\n !unzip text8.zip\n# download and preprocess the text9 corpus\nif not os.path.isfile('text9'):\n !wget -c http://mattmahoney.net/dc/enwik9.zip\n !unzip enwik9.zip\n !perl {FT_HOME}wikifil.pl enwik9 > text9", "Train models\nFor training the models yourself, you'll need to have both Gensim and FastText set up on your machine.", "MODELS_DIR = 'models/'\n!mkdir -p {MODELS_DIR}\n\nlr = 0.05\ndim = 100\nws = 5\nepoch = 5\nminCount = 5\nneg = 5\nloss = 'ns'\nt = 1e-4\n\nfrom gensim.models import Word2Vec, KeyedVectors\nfrom gensim.models.word2vec import Text8Corpus\n\n# Same values as used for fastText training above\nparams = {\n 'alpha': lr,\n 'size': dim,\n 'window': ws,\n 'iter': epoch,\n 'min_count': minCount,\n 'sample': t,\n 'sg': 1,\n 'hs': 0,\n 'negative': neg\n}\n\ndef train_models(corpus_file, output_name):\n output_file = '{:s}_ft'.format(output_name)\n if not os.path.isfile(os.path.join(MODELS_DIR, '{:s}.vec'.format(output_file))):\n print('Training fasttext on {:s} corpus..'.format(corpus_file))\n %time !{FT_HOME}fasttext skipgram -input {corpus_file} -output {MODELS_DIR+output_file} -lr {lr} -dim {dim} -ws {ws} -epoch {epoch} -minCount {minCount} -neg {neg} -loss {loss} -t {t}\n else:\n print('\\nUsing existing model file {:s}.vec'.format(output_file))\n \n output_file = '{:s}_ft_no_ng'.format(output_name)\n if not os.path.isfile(os.path.join(MODELS_DIR, '{:s}.vec'.format(output_file))):\n print('\\nTraining fasttext on {:s} corpus (without char n-grams)..'.format(corpus_file))\n %time !{FT_HOME}fasttext skipgram -input {corpus_file} -output {MODELS_DIR+output_file} -lr {lr} -dim {dim} -ws {ws} -epoch {epoch} -minCount {minCount} -neg {neg} -loss {loss} -t {t} -maxn 0\n else:\n print('\\nUsing existing model file {:s}.vec'.format(output_file))\n \n output_file = '{:s}_gs'.format(output_name)\n if not os.path.isfile(os.path.join(MODELS_DIR, '{:s}.vec'.format(output_file))):\n print('\\nTraining word2vec on {:s} corpus..'.format(corpus_file))\n \n # Text8Corpus class for reading space-separated words file\n %time gs_model = Word2Vec(Text8Corpus(corpus_file), **params); gs_model\n # Direct local variable lookup doesn't work properly with magic statements (%time)\n locals()['gs_model'].wv.save_word2vec_format(os.path.join(MODELS_DIR, '{:s}.vec'.format(output_file)))\n print('\\nSaved gensim model as {:s}.vec'.format(output_file))\n 
else:\n print('\\nUsing existing model file {:s}.vec'.format(output_file))\n\nevaluation_data = {}\ntrain_models('brown_corp.txt', 'brown')\n\ntrain_models(corpus_file='text8', output_name='text8')\n\ntrain_models(corpus_file='text9', output_name='text9')", "Comparisons", "# download the file questions-words.txt to be used for comparing word embeddings\n!wget https://raw.githubusercontent.com/tmikolov/word2vec/master/questions-words.txt", "Once you have downloaded or trained the models and downloaded questions-words.txt, you're ready to run the comparison.", "import logging\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\n# Training times in seconds\nevaluation_data['brown'] = [(18, 54.3, 32.5)]\nevaluation_data['text8'] = [(402, 942, 496)]\nevaluation_data['text9'] = [(3218, 6589, 3550)]\n\ndef print_accuracy(model, questions_file):\n print('Evaluating...\\n')\n acc = model.accuracy(questions_file)\n\n sem_correct = sum((len(acc[i]['correct']) for i in range(5)))\n sem_total = sum((len(acc[i]['correct']) + len(acc[i]['incorrect'])) for i in range(5))\n sem_acc = 100*float(sem_correct)/sem_total\n print('\\nSemantic: {:d}/{:d}, Accuracy: {:.2f}%'.format(sem_correct, sem_total, sem_acc))\n \n syn_correct = sum((len(acc[i]['correct']) for i in range(5, len(acc)-1)))\n syn_total = sum((len(acc[i]['correct']) + len(acc[i]['incorrect'])) for i in range(5,len(acc)-1))\n syn_acc = 100*float(syn_correct)/syn_total\n print('Syntactic: {:d}/{:d}, Accuracy: {:.2f}%\\n'.format(syn_correct, syn_total, syn_acc))\n return (sem_acc, syn_acc)\n\nword_analogies_file = 'questions-words.txt'\naccuracies = []\nprint('\\nLoading Gensim embeddings')\nbrown_gs = KeyedVectors.load_word2vec_format(MODELS_DIR + 'brown_gs.vec')\nprint('Accuracy for Word2Vec:')\naccuracies.append(print_accuracy(brown_gs, word_analogies_file))\n\nprint('\\nLoading FastText embeddings')\nbrown_ft = KeyedVectors.load_word2vec_format(MODELS_DIR + 'brown_ft.vec')\nprint('Accuracy for FastText (with n-grams):')\naccuracies.append(print_accuracy(brown_ft, word_analogies_file))", "The accuracy takes an optional parameter restrict_vocab, which limits the vocabulary of model considered for fast approximate evaluation (default is 30000).\nWord2Vec embeddings seem to be slightly better than fastText embeddings at the semantic tasks, while the fastText embeddings do significantly better on the syntactic analogies. Makes sense, since fastText embeddings are trained for understanding morphological nuances, and most of the syntactic analogies are morphology based. \nLet me explain that better.\nAccording to the paper [1], embeddings for words are represented by the sum of their n-gram embeddings. This is meant to be useful for morphologically rich languages - so theoretically, the embedding for apparently would include information from both character n-grams apparent and ly (as well as other n-grams), and the n-grams would combine in a simple, linear manner. 
This is very similar to what most of our syntactic tasks look like.\nExample analogy:\namazing amazingly calm calmly\nThis analogy is marked correct if: \nembedding(amazing) - embedding(amazingly) = embedding(calm) - embedding(calmly)\nBoth these subtractions would result in a very similar set of remaining ngrams.\nNo surprise the fastText embeddings do extremely well on this.\nLet's do a small test to validate this hypothesis - fastText differs from word2vec only in that it uses char n-gram embeddings as well as the actual word embedding in the scoring function to calculate scores and then likelihoods for each word, given a context word. In case char n-gram embeddings are not present, this reduces (atleast theoretically) to the original word2vec model. This can be implemented by setting 0 for the max length of char n-grams for fastText.", "print('Loading FastText embeddings')\nbrown_ft_no_ng = KeyedVectors.load_word2vec_format(MODELS_DIR + 'brown_ft_no_ng.vec')\nprint('Accuracy for FastText (without n-grams):')\naccuracies.append(print_accuracy(brown_ft_no_ng, word_analogies_file))\nevaluation_data['brown'] += [[acc[0] for acc in accuracies], [acc[1] for acc in accuracies]]", "A-ha! The results for FastText with no n-grams and Word2Vec look a lot more similar (as they should) - the differences could easily result from differences in implementation between fastText and Gensim, and randomization. Especially telling is that the semantic accuracy for FastText has improved slightly after removing n-grams, while the syntactic accuracy has taken a giant dive. Our hypothesis that the char n-grams result in better performance on syntactic analogies seems fair. It also seems possible that char n-grams hurt semantic accuracy a little. However, the brown corpus is too small to be able to draw any definite conclusions - the accuracies seem to vary significantly over different runs.\nLet's try with a larger corpus now - text8 (collection of wiki articles). I'm also curious about the impact on semantic accuracy - for models trained on the brown corpus, the difference in the semantic accuracy and the accuracy values themselves are too small to be conclusive. Hopefully a larger corpus helps, and the text8 corpus likely has a lot more information about capitals, currencies, cities etc, which should be relevant to the semantic tasks.", "accuracies = []\nprint('Loading Gensim embeddings')\ntext8_gs = KeyedVectors.load_word2vec_format(MODELS_DIR + 'text8_gs.vec')\nprint('Accuracy for word2vec:')\naccuracies.append(print_accuracy(text8_gs, word_analogies_file))\n\nprint('Loading FastText embeddings (with n-grams)')\ntext8_ft = KeyedVectors.load_word2vec_format(MODELS_DIR + 'text8_ft.vec')\nprint('Accuracy for FastText (with n-grams):')\naccuracies.append(print_accuracy(text8_ft, word_analogies_file))\n\nprint('Loading FastText embeddings')\ntext8_ft_no_ng = KeyedVectors.load_word2vec_format(MODELS_DIR + 'text8_ft_no_ng.vec')\nprint('Accuracy for FastText (without n-grams):')\naccuracies.append(print_accuracy(text8_ft_no_ng, word_analogies_file))\n\nevaluation_data['text8'] += [[acc[0] for acc in accuracies], [acc[1] for acc in accuracies]]", "With the text8 corpus, we observe a similar pattern. Semantic accuracy falls by a small but significant amount when n-grams are included in FastText, while FastText with n-grams performs far better on the syntactic analogies. 
FastText without n-grams are largely similar to Word2Vec.\nMy hypothesis for semantic accuracy being lower for the FastText-with-ngrams model is that most of the words in the semantic analogies are standalone words and are unrelated to their morphemes (eg: father, mother, France, Paris), hence inclusion of the char n-grams into the scoring function actually makes the embeddings worse.\nThis trend is observed in the original paper too where the performance of embeddings with n-grams is worse on semantic tasks than both word2vec cbow and skipgram models.\nLet's do a quick comparison on an even larger corpus - text9", "accuracies = []\nprint('Loading Gensim embeddings')\ntext9_gs = KeyedVectors.load_word2vec_format(MODELS_DIR + 'text9_gs.vec')\nprint('Accuracy for word2vec:')\naccuracies.append(print_accuracy(text9_gs, word_analogies_file))\n\nprint('Loading FastText embeddings (with n-grams)')\ntext9_ft = KeyedVectors.load_word2vec_format(MODELS_DIR + 'text9_ft.vec')\nprint('Accuracy for FastText (with n-grams):')\naccuracies.append(print_accuracy(text9_ft, word_analogies_file))\n\nprint('Loading FastText embeddings')\ntext9_ft_no_ng = KeyedVectors.load_word2vec_format(MODELS_DIR + 'text9_ft_no_ng.vec')\nprint('Accuracy for FastText (without n-grams):')\naccuracies.append(print_accuracy(text9_ft_no_ng, word_analogies_file))\n\nevaluation_data['text9'] += [[acc[0] for acc in accuracies], [acc[1] for acc in accuracies]]\n\n%matplotlib inline\nimport matplotlib.pyplot as plt\n\ndef plot(ax, data, corpus_name='brown'):\n width = 0.25\n pos = [(i, i + width, i + 2*width) for i in range(len(data))]\n colors = ['#EE3224', '#F78F1E', '#FFC222']\n acc_ax = ax.twinx()\n # Training time\n ax.bar(pos[0],\n data[0],\n width,\n alpha=0.5,\n color=colors\n )\n # Semantic accuracy\n acc_ax.bar(pos[1],\n data[1],\n width,\n alpha=0.5,\n color=colors\n )\n\n # Syntactic accuracy\n acc_ax.bar(pos[2],\n data[2],\n width,\n alpha=0.5,\n color=colors\n )\n\n ax.set_ylabel('Training time (s)')\n acc_ax.set_ylabel('Accuracy (%)')\n ax.set_title(corpus_name)\n\n acc_ax.set_xticks([p[0] + 1.5 * width for p in pos])\n acc_ax.set_xticklabels(['Training Time', 'Semantic Accuracy', 'Syntactic Accuracy'])\n\n # Proxy plots for adding legend correctly\n proxies = [ax.bar([0], [0], width=0, color=c, alpha=0.5)[0] for c in colors]\n models = ('Gensim', 'FastText', 'FastText (no-ngrams)')\n ax.legend((proxies), models, loc='upper left')\n \n ax.set_xlim(pos[0][0]-width, pos[-1][0]+width*4)\n ax.set_ylim([0, max(data[0])*1.1] )\n acc_ax.set_ylim([0, max(data[1] + data[2])*1.1] )\n\n plt.grid()\n\n# Plotting the bars\nfig = plt.figure(figsize=(10,15))\nfor corpus, subplot in zip(sorted(evaluation_data.keys()), [311, 312, 313]):\n ax = fig.add_subplot(subplot)\n plot(ax, evaluation_data[corpus], corpus)\n\nplt.show()", "The results from text9 seem to confirm our hypotheses so far. Briefly summarising the main points -\n\nFastText models with n-grams do significantly better on syntactic tasks, because of the syntactic questions being related to morphology of the words\nBoth Gensim word2vec and the fastText model with no n-grams do slightly better on the semantic tasks, presumably because words from the semantic questions are standalone words and unrelated to their char n-grams\nIn general, the performance of the models seems to get closer with the increasing corpus size. 
However, this might possibly be due to the size of the model staying constant at 100, and a larger model size for large corpora might result in higher performance gains.\nThe semantic accuracy for all models increases significantly with the increase in corpus size.\nHowever, the increase in syntactic accuracy from the increase in corpus size for the n-gram FastText model is lower (in both relative and absolute terms). This could possibly indicate that advantages gained by incorporating morphological information could be less significant in case of larger corpus sizes (the corpuses used in the original paper seem to indicate this too)\nTraining times for gensim are slightly lower than the fastText no-ngram model, and significantly lower than the n-gram variant. This is quite impressive considering fastText is implemented in C++ and Gensim in Python (with calls to low-level BLAS routines for much of the heavy lifting). You could read this post for more details regarding word2vec optimisation in Gensim. Note that these times include importing any dependencies and serializing the models to disk, and not just the training times.\n\nConclusions\nThese preliminary results seem to indicate fastText embeddings are significantly better than word2vec at encoding syntactic information. This is expected, since most syntactic analogies are morphology based, and the char n-gram approach of fastText takes such information into account. The original word2vec model seems to perform better on semantic tasks, since words in semantic analogies are unrelated to their char n-grams, and the added information from irrelevant char n-grams worsens the embeddings. It'd be interesting to see how transferable these embeddings are for different kinds of tasks by comparing their performance in a downstream supervised task.\nReferences\n[1] Enriching Word Vectors with Subword Information\n[2] Efficient Estimation of Word Representations in Vector Space" ]
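To make the subword argument above concrete, here is a toy rendition (my own helper, not gensim or fastText code) of the character n-grams fastText would associate with a word, using the '<' and '>' boundary symbols from the paper. Morphologically related words share the n-grams that cover their common affixes, which is why the syntactic analogies benefit.

```python
def char_ngrams(word, nmin=3, nmax=6):
    """Character n-grams of a word, with fastText-style boundary symbols."""
    token = '<' + word + '>'
    grams = set()
    for n in range(nmin, nmax + 1):
        for i in range(len(token) - n + 1):
            grams.add(token[i:i + n])
    return grams

shared = char_ngrams('amazingly') & char_ngrams('calmly')
print(sorted(shared))   # the n-grams covering the shared suffix, e.g. 'ly>'
```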
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ledeprogram/algorithms
class9/homework/benzaquen_mercy_9_2.ipynb
gpl-3.0
[ "Using the readings, try and create a RandomForestClassifier for the iris dataset\nUsing a 25/75 training/test split, compare the results with the original decision tree model and describe the result to the best of your ability in your PR", "import pandas as pd\n%matplotlib inline\nfrom sklearn import datasets\nfrom sklearn import tree\nfrom sklearn import metrics\nimport numpy as np\nfrom sklearn import cross_validation\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.cross_validation import train_test_split\n\niris = datasets.load_iris()\n\niris\n\nx = iris.data[:,2:] \ny = iris.target", "RandomForestClassifier", "x_train, x_test, y_train, y_test = cross_validation.train_test_split(x, y, stratify=y,random_state=42)\nforest = RandomForestClassifier(n_estimators=5, random_state=2)\nforest.fit(x_train, y_train)\n\nprint(\"accuracy on training set: %f\" % forest.score(x_train, y_train))\nprint(\"accuracy on test set: %f\" % forest.score(x_test, y_test))", "Original decision tree model", "dt = tree.DecisionTreeClassifier()\n\nx_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.25,train_size=0.75)\n\ndt = dt.fit(x_train,y_train) \n\ndef measure_performance(X,y,clf, show_accuracy=True, show_classification_report=True, show_confussion_matrix=True):\n y_pred=clf.predict(X)\n if show_accuracy:\n print(\"Accuracy:{0:.3f}\".format(metrics.accuracy_score(y, y_pred)),\"\\n\")\n if show_classification_report:\n print(\"Classification report\")\n print(metrics.classification_report(y,y_pred),\"\\n\")\n if show_confussion_matrix:\n print(\"Confusion matrix\")\n print(metrics.confusion_matrix(y,y_pred),\"\\n\")\n\nmeasure_performance(x_train,y_train,dt) \n# I measure the performance of my classifier with train data\n#The accuracy is 1, which means is 100% accurate. \n#And my confusion matrix is not showing mistakes in the classification\n\nmeasure_performance(x_test,y_test,dt)\n# I measure the performance of my classifier with test data\n# Accuracy of 100%", "For the RandomForestClassifier\naccuracy on training set: 0.981982\naccuracy on test set: 0.923077\nFor the Original decision tree model\naccuracy on training set: 1.000 \naccuracy on test set: 0.974\nMy main takeaway is that random forests are a way of addressing the problem of overfitting. Decision trees tend to overfit the training data, and since random forests are made up of a number of these decision trees, they are all going to overfit the data in different ways. So what we do is averaging the results of all of the trees in our random forest to get a more accurate fit.\nThe accuracy of the training set for the Random Forest Classifier is of 98% (and I am not sure about the following ...) which means that the model is not overfitting. On the contrary, the accuracy of the training set for the desicion tree model is of 100%, which probably means is overfitting. The accuracy test for the decision tree model is better than the one for the random forest classifier, which confused me a little bit since I was expecting the one for the random forest classifier to be better. If the data is not overfitted, the model is more likely to be more accurate right?" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ajmendez/explore
cupid/clean.ipynb
mit
[ "OKCupid Clean Data\nOKCupid's website returns some partially hidden text when it is too long for their layout.\nLets skip these and just focus on the fully named places.", "%matplotlib inline\nimport time\nimport pylab\nimport numpy as np\nimport pandas as pd\nimport pycupid.locations\n\npeople = pd.read_json('/Users/ajmendez/data/okcupid/random.json')\nprint('Scraping archive found {:,d} random people'.format(len(people)))", "Feature: Lat/Lon\n\nLocations are generally [city name], [state abbr].\nHowever there are a number of locations that where too long for the search page and are abbreviated with a unicode u'\\u2026'\nLets ignore these places on our first pass and then return to them later -- ~14% loss of locations", "locations = people['location'].astype(unicode)#.replace(r'\\s+', np.nan, regex=True)\nisgood = (locations.str.extract((u'(\\u2026)')).isnull()) & (locations.str.len() > 0)\nnoriginal = len(locations.unique())\nunique_locations = locations[isgood].unique()\nnlocations = len(unique_locations)\nprint('There are a total of {:,d} unique locations and {:,d} good ones'.format(noriginal, nlocations))\nprint(' > missing locations: {:0.1f}%'.format((noriginal-nlocations)*100.0/noriginal))\nprint(' > missing people: {:0.1f}%'.format((len(locations)-len(np.where(isgood)[0]))*100.0/len(locations)))", "Geolocation APIs have hourly limits, so this was originally run using a cron job nightly to build up a large map of locations to (lat/lon)", "# does not seem to pickup the lat/lon notation from the old db\nlocation_map = pd.read_json('/Users/ajmendez/data/okcupid/location_map.json', orient='index')\nlocation_map.columns = ['lat', 'lon']\nprint('Location cache contains {:,d} locations'.format(len(location_map)))\n\n# load v2:\nlocation_map = pd.read_json('/Users/ajmendez/data/okcupid/locations_v2.json', orient='index')\n\ngeonames = pycupid.locations.getGN()\ninew = 0\nfor i, location in enumerate(unique_locations):\n if location in location_map.index:\n continue\n print u'Getting location: {}'.format(location)\n try:\n loc, (lat, lon) = geonames.geocode(location.encode('utf8'))\n except Exception as e:\n print u' > Failed: {}'.format(location)\n# raise e\n \n # too many loc* names!\n location_map.loc[location] = [lat,lon]\n inew += 1\n \n # give the API a bit of a break\n time.sleep(0.2)\n \n if inew > 1000:\n break\nprint len(location_map)\n\nlocation_map.to_json('/Users/ajmendez/data/okcupid/locations_v2.json', orient='index')", "User Table\nFor simplicity store lat/lon within user table. 
A location table and a user table would be better.", "finished = []\nfor i, location in enumerate(location_map.index):\n if location in finished:\n continue\n tmp = location_map.loc[location]\n isloc = (locations == location)\n people.loc[isloc, 'lat'] = tmp['lat']\n people.loc[isloc, 'lon'] = tmp['lon']\n people.loc[isloc, 'nloc'] = isloc.sum()\n finished.append(location)\n if (i%1000 == 0):\n print i,\n \n\n# better plots later, this is just a test\npeople.plot('lon', 'lat', kind='scatter', s=2, lw=0, alpha=0.1)\n\npeople.to_csv('/Users/ajmendez/data/okcupid/random_v2.csv', encoding='utf-8')", "Feature: Numbers in Usernames\nExtract the integers that are in each username", "people = pd.read_csv('/Users/ajmendez/data/okcupid/random_v2.csv')\n\ntmp = people['username'].str.extract((u'(\\d+)'))\npeople['username_number'] = tmp.apply(lambda x: int(x) if isinstance(x, (str, unicode)) else np.nan)\npeople['username_nlength'] = tmp.apply(lambda x: len(x) if isinstance(x, (str,unicode)) else 0)\n\npeople.to_csv('/Users/ajmendez/data/okcupid/random_v3.csv', encoding='utf-8')", "Feature: Name Groups", "names = ['dinosaur', 'saur','saurus', 'dino','jurassic', 'rex', 'sarus', \n 'pterodactyl', 'archaeopter', 'pteranod', 'pterodact']\npeople['hasdino'] = people['username'].str.lower().str.extract((u'({})'.format('|'.join(names)))).notnull()\n\npeople.to_csv('/Users/ajmendez/data/okcupid/random_v4.csv', encoding='utf-8')", "Write as json for archive tools", "people = pd.read_csv('/Users/ajmendez/data/okcupid/random_v2.csv')\npeople.to_json('/Users/ajmendez/data/okcupid/random_v2.json', orient='index')" ]
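The username-number feature can be checked in isolation on made-up usernames; the sketch below is a Python 3 rendition of the same regex extraction (the sample usernames are invented, not from the scrape).

```python
import pandas as pd

toy = pd.DataFrame({'username': ['dinofan92', 'trexlover', 'saurus2016', 'nodigitshere']})
nums = toy['username'].str.extract(r'(\d+)', expand=False)
toy['username_number'] = pd.to_numeric(nums)                      # NaN where no digits
toy['username_nlength'] = nums.str.len().fillna(0).astype(int)    # 0 where no digits
print(toy)
```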
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
herruzojm/udacity-deep-learning
autoencoder/Convolutional_Autoencoder_Solution.ipynb
mit
[ "Convolutional Autoencoder\nSticking with the MNIST dataset, let's improve our autoencoder's performance using convolutional layers. Again, loading modules and the data.", "%matplotlib inline\n\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST_data', validation_size=0)\n\nimg = mnist.train.images[2]\nplt.imshow(img.reshape((28, 28)), cmap='Greys_r')", "Network Architecture\nThe encoder part of the network will be a typical convolutional pyramid. Each convolutional layer will be followed by a max-pooling layer to reduce the dimensions of the layers. The decoder though might be something new to you. The decoder needs to convert from a narrow representation to a wide reconstructed image. For example, the representation could be a 4x4x8 max-pool layer. This is the output of the encoder, but also the input to the decoder. We want to get a 28x28x1 image out from the decoder so we need to work our way back up from the narrow decoder input layer. A schematic of the network is shown below.\n<img src='assets/convolutional_autoencoder.png' width=500px>\nHere our final encoder layer has size 4x4x8 = 128. The original images have size 28x28 = 784, so the encoded vector is roughly 16% the size of the original image. These are just suggested sizes for each of the layers. Feel free to change the depths and sizes, but remember our goal here is to find a small representation of the input data.\nWhat's going on with the decoder\nOkay, so the decoder has these \"Upsample\" layers that you might not have seen before. First off, I'll discuss a bit what these layers aren't. Usually, you'll see transposed convolution layers used to increase the width and height of the layers. They work almost exactly the same as convolutional layers, but in reverse. A stride in the input layer results in a larger stride in the transposed convolution layer. For example, if you have a 3x3 kernel, a 3x3 patch in the input layer will be reduced to one unit in a convolutional layer. Comparatively, one unit in the input layer will be expanded to a 3x3 path in a transposed convolution layer. The TensorFlow API provides us with an easy way to create the layers, tf.nn.conv2d_transpose. \nHowever, transposed convolution layers can lead to artifacts in the final images, such as checkerboard patterns. This is due to overlap in the kernels which can be avoided by setting the stride and kernel size equal. In this Distill article from Augustus Odena, et al, the authors show that these checkerboard artifacts can be avoided by resizing the layers using nearest neighbor or bilinear interpolation (upsampling) followed by a convolutional layer. In TensorFlow, this is easily done with tf.image.resize_images, followed by a convolution. Be sure to read the Distill article to get a better understanding of deconvolutional layers and why we're using upsampling.\n\nExercise: Build the network shown above. Remember that a convolutional layer with strides of 1 and 'same' padding won't reduce the height and width. That is, if the input is 28x28 and the convolution layer has stride = 1 and 'same' padding, the convolutional layer will also be 28x28. The max-pool layers are used the reduce the width and height. A stride of 2 will reduce the size by 2. 
Odena et al claim that nearest neighbor interpolation works best for the upsampling, so make sure to include that as a parameter in tf.image.resize_images or use tf.image.resize_nearest_neighbor.", "inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')\ntargets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')\n\n### Encoder\nconv1 = tf.layers.conv2d(inputs_, 16, (3,3), padding='same', activation=tf.nn.relu)\n# Now 28x28x16\nmaxpool1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='same')\n# Now 14x14x16\nconv2 = tf.layers.conv2d(maxpool1, 8, (3,3), padding='same', activation=tf.nn.relu)\n# Now 14x14x8\nmaxpool2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='same')\n# Now 7x7x8\nconv3 = tf.layers.conv2d(maxpool2, 8, (3,3), padding='same', activation=tf.nn.relu)\n# Now 7x7x8\nencoded = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='same')\n# Now 4x4x8\n\n### Decoder\nupsample1 = tf.image.resize_nearest_neighbor(encoded, (7,7))\n# Now 7x7x8\nconv4 = tf.layers.conv2d(upsample1, 8, (3,3), padding='same', activation=tf.nn.relu)\n# Now 7x7x8\nupsample2 = tf.image.resize_nearest_neighbor(conv4, (14,14))\n# Now 14x14x8\nconv5 = tf.layers.conv2d(upsample2, 8, (3,3), padding='same', activation=tf.nn.relu)\n# Now 14x14x8\nupsample3 = tf.image.resize_nearest_neighbor(conv5, (28,28))\n# Now 28x28x8\nconv6 = tf.layers.conv2d(upsample3, 16, (3,3), padding='same', activation=tf.nn.relu)\n# Now 28x28x16\n\nlogits = tf.layers.conv2d(conv6, 1, (3,3), padding='same', activation=None)\n#Now 28x28x1\n\ndecoded = tf.nn.sigmoid(logits, name='decoded')\n\nloss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)\ncost = tf.reduce_mean(loss)\nopt = tf.train.AdamOptimizer(0.001).minimize(cost)", "Training\nAs before, here wi'll train the network. Instead of flattening the images though, we can pass them in as 28x28x1 arrays.", "sess = tf.Session()\n\nepochs = 20\nbatch_size = 200\nsess.run(tf.global_variables_initializer())\nfor e in range(epochs):\n for ii in range(mnist.train.num_examples//batch_size):\n batch = mnist.train.next_batch(batch_size)\n imgs = batch[0].reshape((-1, 28, 28, 1))\n batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: imgs,\n targets_: imgs})\n\n print(\"Epoch: {}/{}...\".format(e+1, epochs),\n \"Training loss: {:.4f}\".format(batch_cost))\n\nfig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))\nin_imgs = mnist.test.images[:10]\nreconstructed = sess.run(decoded, feed_dict={inputs_: in_imgs.reshape((10, 28, 28, 1))})\n\nfor images, row in zip([in_imgs, reconstructed], axes):\n for img, ax in zip(images, row):\n ax.imshow(img.reshape((28, 28)), cmap='Greys_r')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n\nfig.tight_layout(pad=0.1)\n\nsess.close()", "Denoising\nAs I've mentioned before, autoencoders like the ones you've built so far aren't too useful in practive. However, they can be used to denoise images quite successfully just by training the network on noisy images. We can create the noisy images ourselves by adding Gaussian noise to the training images, then clipping the values to be between 0 and 1. We'll use noisy images as input and the original, clean images as targets. Here's an example of the noisy images I generated and the denoised images.\n\nSince this is a harder problem for the network, we'll want to use deeper convolutional layers here, more feature maps. 
I suggest something like 32-32-16 for the depths of the convolutional layers in the encoder, and the same depths going backward through the decoder. Otherwise the architecture is the same as before.\n\nExercise: Build the network for the denoising autoencoder. It's the same as before, but with deeper layers. I suggest 32-32-16 for the depths, but you can play with these numbers, or add more layers.", "inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')\ntargets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')\n\n### Encoder\nconv1 = tf.layers.conv2d(inputs_, 32, (3,3), padding='same', activation=tf.nn.relu)\n# Now 28x28x32\nmaxpool1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='same')\n# Now 14x14x32\nconv2 = tf.layers.conv2d(maxpool1, 32, (3,3), padding='same', activation=tf.nn.relu)\n# Now 14x14x32\nmaxpool2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='same')\n# Now 7x7x32\nconv3 = tf.layers.conv2d(maxpool2, 16, (3,3), padding='same', activation=tf.nn.relu)\n# Now 7x7x16\nencoded = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='same')\n# Now 4x4x16\n\n### Decoder\nupsample1 = tf.image.resize_nearest_neighbor(encoded, (7,7))\n# Now 7x7x16\nconv4 = tf.layers.conv2d(upsample1, 16, (3,3), padding='same', activation=tf.nn.relu)\n# Now 7x7x16\nupsample2 = tf.image.resize_nearest_neighbor(conv4, (14,14))\n# Now 14x14x16\nconv5 = tf.layers.conv2d(upsample2, 32, (3,3), padding='same', activation=tf.nn.relu)\n# Now 14x14x32\nupsample3 = tf.image.resize_nearest_neighbor(conv5, (28,28))\n# Now 28x28x32\nconv6 = tf.layers.conv2d(upsample3, 32, (3,3), padding='same', activation=tf.nn.relu)\n# Now 28x28x32\n\nlogits = tf.layers.conv2d(conv6, 1, (3,3), padding='same', activation=None)\n#Now 28x28x1\n\ndecoded = tf.nn.sigmoid(logits, name='decoded')\n\nloss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)\ncost = tf.reduce_mean(loss)\nopt = tf.train.AdamOptimizer(0.001).minimize(cost)\n\nsess = tf.Session()\n\nepochs = 100\nbatch_size = 200\n# Set's how much noise we're adding to the MNIST images\nnoise_factor = 0.5\nsess.run(tf.global_variables_initializer())\nfor e in range(epochs):\n for ii in range(mnist.train.num_examples//batch_size):\n batch = mnist.train.next_batch(batch_size)\n # Get images from the batch\n imgs = batch[0].reshape((-1, 28, 28, 1))\n \n # Add random noise to the input images\n noisy_imgs = imgs + noise_factor * np.random.randn(*imgs.shape)\n # Clip the images to be between 0 and 1\n noisy_imgs = np.clip(noisy_imgs, 0., 1.)\n \n # Noisy images as inputs, original images as targets\n batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: noisy_imgs,\n targets_: imgs})\n\n print(\"Epoch: {}/{}...\".format(e+1, epochs),\n \"Training loss: {:.4f}\".format(batch_cost))", "Checking out the performance\nHere I'm adding noise to the test images and passing them through the autoencoder. 
It does a surprisingly good job of removing the noise, even though it's sometimes difficult to tell what the original number is.", "fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))\nin_imgs = mnist.test.images[:10]\nnoisy_imgs = in_imgs + noise_factor * np.random.randn(*in_imgs.shape)\nnoisy_imgs = np.clip(noisy_imgs, 0., 1.)\n\nreconstructed = sess.run(decoded, feed_dict={inputs_: noisy_imgs.reshape((10, 28, 28, 1))})\n\nfor images, row in zip([noisy_imgs, reconstructed], axes):\n for img, ax in zip(images, row):\n ax.imshow(img.reshape((28, 28)), cmap='Greys_r')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\nfig.tight_layout(pad=0.1)" ]
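As a tiny standalone illustration of what the nearest-neighbour upsampling in the decoder does, the pure-NumPy helper below (my own sketch, not the TensorFlow op itself) simply repeats each value to fill the larger grid; the convolution that follows in the decoder then smooths the result.

```python
import numpy as np

def upsample_nearest(x, factor=2):
    """Nearest-neighbour upsampling of a 2-D array by an integer factor."""
    return np.repeat(np.repeat(x, factor, axis=0), factor, axis=1)

patch = np.array([[1.0, 2.0],
                  [3.0, 4.0]])
print(upsample_nearest(patch))
# [[1. 1. 2. 2.]
#  [1. 1. 2. 2.]
#  [3. 3. 4. 4.]
#  [3. 3. 4. 4.]]
```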
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]